From d912fd8eaaa56aac90f2b0b118c76f24ba8efa02 Mon Sep 17 00:00:00 2001 From: Georgios Pinitas Date: Mon, 27 Nov 2017 21:00:13 +0000 Subject: COMPMID-617: Add validation to NEON functions. Adds validation to: - NECol2Im - NEIm2Col Change-Id: I346298583a6985ea793f71bb4527aa216a5cd4b2 Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/110835 Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com Reviewed-by: Michalis Spyrou Reviewed-by: Anthony Barbier --- arm_compute/core/NEON/kernels/NECol2ImKernel.h | 16 ++++- arm_compute/core/NEON/kernels/NEIm2ColKernel.h | 13 ++++ arm_compute/runtime/NEON/functions/NECol2Im.h | 17 +++++- arm_compute/runtime/NEON/functions/NEIm2Col.h | 13 ++++ src/core/NEON/kernels/NECol2ImKernel.cpp | 58 +++++++++++++----- src/core/NEON/kernels/NEIm2ColKernel.cpp | 26 ++++++-- src/runtime/NEON/functions/NECol2Im.cpp | 7 ++- src/runtime/NEON/functions/NEConvolutionLayer.cpp | 2 +- src/runtime/NEON/functions/NEIm2Col.cpp | 5 ++ .../NEON/functions/NELocallyConnectedLayer.cpp | 2 +- tests/validation/NEON/Col2Im.cpp | 71 ++++++++++++++++++++++ tests/validation/NEON/Im2Col.cpp | 69 +++++++++++++++++++++ 12 files changed, 272 insertions(+), 27 deletions(-) create mode 100644 tests/validation/NEON/Col2Im.cpp create mode 100644 tests/validation/NEON/Im2Col.cpp diff --git a/arm_compute/core/NEON/kernels/NECol2ImKernel.h b/arm_compute/core/NEON/kernels/NECol2ImKernel.h index 93ec37a610..960e3021db 100644 --- a/arm_compute/core/NEON/kernels/NECol2ImKernel.h +++ b/arm_compute/core/NEON/kernels/NECol2ImKernel.h @@ -26,6 +26,8 @@ #include "arm_compute/core/NEON/INEKernel.h" +#include "arm_compute/core/Size2D.h" + namespace arm_compute { class ITensor; @@ -71,7 +73,17 @@ public: * while the rest represent batch of outputs. Data types supported: Same as @p input * @param[in] convolved_dims Output convolved dimensions. */ - void configure(const ITensor *input, ITensor *output, std::pair convolved_dims); + void configure(const ITensor *input, ITensor *output, const Size2D &convolved_dims); + /** Static function to check if given info will lead to a valid configuration of @ref NECol2ImKernel + * + * @param[in] input The input tensor to convert. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32 + * @param[in] output The output tensor. 3 lower dimensions represent a single output [width, height, OFM], + * while the rest represent batch of outputs. Data types supported: Same as @p input + * @param[in] convolved_dims Output convolved dimensions. + * + * @return an error status + */ + static Error validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &convolved_dims); // Inherited methods overridden: void run(const Window &window, const ThreadInfo &info) override; @@ -93,7 +105,7 @@ private: Col2ImFunctionPtr _func; const ITensor *_input; ITensor *_output; - std::pair _convolved_dims; + Size2D _convolved_dims; }; } // namespace arm_compute #endif /*__ARM_COMPUTE_NECOL2IMKERNEL_H__ */ diff --git a/arm_compute/core/NEON/kernels/NEIm2ColKernel.h b/arm_compute/core/NEON/kernels/NEIm2ColKernel.h index 176799e824..5f9df5136b 100644 --- a/arm_compute/core/NEON/kernels/NEIm2ColKernel.h +++ b/arm_compute/core/NEON/kernels/NEIm2ColKernel.h @@ -81,6 +81,19 @@ public: * @param[in] has_bias In case biases are provided expands the matrix with 1. 
*/ void configure(const ITensor *input, ITensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias); + /** Static function to check if given info will lead to a valid configuration of @ref NEIm2ColKernel + * + * @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM], + * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QS8/QS16/QASYMM8/F16/F32 + * Note: QASYMM8 works only for has_bias = false + * @param[in] output The output tensor. Data types supported: Same as @p input + * @param[in] kernel_dims The kernel dimensions (width and height). + * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. + * @param[in] has_bias In case biases are provided expands the matrix with 1. + * + * @return an error status + */ + static Error validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias); // Inherited methods overridden: void run(const Window &window, const ThreadInfo &info) override; diff --git a/arm_compute/runtime/NEON/functions/NECol2Im.h b/arm_compute/runtime/NEON/functions/NECol2Im.h index 7794d8673f..ba777c5b64 100644 --- a/arm_compute/runtime/NEON/functions/NECol2Im.h +++ b/arm_compute/runtime/NEON/functions/NECol2Im.h @@ -26,24 +26,35 @@ #include "arm_compute/runtime/NEON/INESimpleFunction.h" +#include "arm_compute/core/Size2D.h" #include "arm_compute/core/Types.h" namespace arm_compute { class ITensor; -/** Basic function to run @ref NECol2ImKernel */ +/** Basic function to run @ref NECol2Im */ class NECol2Im : public INESimpleFunction { public: /** Configure the col2im NEON kernel * - * @param[in] input The input tensor to convert. Data types supported: QS8/QS16/QASYMM8/F16/F32 + * @param[in] input The input tensor to convert. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32 * @param[out] output The output tensor. 3 lower dimensions represent a single output [width, height, OFM], * while the rest represent batch of outputs. Data types supported: Same as @p input * @param[in] convolved_dims Output convolved dimensions. */ - void configure(const ITensor *input, ITensor *output, std::pair convolved_dims); + void configure(const ITensor *input, ITensor *output, const Size2D &convolved_dims); + /** Static function to check if given info will lead to a valid configuration of @ref NECol2Im + * + * @param[in] input The input tensor to convert. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32 + * @param[in] output The output tensor. 3 lower dimensions represent a single output [width, height, OFM], + * while the rest represent batch of outputs. Data types supported: Same as @p input + * @param[in] convolved_dims Output convolved dimensions. + * + * @return an error status + */ + static Error validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &convolved_dims); }; } #endif /* __ARM_COMPUTE_NECOL2IM_H__ */ diff --git a/arm_compute/runtime/NEON/functions/NEIm2Col.h b/arm_compute/runtime/NEON/functions/NEIm2Col.h index 913eafabea..4a498514a6 100644 --- a/arm_compute/runtime/NEON/functions/NEIm2Col.h +++ b/arm_compute/runtime/NEON/functions/NEIm2Col.h @@ -48,6 +48,19 @@ public: * @param[in] has_bias In case biases are provided expands the matrix with 1. 
*/ void configure(const ITensor *input, ITensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias); + /** Static function to check if given info will lead to a valid configuration of @ref NEIm2Col + * + * @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM], + * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QS8/QS16/QASYMM8/F16/F32 + * Note: QASYMM8 works only for has_bias = false + * @param[in] output The output tensor. Data types supported: Same as @p input + * @param[in] kernel_dims The kernel dimensions (width and height). + * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. + * @param[in] has_bias In case biases are provided expands the matrix with 1. + * + * @return an error status + */ + static Error validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias); }; } #endif /* __ARM_COMPUTE_NEIM2COL_H__ */ diff --git a/src/core/NEON/kernels/NECol2ImKernel.cpp b/src/core/NEON/kernels/NECol2ImKernel.cpp index 68fc50ced6..ca769f73cc 100644 --- a/src/core/NEON/kernels/NECol2ImKernel.cpp +++ b/src/core/NEON/kernels/NECol2ImKernel.cpp @@ -36,6 +36,37 @@ using namespace arm_compute; +namespace +{ +TensorShape get_output_shape(const ITensorInfo *input, const Size2D &convolved_dims) +{ + TensorShape output_shape = input->tensor_shape(); + output_shape.set(0, convolved_dims.width); + output_shape.set(1, convolved_dims.height); + output_shape.set(2, input->tensor_shape()[0]); + + return output_shape; +} + +Error validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const Size2D &convolved_dims) +{ + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S8, DataType::QS8, DataType::QASYMM8, + DataType::U16, DataType::S16, DataType::QS16, + DataType::U32, DataType::S32, + DataType::F16, DataType::F32); + + // Validate configured output + if(output->total_size() != 0) + { + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), get_output_shape(input, convolved_dims)); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, output); + } + + return Error{}; +} +} // namespace + template void NECol2ImKernel::run_col2im(const Window &window) { @@ -55,7 +86,7 @@ void NECol2ImKernel::run_col2im(const Window &window) execute_window_loop(window, [&](const Coordinates & id) { const int hidx = id.y(); - const int idx = id.x() * output_stride_z + (hidx / _convolved_dims.first) * output_stride_y + (hidx % _convolved_dims.first) * output_stride_x; + const int idx = id.x() * output_stride_z + (hidx / _convolved_dims.width) * output_stride_y + (hidx % _convolved_dims.width) * output_stride_x; *(reinterpret_cast(out.ptr() + idx)) = *(reinterpret_cast(in.ptr())); }, @@ -67,24 +98,15 @@ NECol2ImKernel::NECol2ImKernel() { } -void NECol2ImKernel::configure(const ITensor *input, ITensor *output, std::pair convolved_dims) +void NECol2ImKernel::configure(const ITensor *input, ITensor *output, const Size2D &convolved_dims) { - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S8, DataType::QS8, DataType::QASYMM8, DataType::U16, DataType::S16, DataType::QS16, DataType::U32, DataType::S32, - DataType::F16, - DataType::F32); - ARM_COMPUTE_ERROR_ON_NULLPTR(output); - - TensorShape output_shape 
= input->info()->tensor_shape(); - output_shape.set(0, convolved_dims.first); - output_shape.set(1, convolved_dims.second); - output_shape.set(2, input->info()->tensor_shape()[0]); + ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); // Output auto inizialitation if not yet initialized - auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type(), input->info()->fixed_point_position()); + auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(get_output_shape(input->info(), convolved_dims))); - ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(output->info()->tensor_shape(), output_shape); - ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); - ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output); + // Perform validation step + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), convolved_dims)); _input = input; _output = output; @@ -117,6 +139,12 @@ void NECol2ImKernel::configure(const ITensor *input, ITensor *output, std::pair< INEKernel::configure(win); } +Error NECol2ImKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &convolved_dims) +{ + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, convolved_dims)); + return Error{}; +} + void NECol2ImKernel::run(const Window &window, const ThreadInfo &info) { ARM_COMPUTE_UNUSED(info); diff --git a/src/core/NEON/kernels/NEIm2ColKernel.cpp b/src/core/NEON/kernels/NEIm2ColKernel.cpp index 090e554834..7a9b06ebda 100644 --- a/src/core/NEON/kernels/NEIm2ColKernel.cpp +++ b/src/core/NEON/kernels/NEIm2ColKernel.cpp @@ -42,6 +42,18 @@ using namespace arm_compute; namespace { +Error validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias) +{ + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QASYMM8, DataType::QS16, DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT_POSITION(input, output); + ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::QASYMM8 && has_bias); + ARM_COMPUTE_UNUSED(kernel_dims); + ARM_COMPUTE_UNUSED(conv_info); + + return Error{}; +} + template inline void linearize_volume(const uint8_t *const in_ptr, T *out_ptr, @@ -278,10 +290,10 @@ NEIm2ColKernel::NEIm2ColKernel() void NEIm2ColKernel::configure(const ITensor *input, ITensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias) { - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32, DataType::QS8, DataType::QS16, DataType::QASYMM8); - ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); - ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT_POSITION(input, output); - ARM_COMPUTE_ERROR_ON(input->info()->data_type() == DataType::QASYMM8 && has_bias); + ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); + + // Perform validation step + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), kernel_dims, conv_info, has_bias)); _input = input; _output = output; @@ -361,6 +373,12 @@ void NEIm2ColKernel::configure(const ITensor *input, ITensor *output, const Size IKernel::configure(window); } +Error NEIm2ColKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias) +{ + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, kernel_dims, conv_info, has_bias)); + return 
Error{}; +} + void NEIm2ColKernel::run(const Window &window, const ThreadInfo &info) { ARM_COMPUTE_UNUSED(info); diff --git a/src/runtime/NEON/functions/NECol2Im.cpp b/src/runtime/NEON/functions/NECol2Im.cpp index 2dc4ebd49a..2a923f3730 100644 --- a/src/runtime/NEON/functions/NECol2Im.cpp +++ b/src/runtime/NEON/functions/NECol2Im.cpp @@ -28,9 +28,14 @@ using namespace arm_compute; -void NECol2Im::configure(const ITensor *input, ITensor *output, std::pair convolved_dims) +void NECol2Im::configure(const ITensor *input, ITensor *output, const Size2D &convolved_dims) { auto k = arm_compute::support::cpp14::make_unique(); k->configure(input, output, convolved_dims); _kernel = std::move(k); } + +Error NECol2Im::validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &convolved_dims) +{ + return NECol2ImKernel::validate(input, output, convolved_dims); +} diff --git a/src/runtime/NEON/functions/NEConvolutionLayer.cpp b/src/runtime/NEON/functions/NEConvolutionLayer.cpp index 155f4e561a..865672e525 100644 --- a/src/runtime/NEON/functions/NEConvolutionLayer.cpp +++ b/src/runtime/NEON/functions/NEConvolutionLayer.cpp @@ -292,7 +292,7 @@ void NEConvolutionLayer::configure(const ITensor *input, const ITensor *weights, } _input_im2col_reshaped.allocator()->allocate(); - _output_col2im_kernel.configure(&_gemm_output, output, std::make_pair(conv_w, conv_h)); + _output_col2im_kernel.configure(&_gemm_output, output, Size2D(conv_w, conv_h)); _gemm_output.allocator()->allocate(); ARM_COMPUTE_ERROR_ON_MSG((output->info()->dimension(0) != conv_w) || (output->info()->dimension(1) != conv_h), "Output shape does not match the expected one"); diff --git a/src/runtime/NEON/functions/NEIm2Col.cpp b/src/runtime/NEON/functions/NEIm2Col.cpp index 4ed591523b..354415daa3 100644 --- a/src/runtime/NEON/functions/NEIm2Col.cpp +++ b/src/runtime/NEON/functions/NEIm2Col.cpp @@ -34,3 +34,8 @@ void NEIm2Col::configure(const ITensor *input, ITensor *output, const Size2D &ke k->configure(input, output, kernel_dims, conv_info, has_bias); _kernel = std::move(k); } + +Error NEIm2Col::validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias) +{ + return NEIm2ColKernel::validate(input, output, kernel_dims, conv_info, has_bias); +} diff --git a/src/runtime/NEON/functions/NELocallyConnectedLayer.cpp b/src/runtime/NEON/functions/NELocallyConnectedLayer.cpp index cb48598921..b29b796276 100644 --- a/src/runtime/NEON/functions/NELocallyConnectedLayer.cpp +++ b/src/runtime/NEON/functions/NELocallyConnectedLayer.cpp @@ -111,7 +111,7 @@ void NELocallyConnectedLayer::configure(const ITensor *input, const ITensor *wei _input_im2col_kernel.configure(input, &_input_im2col_reshaped, Size2D(kernel_width, kernel_height), conv_info, _has_bias); _weights_reshape_kernel.configure(weights, biases, &_weights_reshaped); _mm_kernel.configure(&_input_im2col_reshaped, &_weights_reshaped, &_gemm_output); - _output_col2im_kernel.configure(&_gemm_output, output, std::make_pair(conv_w, conv_h)); + _output_col2im_kernel.configure(&_gemm_output, output, Size2D(conv_w, conv_h)); // Allocate intermediate tensors _weights_reshaped.allocator()->allocate(); diff --git a/tests/validation/NEON/Col2Im.cpp b/tests/validation/NEON/Col2Im.cpp new file mode 100644 index 0000000000..c835c27f18 --- /dev/null +++ b/tests/validation/NEON/Col2Im.cpp @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2017 ARM Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "arm_compute/core/Types.h" +#include "arm_compute/runtime/NEON/functions/NECol2Im.h" +#include "tests/datasets/ShapeDatasets.h" +#include "tests/framework/Asserts.h" +#include "tests/framework/Macros.h" +#include "tests/framework/datasets/Datasets.h" +#include "tests/validation/Validation.h" + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +TEST_SUITE(NEON) +TEST_SUITE(Col2Im) + +// *INDENT-OFF* +// clang-format off +DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip( + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::S64), // Unsupported data type + TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::F32), // Mismatching data type + TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point + TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::F32), // Invalid output shape + TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::F32), + }), + framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::F16), + TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::F16), + TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::QS8, 3), + TensorInfo(TensorShape(3U, 3U, 10U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::F32), + })), + framework::dataset::make("ConvolvedWidth", { 3, 3, 3, 3, 3 })), + framework::dataset::make("ConvolvedHeight", { 4, 4, 4, 4, 4 })), + framework::dataset::make("Expected", { true, true, true, true, false })), + input_info, output_info, convolved_width, convolved_height, expected) +{ + bool err = bool(NECol2Im::validate(&input_info, &output_info, Size2D(convolved_width, convolved_height))); + ARM_COMPUTE_EXPECT(err == expected, framework::LogLevel::ERRORS); +} +// clang-format on +// *INDENT-ON* + +TEST_SUITE_END() +TEST_SUITE_END() +} // namespace validation +} // namespace test +} // namespace arm_compute diff --git a/tests/validation/NEON/Im2Col.cpp b/tests/validation/NEON/Im2Col.cpp new file mode 100644 index 0000000000..b05b8daed1 --- /dev/null +++ b/tests/validation/NEON/Im2Col.cpp @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2017 ARM Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "arm_compute/core/Types.h" +#include "arm_compute/runtime/NEON/functions/NEIm2Col.h" +#include "tests/framework/Asserts.h" +#include "tests/framework/Macros.h" +#include "tests/framework/datasets/Datasets.h" +#include "tests/validation/Validation.h" + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +TEST_SUITE(NEON) +TEST_SUITE(Im2Col) + +// *INDENT-OFF* +// clang-format off +DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::U8), // Unsupported data type + TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::F32), // Mismatching data type + TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point + TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::QASYMM8), // Bias not supported with QASYMM8 + TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::QASYMM8), + }), + framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::F16), + TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::F16), + TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::QS8, 3), + TensorInfo(TensorShape(3U, 3U, 10U, 2U), 1, DataType::QASYMM8), + TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::QASYMM8), + })), + framework::dataset::make("HasBias", { true, true, true, true, false })), + framework::dataset::make("Expected", { true, true, true, true, false })), + input_info, output_info, has_bias, expected) +{ + bool err = bool(NEIm2Col::validate(&input_info, &output_info, Size2D(3U, 3U), PadStrideInfo(), has_bias)); + ARM_COMPUTE_EXPECT(err == expected, framework::LogLevel::ERRORS); +} +// clang-format on +// *INDENT-ON* + +TEST_SUITE_END() +TEST_SUITE_END() +} // namespace validation +} // namespace test +} // namespace arm_compute -- cgit v1.2.1