diff options
-rw-r--r-- | arm_compute/core/CL/CLKernels.h | 1 | ||||
-rw-r--r-- | arm_compute/core/CL/kernels/CLReorgLayerKernel.h | 79 | ||||
-rw-r--r-- | arm_compute/core/NEON/kernels/NEReorgLayerKernel.h | 4 | ||||
-rw-r--r-- | arm_compute/core/utils/misc/ShapeCalculator.h | 18 | ||||
-rw-r--r-- | arm_compute/runtime/CL/CLFunctions.h | 1 | ||||
-rw-r--r-- | arm_compute/runtime/CL/functions/CLReorgLayer.h | 62 | ||||
-rw-r--r-- | arm_compute/runtime/NEON/functions/NEReorgLayer.h | 2 | ||||
-rw-r--r-- | src/core/CL/CLKernelLibrary.cpp | 6 | ||||
-rw-r--r-- | src/core/CL/cl_kernels/reorg_layer.cl | 116 | ||||
-rw-r--r-- | src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp | 2 | ||||
-rw-r--r-- | src/core/CL/kernels/CLReorgLayerKernel.cpp | 140 | ||||
-rw-r--r-- | src/core/NEON/kernels/NEReorgLayerKernel.cpp | 14 | ||||
-rw-r--r-- | src/runtime/CL/functions/CLReorgLayer.cpp | 47 | ||||
-rw-r--r-- | tests/datasets/ReorgLayerDataset.h | 133 | ||||
-rw-r--r-- | tests/validation/CL/ReorgLayer.cpp | 168 | ||||
-rw-r--r-- | tests/validation/NEON/ReorgLayer.cpp | 158 | ||||
-rw-r--r-- | tests/validation/fixtures/ReorgLayerFixture.h | 4 | ||||
-rw-r--r-- | tests/validation/reference/ReorgLayer.cpp | 31 |
18 files changed, 868 insertions, 118 deletions
diff --git a/arm_compute/core/CL/CLKernels.h b/arm_compute/core/CL/CLKernels.h index 4750031603..4a6773a5f8 100644 --- a/arm_compute/core/CL/CLKernels.h +++ b/arm_compute/core/CL/CLKernels.h @@ -104,6 +104,7 @@ #include "arm_compute/core/CL/kernels/CLROIPoolingLayerKernel.h" #include "arm_compute/core/CL/kernels/CLReductionOperationKernel.h" #include "arm_compute/core/CL/kernels/CLRemapKernel.h" +#include "arm_compute/core/CL/kernels/CLReorgLayerKernel.h" #include "arm_compute/core/CL/kernels/CLReshapeLayerKernel.h" #include "arm_compute/core/CL/kernels/CLScaleKernel.h" #include "arm_compute/core/CL/kernels/CLScharr3x3Kernel.h" diff --git a/arm_compute/core/CL/kernels/CLReorgLayerKernel.h b/arm_compute/core/CL/kernels/CLReorgLayerKernel.h new file mode 100644 index 0000000000..922985422d --- /dev/null +++ b/arm_compute/core/CL/kernels/CLReorgLayerKernel.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __ARM_COMPUTE_CLREORGLAYERKERNEL_H__ +#define __ARM_COMPUTE_CLREORGLAYERKERNEL_H__ + +#include "arm_compute/core/CL/ICLKernel.h" +#include "arm_compute/core/Types.h" + +namespace arm_compute +{ +class ICLTensor; + +/** OpenCL kernel to perform a reorg layer */ +class CLReorgLayerKernel : public ICLKernel +{ +public: + /** Default constructor */ + CLReorgLayerKernel(); + /** Prevent instances of this class from being copied (As this class contains pointers). */ + CLReorgLayerKernel(const CLReorgLayerKernel &) = delete; + /** Prevent instances of this class from being copied (As this class contains pointers). */ + CLReorgLayerKernel &operator=(const CLReorgLayerKernel &) = delete; + /** Allow instances of this class to be moved */ + CLReorgLayerKernel(CLReorgLayerKernel &&) = default; + /** Allow instances of this class to be moved */ + CLReorgLayerKernel &operator=(CLReorgLayerKernel &&) = default; + /** Initialize the kernel's input, output. + * + * @param[in] input Source tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32. + * @param[out] output Destination tensor with tensor shape: + * [width_input / stride, height_input / stride, channels_input * stride * stride, batch_size]. This means the output has + * the same number of input elements. Data types supported: same as @p input. + * @param[in] stride Stride value to use for reorganizing the values in the output tensor. 
+ * It defines the spatial distance between 2 consecutive pixels in the x and y direction + */ + void configure(const ICLTensor *input, ICLTensor *output, int32_t stride); + /** Static function to check if given info will lead to a valid configuration of @ref CLReorgLayerKernel + * + * @param[in] input Source tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32. + * @param[in] output Destination tensor with tensor shape: + * [width_input / stride, height_input / stride, channels_input * stride * stride, batch_size]. This means the output has + * the same number of input elements. Data types supported: same as @p input. Data types supported: same as @p input. + * @param[in] stride Stride value to use for reorganizing the values in the output tensor + * It defines the spatial distance between 2 consecutive pixels in the x and y direction + * + * @return a status + */ + static Status validate(const ITensorInfo *input, const ITensorInfo *output, int32_t stride); + + // Inherited methods overridden: + void run(const Window &window, cl::CommandQueue &queue) override; + +private: + const ICLTensor *_input; + ICLTensor *_output; +}; +} // namespace arm_compute +#endif /*__ARM_COMPUTE_CLREORGLAYERKERNEL_H__ */ diff --git a/arm_compute/core/NEON/kernels/NEReorgLayerKernel.h b/arm_compute/core/NEON/kernels/NEReorgLayerKernel.h index 323ab342b2..7e0fb4350d 100644 --- a/arm_compute/core/NEON/kernels/NEReorgLayerKernel.h +++ b/arm_compute/core/NEON/kernels/NEReorgLayerKernel.h @@ -55,7 +55,8 @@ public: * * @param[in] input Source tensor. Data type supported: U8/S8/U16/S16/QASYMM8/U32/S32/F16/F32 * @param[out] output Destination tensor. Data type supported: Same as @p input - * @param[in] stride Stride to be used during data re-organization + * @param[in] stride Stride to be used during data re-organization. 
+ * It defines the spatial distance between 2 consecutive pixels in the x and y direction */ void configure(const ITensor *input, ITensor *output, int32_t stride); @@ -64,6 +65,7 @@ public: * @param[in] input Source tensor info. Data type supported: U8/S8/U16/S16/QASYMM8/U32/S32/F16/F32 * @param[in] output Destination tensor info. Data type supported: Same as @p input * @param[in] stride Stride to be used during data re-organization + * It defines the spatial distance between 2 consecutive pixels in the x and y direction * * @return a status */ diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h index d2af844b2a..d72547ed07 100644 --- a/arm_compute/core/utils/misc/ShapeCalculator.h +++ b/arm_compute/core/utils/misc/ShapeCalculator.h @@ -61,17 +61,19 @@ inline TensorShape compute_permutation_output_shape(const ITensorInfo &input, co inline TensorShape compute_reorg_output_shape(const ITensorInfo &input, int32_t stride) { - ARM_COMPUTE_ERROR_ON(stride <= 0); + const size_t idx_width = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH); + const size_t idx_height = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT); + const size_t idx_channel = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL); - const DataLayout data_layout = input.data_layout(); - const unsigned int width_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); - const unsigned int height_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT); - const unsigned int channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL); + ARM_COMPUTE_ERROR_ON(stride <= 0); + ARM_COMPUTE_ERROR_ON_MSG((input.tensor_shape()[idx_width] % stride != 0), "The width of the input tensor must be a multiple of stride"); + ARM_COMPUTE_ERROR_ON_MSG((input.tensor_shape()[idx_height] % stride != 0), "The 
height of the input tensor must be a multiple of stride"); TensorShape output_shape{ input.tensor_shape() }; - output_shape.set(width_idx, input.tensor_shape()[width_idx] / stride); - output_shape.set(height_idx, input.tensor_shape()[height_idx] / stride); - output_shape.set(channel_idx, input.tensor_shape()[channel_idx] * stride * stride); + + output_shape.set(idx_width, output_shape[idx_width] / stride); + output_shape.set(idx_height, output_shape[idx_height] / stride); + output_shape.set(idx_channel, output_shape[idx_channel] * stride * stride); return output_shape; } diff --git a/arm_compute/runtime/CL/CLFunctions.h b/arm_compute/runtime/CL/CLFunctions.h index d0c62c3426..2139d5dad3 100644 --- a/arm_compute/runtime/CL/CLFunctions.h +++ b/arm_compute/runtime/CL/CLFunctions.h @@ -104,6 +104,7 @@ #include "arm_compute/runtime/CL/functions/CLROIPoolingLayer.h" #include "arm_compute/runtime/CL/functions/CLReductionOperation.h" #include "arm_compute/runtime/CL/functions/CLRemap.h" +#include "arm_compute/runtime/CL/functions/CLReorgLayer.h" #include "arm_compute/runtime/CL/functions/CLReshapeLayer.h" #include "arm_compute/runtime/CL/functions/CLScale.h" #include "arm_compute/runtime/CL/functions/CLScharr3x3.h" diff --git a/arm_compute/runtime/CL/functions/CLReorgLayer.h b/arm_compute/runtime/CL/functions/CLReorgLayer.h new file mode 100644 index 0000000000..88559a83f2 --- /dev/null +++ b/arm_compute/runtime/CL/functions/CLReorgLayer.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2018 ARM Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __ARM_COMPUTE_CLREORGLAYER_H__ +#define __ARM_COMPUTE_CLREORGLAYER_H__ + +#include "arm_compute/core/Types.h" +#include "arm_compute/runtime/CL/ICLSimpleFunction.h" + +namespace arm_compute +{ +class ICLTensor; + +class CLReorgLayer : public ICLSimpleFunction +{ +public: + /** Initialise the function's source and destination. + * + * @param[in] input Source tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32. + * @param[out] output Destination tensor with tensor shape: + * [width_input / stride, height_input / stride, channels_input * stride * stride, batch_size]. This means the output has + * the same number of input elements. Data types supported: same as @p input. + * @param[in] stride Stride value to use for reorganizing the values in the output tensor. 
+ * It defines the spatial distance between 2 consecutive pixels in the x and y direction + * + */ + void configure(ICLTensor *input, ICLTensor *output, int32_t stride); + /** Static function to check if given info will lead to a valid configuration of @ref CLReorgLayer + * + * @param[in] input Source tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32. + * @param[in] output Destination tensor with tensor shape: + * [width_input / stride, height_input / stride, channels_input * stride * stride, batch_size]. This means the output has + * the same number of input elements. Data types supported: same as @p input. Data types supported: same as @p input. + * @param[in] stride Stride value to use for reorganizing the values in the output tensor + * It defines the spatial distance between 2 consecutive pixels in the x and y direction + * + * @return a status + */ + static Status validate(const ITensorInfo *input, const ITensorInfo *output, int32_t stride); +}; +} // namespace arm_compute +#endif /*__ARM_COMPUTE_CLREORGLAYER_H__ */ diff --git a/arm_compute/runtime/NEON/functions/NEReorgLayer.h b/arm_compute/runtime/NEON/functions/NEReorgLayer.h index f29b2f1964..a73752832b 100644 --- a/arm_compute/runtime/NEON/functions/NEReorgLayer.h +++ b/arm_compute/runtime/NEON/functions/NEReorgLayer.h @@ -41,6 +41,7 @@ public: * @param[in] input First tensor input. Data type supported: U8/S8/QASYMM8//U16/S16/U32/S32/F16/F32 * @param[out] output Output tensor. Data type supported: Same as @p input * @param[in] stride Stride to be used during data re-organization + * It defines the spatial distance between 2 consecutive pixels in the x and y direction */ void configure(const ITensor *input, ITensor *output, int32_t stride); @@ -49,6 +50,7 @@ public: * @param[in] input First tensor info. Data type supported: U8/S8/QASYMM8//U16/S16/U32/S32/F16/F32 * @param[in] output Output tensor info. 
Data type supported: Same as @p input * @param[in] stride Stride to be used during data re-organization + * It defines the spatial distance between 2 consecutive pixels in the x and y direction * * @return a status */ diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp index 4af2b09530..7cc586bff1 100644 --- a/src/core/CL/CLKernelLibrary.cpp +++ b/src/core/CL/CLKernelLibrary.cpp @@ -336,6 +336,8 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map = { "reduction_operation", "reduction_operation.cl" }, { "remap_nearest_neighbour", "remap.cl" }, { "remap_bilinear", "remap.cl" }, + { "reorg_layer_nchw", "reorg_layer.cl" }, + { "reorg_layer_nhwc", "reorg_layer.cl" }, { "reshape_layer", "reshape_layer.cl" }, { "reshape_to_columns", "convolution_layer.cl" }, { "RGB888_to_IYUV_bt709", "color_convert.cl" }, @@ -720,6 +722,10 @@ const std::map<std::string, std::string> CLKernelLibrary::_program_source_map = #include "./cl_kernels/remap.clembed" }, { + "reorg_layer.cl", +#include "./cl_kernels/reorg_layer.clembed" + }, + { "reshape_layer.cl", #include "./cl_kernels/reshape_layer.clembed" }, diff --git a/src/core/CL/cl_kernels/reorg_layer.cl b/src/core/CL/cl_kernels/reorg_layer.cl new file mode 100644 index 0000000000..a275699a3f --- /dev/null +++ b/src/core/CL/cl_kernels/reorg_layer.cl @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2018 ARM Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "helpers.h" + +#if defined(DATA_TYPE) && defined(SRC_DEPTH) && defined(STRIDE) + +#define CALCULATE_SRC_COORDINATES(xo, yo, zo, xi, yi, zi) \ + ({ \ + int offset = zo / (int)SRC_DEPTH; \ + xi = xo * (int)STRIDE + offset % (int)STRIDE; \ + yi = yo * (int)STRIDE + offset / (int)STRIDE; \ + zi = zo % SRC_DEPTH; \ + }) + +/** Performs a reorganization layer of input tensor to the output tensor when the data layout is NCHW + * + * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float + * @note The depth of the input tensor must be passed at compile time using -DSRC_DEPTH: e.g. -DSRC_DEPTH=64 + * @note The distance between 2 consecutive pixels along the x and y direction must be passed at compile time using -DSTRIDE: e.g. -DSTRIDE=2 + * + * @param[in] src_ptr Pointer to the source tensor. 
Supported data types: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32 + * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes) + * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes) + * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes) + * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes) + * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor + * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr + * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes) + * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes) + * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes) + * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes) + * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor + */ +__kernel void reorg_layer_nchw( + TENSOR3D_DECLARATION(src), + TENSOR3D_DECLARATION(dst)) +{ + Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(dst); + + int xo = get_global_id(0); + int yo = get_global_id(1); + int zo = get_global_id(2); + int xi, yi, zi; + + CALCULATE_SRC_COORDINATES(xo, yo, zo, xi, yi, zi); + + int src_offset = xi * sizeof(DATA_TYPE) + yi * src_stride_y + zi * src_stride_z; + *((__global DATA_TYPE *)out.ptr) = *((__global DATA_TYPE *)(src_ptr + src_offset_first_element_in_bytes + src_offset)); +} + 
+/** Performs a reorganization layer of input tensor to the output tensor when the data layout is NHWC + * + * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float + * @note The depth of the input tensor must be passed at compile time using -DSRC_DEPTH: e.g. -DSRC_DEPTH=64 + * @note The distance between 2 consecutive pixels along the x and y direction must be passed at compile time using -DSTRIDE: e.g. -DSTRIDE=2 + * + * @param[in] src_ptr Pointer to the source tensor. Supported data types: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32 + * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes) + * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes) + * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes) + * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes) + * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor + * @param[out] dst_ptr Pointer to the destination tensor. 
Supported data types: same as @p src_ptr + * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes) + * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes) + * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes) + * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes) + * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor + */ +__kernel void reorg_layer_nhwc( + TENSOR3D_DECLARATION(src), + TENSOR3D_DECLARATION(dst)) +{ + Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(dst); + + int xo = get_global_id(1); + int yo = get_global_id(2); + int zo = get_global_id(0); + int xi, yi, zi; + + CALCULATE_SRC_COORDINATES(xo, yo, zo, xi, yi, zi); + + int src_offset = zi * sizeof(DATA_TYPE) + xi * src_stride_y + yi * src_stride_z; + + *((__global DATA_TYPE *)out.ptr) = *((__global DATA_TYPE *)(src_ptr + src_offset_first_element_in_bytes + src_offset)); +} +#endif // defined(DATA_TYPE) && defined(SRC_DEPTH) && defined(STRIDE)
\ No newline at end of file diff --git a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp index d4a72076c1..1fa5c8521f 100644 --- a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp +++ b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp @@ -192,8 +192,6 @@ void CLBatchNormalizationLayerKernel::configure(ICLTensor *input, ICLTensor *out ICLKernel::configure_internal(win_config.second); _config_id = "batch_normalization_layer_"; - _config_id += string_from_data_layout(input->info()->data_layout()); - _config_id += "_"; _config_id += string_from_data_type(input->info()->data_type()); _config_id += "_"; _config_id += support::cpp11::to_string(input->info()->dimension(0)); diff --git a/src/core/CL/kernels/CLReorgLayerKernel.cpp b/src/core/CL/kernels/CLReorgLayerKernel.cpp new file mode 100644 index 0000000000..7891844ef6 --- /dev/null +++ b/src/core/CL/kernels/CLReorgLayerKernel.cpp @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "arm_compute/core/CL/kernels/CLReorgLayerKernel.h" + +#include "arm_compute/core/CL/CLHelpers.h" +#include "arm_compute/core/CL/CLKernelLibrary.h" +#include "arm_compute/core/CL/ICLTensor.h" +#include "arm_compute/core/Error.h" +#include "arm_compute/core/Helpers.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/Validate.h" +#include "arm_compute/core/Window.h" +#include "arm_compute/core/utils/misc/ShapeCalculator.h" + +#include <string> + +namespace arm_compute +{ +namespace +{ +Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, int32_t stride) +{ + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, + DataType::U8, DataType::S8, DataType::QASYMM8, + DataType::U16, DataType::S16, + DataType::U32, DataType::S32, + DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON(input->data_layout() == DataLayout::UNKNOWN); + + const size_t idx_width = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH); + const size_t idx_height = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT); + + ARM_COMPUTE_RETURN_ERROR_ON(stride <= 0); + ARM_COMPUTE_RETURN_ERROR_ON_MSG((input->tensor_shape()[idx_width] % stride) != 0, "The width of the input tensor must be a multiple of stride"); + ARM_COMPUTE_RETURN_ERROR_ON_MSG((input->tensor_shape()[idx_height] % stride) != 0, "The height of the input tensor must be a multiple of stride"); + + // Validate output if initialized + if(output->total_size() != 0) + { + const TensorInfo tensor_info_output = 
output->clone()->set_tensor_shape(misc::shape_calculator::compute_reorg_output_shape(*input, stride)); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); + } + + return Status{}; +} +} // namespace + +CLReorgLayerKernel::CLReorgLayerKernel() + : _input(nullptr), _output(nullptr) +{ +} + +void CLReorgLayerKernel::configure(const ICLTensor *input, ICLTensor *output, int32_t stride) +{ + ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), stride)); + + _input = input; + _output = output; + + std::string kernel_name = std::string("reorg_layer_") + lower_string(string_from_data_layout(input->info()->data_layout())); + const size_t idx_channel = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::CHANNEL); + + // Create kernel + CLBuildOptions build_opts; + build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())); + build_opts.add_option("-DSRC_DEPTH=" + support::cpp11::to_string(input->info()->dimension(idx_channel))); + build_opts.add_option("-DSTRIDE=" + support::cpp11::to_string(stride)); + _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options())); + + // Configure window + // auto-initialize the output tensor if not yet initialized + auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(misc::shape_calculator::compute_reorg_output_shape(*input->info(), stride))); + + Window win = calculate_max_window(*output->info(), Steps()); + + // The CLReorgLayerKernel doesn't need padding so update_window_and_padding() can be skipped + output->info()->set_valid_region(ValidRegion(Coordinates(), output->info()->tensor_shape())); + ICLKernel::configure_internal(win); + + _config_id = kernel_name; + _config_id += "_"; + _config_id += 
string_from_data_type(input->info()->data_type()); + _config_id += "_"; + _config_id += support::cpp11::to_string(input->info()->dimension(0)); + _config_id += "_"; + _config_id += support::cpp11::to_string(input->info()->dimension(1)); + _config_id += "_"; + _config_id += support::cpp11::to_string(input->info()->dimension(2)); + _config_id += "_"; + _config_id += support::cpp11::to_string(stride); +} + +Status CLReorgLayerKernel::validate(const arm_compute::ITensorInfo *input, const arm_compute::ITensorInfo *output, int32_t stride) +{ + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, stride)); + + return Status{}; +} + +void CLReorgLayerKernel::run(const Window &window, cl::CommandQueue &queue) +{ + ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); + ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window); + + Window slice = window.first_slice_window_3D(); + + do + { + unsigned int idx = 0; + add_3D_tensor_argument(idx, _input, slice); + add_3D_tensor_argument(idx, _output, slice); + enqueue(queue, *this, slice, lws_hint()); + } + while(window.slide_window_slice_3D(slice)); +} +} // namespace arm_compute
\ No newline at end of file diff --git a/src/core/NEON/kernels/NEReorgLayerKernel.cpp b/src/core/NEON/kernels/NEReorgLayerKernel.cpp index 1b2ec92cba..8baea2b990 100644 --- a/src/core/NEON/kernels/NEReorgLayerKernel.cpp +++ b/src/core/NEON/kernels/NEReorgLayerKernel.cpp @@ -46,14 +46,20 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, i DataType::U16, DataType::S16, DataType::U32, DataType::S32, DataType::F16, DataType::F32); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(stride <= 0, "Stride should be a positive number"); + ARM_COMPUTE_RETURN_ERROR_ON(input->data_layout() == DataLayout::UNKNOWN); - const TensorShape output_shape = misc::shape_calculator::compute_reorg_output_shape(*input, stride); + const size_t idx_width = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH); + const size_t idx_height = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT); - // Validate configured output + ARM_COMPUTE_RETURN_ERROR_ON(stride <= 0); + ARM_COMPUTE_RETURN_ERROR_ON_MSG((input->tensor_shape()[idx_width] % stride) != 0, "The width of the input tensor must be a multiple of stride"); + ARM_COMPUTE_RETURN_ERROR_ON_MSG((input->tensor_shape()[idx_height] % stride) != 0, "The height of the input tensor must be a multiple of stride"); + + // Validate output if initialized if(output->total_size() != 0) { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape); + const TensorInfo tensor_info_output = output->clone()->set_tensor_shape(misc::shape_calculator::compute_reorg_output_shape(*input, stride)); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); } diff --git a/src/runtime/CL/functions/CLReorgLayer.cpp b/src/runtime/CL/functions/CLReorgLayer.cpp new file mode 100644 index 0000000000..8e04d16b12 --- /dev/null +++ b/src/runtime/CL/functions/CLReorgLayer.cpp @@ -0,0 +1,47 
@@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "arm_compute/runtime/CL/functions/CLReorgLayer.h" + +#include "arm_compute/core/CL/ICLTensor.h" +#include "arm_compute/core/CL/kernels/CLReorgLayerKernel.h" +#include "arm_compute/core/Error.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/Validate.h" +#include "support/ToolchainSupport.h" + +#include <utility> + +using namespace arm_compute; + +void CLReorgLayer::configure(ICLTensor *input, ICLTensor *output, int32_t stride) +{ + auto k = arm_compute::support::cpp14::make_unique<CLReorgLayerKernel>(); + k->configure(input, output, stride); + _kernel = std::move(k); +} + +Status CLReorgLayer::validate(const ITensorInfo *input, const ITensorInfo *output, int32_t stride) +{ + return CLReorgLayerKernel::validate(input, output, stride); +} diff --git a/tests/datasets/ReorgLayerDataset.h b/tests/datasets/ReorgLayerDataset.h new file mode 100644 index 0000000000..de363e7a3e --- /dev/null +++ b/tests/datasets/ReorgLayerDataset.h @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef ARM_COMPUTE_TEST_REORGLAYER_DATASET +#define ARM_COMPUTE_TEST_REORGLAYER_DATASET + +#include "utils/TypePrinter.h" + +#include "arm_compute/core/TensorShape.h" +#include "arm_compute/core/Types.h" + +namespace arm_compute +{ +namespace test +{ +namespace datasets +{ +class ReorgLayerDataset +{ +public: + using type = std::tuple<TensorShape, unsigned int>; + + struct iterator + { + iterator(std::vector<TensorShape>::const_iterator src_it, + std::vector<unsigned int>::const_iterator stride_it) + : _src_it{ std::move(src_it) }, + _stride_it{ std::move(stride_it) } + { + } + + std::string description() const + { + std::stringstream description; + description << "In=" << *_src_it << ":"; + description << "Stride=" << *_stride_it; + return description.str(); + } + + ReorgLayerDataset::type operator*() const + { + return std::make_tuple(*_src_it, *_stride_it); + } + + iterator &operator++() + { + ++_src_it; + ++_stride_it; + + return *this; + } + + private: + std::vector<TensorShape>::const_iterator _src_it; + std::vector<unsigned int>::const_iterator _stride_it; + }; + + iterator begin() const + { + return iterator(_src_shapes.begin(), _stride.begin()); + } + + int size() const + { + return std::min(_src_shapes.size(), _stride.size()); + } + + void add_config(TensorShape src, unsigned int stride) + { + _src_shapes.emplace_back(std::move(src)); + _stride.emplace_back(std::move(stride)); + } + +protected: + ReorgLayerDataset() = default; + ReorgLayerDataset(ReorgLayerDataset &&) = default; + +private: + std::vector<TensorShape> _src_shapes{}; + std::vector<unsigned int> _stride{}; +}; + +/** Dataset containing small reorg layer shapes. 
*/ +class SmallReorgLayerDataset final : public ReorgLayerDataset +{ +public: + SmallReorgLayerDataset() + { + add_config(TensorShape(26U, 26U, 64U, 1U), 2U); + add_config(TensorShape(28U, 28U, 13U, 1U), 4U); + add_config(TensorShape(12U, 14U, 4U, 1U), 2U); + add_config(TensorShape(9U, 12U, 2U, 4U), 3U); + add_config(TensorShape(25U, 15U, 4U, 2U), 5U); + } +}; + +/** Dataset containing large reorg layer shapes. */ +class LargeReorgLayerDataset final : public ReorgLayerDataset +{ +public: + LargeReorgLayerDataset() + { + add_config(TensorShape(49U, 28U, 64U, 1U), 7U); + add_config(TensorShape(63U, 21U, 13U, 1U), 3U); + add_config(TensorShape(48U, 54U, 4U, 1U), 2U); + add_config(TensorShape(114U, 117U, 2U, 4U), 3U); + add_config(TensorShape(100U, 95U, 4U, 2U), 5U); + } +}; +} // namespace datasets +} // namespace test +} // namespace arm_compute +#endif /* ARM_COMPUTE_TEST_REORGLAYER_DATASET */ diff --git a/tests/validation/CL/ReorgLayer.cpp b/tests/validation/CL/ReorgLayer.cpp new file mode 100644 index 0000000000..e4caa04c3a --- /dev/null +++ b/tests/validation/CL/ReorgLayer.cpp @@ -0,0 +1,168 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "arm_compute/core/Types.h" +#include "arm_compute/runtime/CL/functions/CLReorgLayer.h" +#include "tests/CL/CLAccessor.h" +#include "tests/CL/Helper.h" +#include "tests/PaddingCalculator.h" +#include "tests/datasets/ReorgLayerDataset.h" +#include "tests/framework/Asserts.h" +#include "tests/framework/Macros.h" +#include "tests/framework/datasets/Datasets.h" +#include "tests/validation/Validation.h" +#include "tests/validation/fixtures/ReorgLayerFixture.h" + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +TEST_SUITE(CL) +TEST_SUITE(ReorgLayer) + +// *INDENT-OFF* +// clang-format off +DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::S64), // Wrong output tensor + TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::F32), // Wrong output tensor + TensorInfo(TensorShape(3U, 12U, 4U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 12U, 4U, 2U), 1, DataType::F32), // Wrong data type + }), + framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::S64), + TensorInfo(TensorShape(5U, 6U, 4U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(5U, 6U, 2, 2U), 1, DataType::F32), + TensorInfo(TensorShape(1U, 4U, 36U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(1U, 4U, 36U, 2U), 1, DataType::F16), + })), + 
framework::dataset::make("Stride", { 2, 2, 4, 3 })), + framework::dataset::make("Expected", { false, true, false, true, false })), + input_info, output_info, stride, expected) +{ + bool status = bool(CLReorgLayer::validate(&input_info, &output_info, stride)); + ARM_COMPUTE_EXPECT(status == expected, framework::LogLevel::ERRORS); +} +// clang-format on +// *INDENT-ON* + +DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallReorgLayerDataset(), datasets::LargeReorgLayerDataset()), + framework::dataset::make("DataType", { DataType::F32, DataType::F16, DataType::QASYMM8 })), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), + shape, stride, data_type, data_layout) +{ + // Permute the tensor shape in case of NHWC data layout + TensorShape shape_to_use = shape; + if(data_layout == DataLayout::NHWC) + { + permute(shape_to_use, PermutationVector(2U, 0U, 1U)); + } + + // Create tensors + CLTensor src = create_tensor<CLTensor>(shape_to_use, data_type, 1, QuantizationInfo(), data_layout); + CLTensor dst; + + ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); + + // Create and configure function + CLReorgLayer reorg_layer; + + // Auto-initialize the output within the function + reorg_layer.configure(&src, &dst, stride); + + // Validate valid region + const ValidRegion src_valid_region = shape_to_valid_region(shape_to_use); + const ValidRegion dst_valid_region = shape_to_valid_region(dst.info()->tensor_shape()); + validate(src.info()->valid_region(), src_valid_region); + validate(dst.info()->valid_region(), dst_valid_region); + + // Validate padding + const int step = 1; + const PaddingSize src_padding = PaddingCalculator(shape_to_use.x(), step).required_padding(); + const PaddingSize dst_padding = PaddingCalculator(dst.info()->tensor_shape().x(), step).required_padding(); + validate(src.info()->padding(), src_padding); + validate(dst.info()->padding(), 
dst_padding); +} + +template <typename T> +using CLReorgLayerFixture = ReorgLayerValidationFixture<CLTensor, CLAccessor, CLReorgLayer, T>; + +TEST_SUITE(S32) +FIXTURE_DATA_TEST_CASE(RunSmall, CLReorgLayerFixture<int32_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallReorgLayerDataset(), framework::dataset::make("DataType", + DataType::S32)), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) +{ + // Validate output + validate(CLAccessor(_target), _reference); +} + +FIXTURE_DATA_TEST_CASE(RunLarge, CLReorgLayerFixture<int32_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeReorgLayerDataset(), framework::dataset::make("DataType", + DataType::S32)), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) +{ + // Validate output + validate(CLAccessor(_target), _reference); +} +TEST_SUITE_END() // S32 + +TEST_SUITE(S16) +FIXTURE_DATA_TEST_CASE(RunSmall, CLReorgLayerFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallReorgLayerDataset(), framework::dataset::make("DataType", + DataType::S16)), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) +{ + // Validate output + validate(CLAccessor(_target), _reference); +} + +FIXTURE_DATA_TEST_CASE(RunLarge, CLReorgLayerFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeReorgLayerDataset(), framework::dataset::make("DataType", + DataType::S16)), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) +{ + // Validate output + validate(CLAccessor(_target), _reference); +} +TEST_SUITE_END() // S16 + +TEST_SUITE(S8) +FIXTURE_DATA_TEST_CASE(RunSmall, CLReorgLayerFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallReorgLayerDataset(), framework::dataset::make("DataType", + DataType::S8)), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) +{ + // Validate 
output + validate(CLAccessor(_target), _reference); +} + +FIXTURE_DATA_TEST_CASE(RunLarge, CLReorgLayerFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeReorgLayerDataset(), framework::dataset::make("DataType", DataType::S8)), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) +{ + // Validate output + validate(CLAccessor(_target), _reference); +} +TEST_SUITE_END() // S8 + +TEST_SUITE_END() // ReorgLayer +TEST_SUITE_END() // CL +} // namespace validation +} // namespace test +} // namespace arm_compute diff --git a/tests/validation/NEON/ReorgLayer.cpp b/tests/validation/NEON/ReorgLayer.cpp index 6489b6529f..5a76315d77 100644 --- a/tests/validation/NEON/ReorgLayer.cpp +++ b/tests/validation/NEON/ReorgLayer.cpp @@ -28,7 +28,7 @@ #include "tests/NEON/Accessor.h" #include "tests/PaddingCalculator.h" -#include "tests/datasets/ShapeDatasets.h" +#include "tests/datasets/ReorgLayerDataset.h" #include "tests/framework/Asserts.h" #include "tests/framework/Macros.h" #include "tests/framework/datasets/Datasets.h" @@ -44,124 +44,124 @@ namespace validation TEST_SUITE(NEON) TEST_SUITE(ReorgLayer) -DATA_TEST_CASE(Configuration, - framework::DatasetMode::ALL, - combine(combine(combine(concat(datasets::SmallShapes(), datasets::LargeShapes()), - framework::dataset::make("Stride", { 2, 3 })), - framework::dataset::make("DataType", { DataType::QASYMM8, DataType::F16, DataType::F32 })), - framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), +// *INDENT-OFF* +// clang-format off +DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::S64), // Wrong output tensor + TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::F32), // Wrong output tensor + TensorInfo(TensorShape(3U, 12U, 4U, 2U), 1, DataType::F32), + 
TensorInfo(TensorShape(3U, 12U, 4U, 2U), 1, DataType::F32), // Wrong data type + }), + framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::S64), + TensorInfo(TensorShape(5U, 6U, 4U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(5U, 6U, 2, 2U), 1, DataType::F32), + TensorInfo(TensorShape(1U, 4U, 36U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(1U, 4U, 36U, 2U), 1, DataType::F16), + })), + framework::dataset::make("Stride", { 2, 2, 4, 3 })), + framework::dataset::make("Expected", { false, true, false, true, false })), + input_info, output_info, stride, expected) +{ + bool status = bool(NEReorgLayer::validate(&input_info, &output_info, stride)); + ARM_COMPUTE_EXPECT(status == expected, framework::LogLevel::ERRORS); +} +// clang-format on +// *INDENT-ON* + +DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallReorgLayerDataset(), datasets::LargeReorgLayerDataset()), + framework::dataset::make("DataType", { DataType::F32, DataType::F16, DataType::QASYMM8 })), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), shape, stride, data_type, data_layout) { + // Permute the tensor shape in case of NHWC data layout + TensorShape shape_to_use = shape; + if(data_layout == DataLayout::NHWC) + { + permute(shape_to_use, PermutationVector(2U, 0U, 1U)); + } + // Create tensors - Tensor ref_src = create_tensor<Tensor>(shape, data_type, 1, QuantizationInfo(), data_layout); + Tensor src = create_tensor<Tensor>(shape_to_use, data_type, 1, QuantizationInfo(), data_layout); Tensor dst; - // Create and Configure function - NEReorgLayer reorg_func; - reorg_func.configure(&ref_src, &dst, stride); + ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); + + // Create and configure function + NEReorgLayer reorg_layer; + + // Auto-initialize the output within the function + reorg_layer.configure(&src, &dst, stride); // Validate 
valid region - const ValidRegion valid_region = shape_to_valid_region(dst.info()->tensor_shape()); - validate(dst.info()->valid_region(), valid_region); -} + const ValidRegion src_valid_region = shape_to_valid_region(shape_to_use); + const ValidRegion dst_valid_region = shape_to_valid_region(dst.info()->tensor_shape()); + validate(src.info()->valid_region(), src_valid_region); + validate(dst.info()->valid_region(), dst_valid_region); -// *INDENT-OFF* -// clang-format off -DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( - framework::dataset::make("InputInfo",{ - TensorInfo(TensorShape(8U, 8U, 5U, 3U), 1, DataType::U16), // Invalid stride - TensorInfo(TensorShape(8U, 8U, 5U, 3U), 1, DataType::U16), // Invalid output shape - TensorInfo(TensorShape(8U, 8U, 5U, 3U), 1, DataType::U16), // valid - }), - framework::dataset::make("OutputInfo", { - TensorInfo(TensorShape(4U, 4U, 20U, 3U), 1, DataType::U16), - TensorInfo(TensorShape(4U, 4U, 10U, 3U), 1, DataType::U16), - TensorInfo(TensorShape(4U, 4U, 20U, 3U), 1, DataType::U16), - })), - framework::dataset::make("Stride", { -1, 2, 2 })), - framework::dataset::make("Expected", { false, false, true })), - input_info, output_info, stride, expected) -{ - Status status = NEReorgLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), stride); - ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS); + // Validate padding + const int step = 1; + const PaddingSize src_padding = PaddingCalculator(shape_to_use.x(), step).required_padding(); + const PaddingSize dst_padding = PaddingCalculator(dst.info()->tensor_shape().x(), step).required_padding(); + validate(src.info()->padding(), src_padding); + validate(dst.info()->padding(), dst_padding); } -// clang-format on -// *INDENT-ON* template <typename T> using NEReorgLayerFixture = ReorgLayerValidationFixture<Tensor, Accessor, NEReorgLayer, T>; -TEST_SUITE(U8) -FIXTURE_DATA_TEST_CASE(RunSmall, - 
NEReorgLayerFixture<uint8_t>, - framework::DatasetMode::PRECOMMIT, - combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("Stride", { 2, 3 })), - framework::dataset::make("DataType", DataType::U8)), - framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) +TEST_SUITE(S32) +FIXTURE_DATA_TEST_CASE(RunSmall, NEReorgLayerFixture<int32_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallReorgLayerDataset(), framework::dataset::make("DataType", + DataType::S32)), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) { // Validate output validate(Accessor(_target), _reference); } -FIXTURE_DATA_TEST_CASE(RunLarge, - NEReorgLayerFixture<uint8_t>, - framework::DatasetMode::NIGHTLY, - combine(combine(combine(datasets::LargeShapes(), framework::dataset::make("Stride", { 2, 3 })), - framework::dataset::make("DataType", DataType::U8)), - framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) +FIXTURE_DATA_TEST_CASE(RunLarge, NEReorgLayerFixture<int32_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeReorgLayerDataset(), framework::dataset::make("DataType", + DataType::S32)), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) { // Validate output validate(Accessor(_target), _reference); } -TEST_SUITE_END() // U8 - -TEST_SUITE(U16) -FIXTURE_DATA_TEST_CASE(RunSmall, - NEReorgLayerFixture<uint16_t>, - framework::DatasetMode::PRECOMMIT, - combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("Stride", { 2, 3 })), - framework::dataset::make("DataType", DataType::U16)), - framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) +TEST_SUITE_END() // S32 + +TEST_SUITE(S16) +FIXTURE_DATA_TEST_CASE(RunSmall, NEReorgLayerFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallReorgLayerDataset(), framework::dataset::make("DataType", + DataType::S16)), + 
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) { // Validate output validate(Accessor(_target), _reference); } -FIXTURE_DATA_TEST_CASE(RunLarge, - NEReorgLayerFixture<uint16_t>, - framework::DatasetMode::NIGHTLY, - combine(combine(combine(datasets::LargeShapes(), framework::dataset::make("Stride", { 2, 3 })), - framework::dataset::make("DataType", DataType::U16)), - framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) +FIXTURE_DATA_TEST_CASE(RunLarge, NEReorgLayerFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeReorgLayerDataset(), framework::dataset::make("DataType", + DataType::S16)), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) { // Validate output validate(Accessor(_target), _reference); } -TEST_SUITE_END() // U16 - -TEST_SUITE(U32) -FIXTURE_DATA_TEST_CASE(RunSmall, - NEReorgLayerFixture<uint32_t>, - framework::DatasetMode::PRECOMMIT, - combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("Stride", { 2, 3 })), - framework::dataset::make("DataType", DataType::U32)), - framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) +TEST_SUITE_END() // S16 + +TEST_SUITE(S8) +FIXTURE_DATA_TEST_CASE(RunSmall, NEReorgLayerFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallReorgLayerDataset(), framework::dataset::make("DataType", + DataType::S8)), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) { // Validate output validate(Accessor(_target), _reference); } -FIXTURE_DATA_TEST_CASE(RunLarge, - NEReorgLayerFixture<uint32_t>, - framework::DatasetMode::NIGHTLY, - combine(combine(combine(datasets::LargeShapes(), framework::dataset::make("Stride", { 2, 3 })), - framework::dataset::make("DataType", DataType::U32)), - framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) +FIXTURE_DATA_TEST_CASE(RunLarge, 
NEReorgLayerFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeReorgLayerDataset(), framework::dataset::make("DataType", DataType::S8)), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) { // Validate output validate(Accessor(_target), _reference); } -TEST_SUITE_END() // U32 +TEST_SUITE_END() // S8 TEST_SUITE_END() // ReorgLayer TEST_SUITE_END() // NEON diff --git a/tests/validation/fixtures/ReorgLayerFixture.h b/tests/validation/fixtures/ReorgLayerFixture.h index 2bc5a6fb8c..3300e0dd7c 100644 --- a/tests/validation/fixtures/ReorgLayerFixture.h +++ b/tests/validation/fixtures/ReorgLayerFixture.h @@ -59,9 +59,7 @@ protected: TensorType compute_target(TensorShape input_shape, int32_t stride, DataType data_type, DataLayout data_layout) { - // Check if indeed the input shape can be reshape to the output one - ARM_COMPUTE_EXPECT(stride >= 0, framework::LogLevel::ERRORS); - + // Note: The input shape passed to the function is always in NCHW if(data_layout == DataLayout::NHWC) { permute(input_shape, PermutationVector(2U, 0U, 1U)); diff --git a/tests/validation/reference/ReorgLayer.cpp b/tests/validation/reference/ReorgLayer.cpp index cb13a737e0..2eb5d01926 100644 --- a/tests/validation/reference/ReorgLayer.cpp +++ b/tests/validation/reference/ReorgLayer.cpp @@ -24,6 +24,7 @@ #include "ReorgLayer.h" #include "arm_compute/core/Types.h" +#include "arm_compute/core/utils/misc/ShapeCalculator.h" namespace arm_compute { @@ -33,29 +34,17 @@ namespace validation { namespace reference { -namespace -{ -TensorShape compute_reorg_shape(const TensorShape &src_shape, int32_t stride) -{ - ARM_COMPUTE_ERROR_ON(stride <= 0); - - TensorShape dst_shape = src_shape; - dst_shape.set(0, src_shape.x() / stride); - dst_shape.set(1, src_shape.y() / stride); - dst_shape.set(2, src_shape.z() * stride * stride); - - return dst_shape; -} -} // namespace - template <typename T> SimpleTensor<T> reorg_layer(const SimpleTensor<T> &src, 
int32_t stride) { - // Calculate output shape - const TensorShape dst_shape = compute_reorg_shape(src.shape(), stride); + ARM_COMPUTE_ERROR_ON(src.shape().num_dimensions() > 4); + ARM_COMPUTE_ERROR_ON(src.data_layout() != DataLayout::NCHW); + + TensorInfo input_info(src.shape(), 1, src.data_type()); + const TensorShape output_shape = misc::shape_calculator::compute_reorg_output_shape(input_info, stride); // Create destination tensor - SimpleTensor<T> dst{ dst_shape, src.data_type() }; + SimpleTensor<T> dst{ output_shape, src.data_type() }; const unsigned int W = dst.shape().x(); const unsigned int H = dst.shape().y(); @@ -88,9 +77,9 @@ SimpleTensor<T> reorg_layer(const SimpleTensor<T> &src, int32_t stride) return dst; } -template SimpleTensor<uint8_t> reorg_layer(const SimpleTensor<uint8_t> &src, int32_t stride); -template SimpleTensor<uint16_t> reorg_layer(const SimpleTensor<uint16_t> &src, int32_t stride); -template SimpleTensor<uint32_t> reorg_layer(const SimpleTensor<uint32_t> &src, int32_t stride); +template SimpleTensor<int32_t> reorg_layer(const SimpleTensor<int32_t> &src, int32_t stride); +template SimpleTensor<int16_t> reorg_layer(const SimpleTensor<int16_t> &src, int32_t stride); +template SimpleTensor<int8_t> reorg_layer(const SimpleTensor<int8_t> &src, int32_t stride); } // namespace reference } // namespace validation } // namespace test |