From a9c4472188abef421adb589e2a6fef52727d465f Mon Sep 17 00:00:00 2001 From: Michalis Spyrou Date: Fri, 5 Apr 2019 17:18:36 +0100 Subject: COMPMID-2051 Refactor shape_calculator::calculate_concatenate_shape Change-Id: Ibf316718d11fa975d75f226925747b21c4efd127 Signed-off-by: Michalis Spyrou Reviewed-on: https://review.mlplatform.org/c/974 Comments-Addressed: Arm Jenkins Tested-by: Arm Jenkins Reviewed-by: Michele Di Giorgio --- .../CL/kernels/CLDepthConcatenateLayerKernel.h | 5 +- .../kernels/GCDepthConcatenateLayerKernel.h | 5 +- .../NEON/kernels/NEDepthConcatenateLayerKernel.h | 7 +- arm_compute/core/utils/misc/ShapeCalculator.h | 55 +++---- arm_compute/runtime/GLES_COMPUTE/GCFunctions.h | 3 +- .../GLES_COMPUTE/functions/GCConcatenateLayer.h | 68 ++++++++ .../functions/GCDepthConcatenateLayer.h | 3 +- docs/00_introduction.dox | 9 + src/core/CL/cl_kernels/concatenate.cl | 6 +- .../CL/kernels/CLDepthConcatenateLayerKernel.cpp | 42 +---- src/core/GLES_COMPUTE/cs_shaders/concatenate.cs | 6 +- .../kernels/GCDepthConcatenateLayerKernel.cpp | 45 +---- .../NEON/kernels/NEDepthConcatenateLayerKernel.cpp | 39 +---- src/graph/nodes/ConcatenateLayerNode.cpp | 14 +- src/runtime/CL/functions/CLConcatenateLayer.cpp | 39 ++--- .../CL/functions/CLDepthConcatenateLayer.cpp | 4 +- .../GLES_COMPUTE/functions/GCConcatenateLayer.cpp | 81 +++++++++ src/runtime/NEON/functions/NEConcatenateLayer.cpp | 35 ++-- .../NEON/functions/NEDepthConcatenateLayer.cpp | 6 +- .../fixtures/DepthConcatenateLayerFixture.h | 4 +- tests/validation/CL/DepthConcatenateLayer.cpp | 4 +- tests/validation/CL/LSTMLayer.cpp | 2 +- .../GLES_COMPUTE/DepthConcatenateLayer.cpp | 33 ++-- tests/validation/Helpers.cpp | 40 ----- tests/validation/NEON/LSTMLayer.cpp | 2 +- .../validation/fixtures/ConcatenateLayerFixture.h | 8 +- .../fixtures/DepthConcatenateLayerFixture.h | 183 --------------------- 27 files changed, 277 insertions(+), 471 deletions(-) create mode 100644 arm_compute/runtime/GLES_COMPUTE/functions/GCConcatenateLayer.h create mode 100644 src/runtime/GLES_COMPUTE/functions/GCConcatenateLayer.cpp delete mode 100644 tests/validation/fixtures/DepthConcatenateLayerFixture.h diff --git a/arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h b/arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h index ff8009085f..2a1845226f 100644 --- a/arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h +++ b/arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -73,13 +73,10 @@ public: // Inherited methods overridden: void run(const Window &window, cl::CommandQueue &queue) override; - BorderSize border_size() const override; private: const ICLTensor *_input; ICLTensor *_output; - int _top_bottom; - int _left_right; unsigned int _depth_offset; }; } // namespace arm_compute diff --git a/arm_compute/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.h b/arm_compute/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.h index 06a54dd0b3..6a03170065 100644 --- a/arm_compute/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.h +++ b/arm_compute/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. 
* * SPDX-License-Identifier: MIT * @@ -64,13 +64,10 @@ public: // Inherited methods overridden: void run(const Window &window) override; - BorderSize border_size() const override; private: const IGCTensor *_input; IGCTensor *_output; - int _top_bottom; - int _left_right; int _depth_offset; }; } diff --git a/arm_compute/core/NEON/kernels/NEDepthConcatenateLayerKernel.h b/arm_compute/core/NEON/kernels/NEDepthConcatenateLayerKernel.h index 848d89fc9f..26e23a73a5 100644 --- a/arm_compute/core/NEON/kernels/NEDepthConcatenateLayerKernel.h +++ b/arm_compute/core/NEON/kernels/NEDepthConcatenateLayerKernel.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -76,17 +76,14 @@ public: // Inherited methods overridden: void run(const Window &window, const ThreadInfo &info) override; - BorderSize border_size() const override; private: - using DepthConcatFunction = void(const ITensor *in, ITensor *out, std::pair start_xy, int depth_offset, const Window &window); + using DepthConcatFunction = void(const ITensor *in, ITensor *out, int depth_offset, const Window &window); private: DepthConcatFunction *_func; const ITensor *_input; ITensor *_output; - int _top_bottom; - int _left_right; unsigned int _depth_offset; }; } // namespace arm_compute diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h index 384bd460a0..f5058b35fb 100644 --- a/arm_compute/core/utils/misc/ShapeCalculator.h +++ b/arm_compute/core/utils/misc/ShapeCalculator.h @@ -1173,6 +1173,11 @@ inline TensorShape extract_shape(const TensorShape *data) return *data; } +inline TensorShape extract_shape(TensorShape *data) +{ + return *data; +} + /** Calculate the unstack shape of a tensor * * @param[in] input_shape Input tensor shape @@ -1187,37 +1192,6 @@ inline TensorShape calculate_unstack_shape(TensorShape input_shape, unsigned int return input_shape; } -/** Calculate the depth concatenate output shape of a vector of tensors - * - * @param[in] inputs_vector Vector containing the shapes of the inputs - * - * @return the calculated shape - */ -template -inline TensorShape calculate_depth_concatenate_shape(const std::vector &inputs_vector) -{ - TensorShape out_shape = extract_shape(inputs_vector[0]); - - size_t max_x = 0; - size_t max_y = 0; - size_t depth = 0; - - for(const auto &tensor : inputs_vector) - { - ARM_COMPUTE_ERROR_ON(tensor == nullptr); - const TensorShape shape = extract_shape(tensor); - max_x = std::max(shape.x(), max_x); - max_y = std::max(shape.y(), max_y); - depth += shape.z(); - } - - out_shape.set(0, max_x); - out_shape.set(1, max_y); - out_shape.set(2, depth); - - return out_shape; -} - /** Calculate the concatenate output shape of the concatenate operation along a single axis * * @param[in] input Vector containing the shapes of the inputs @@ -1230,12 +1204,27 @@ inline TensorShape calculate_concatenate_shape(const std::vector &input, si { TensorShape out_shape = extract_shape(input[0]); + // All dimensions must match except the axis one + for(unsigned int i = 0; i < MAX_DIMS; ++i) + { + if(i == axis) + { + continue; + } + + for(const auto &tensor : input) + { + ARM_COMPUTE_ERROR_ON(tensor == nullptr); + const TensorShape shape = extract_shape(tensor); + ARM_COMPUTE_ERROR_ON(out_shape[i] != shape[i]); + } + } + + // Calculate output shape size_t new_size = 0; for(const auto &tensor : input) { - ARM_COMPUTE_ERROR_ON(tensor == nullptr); const TensorShape shape = extract_shape(tensor); - 
ARM_COMPUTE_ERROR_ON(axis >= shape.num_dimensions()); new_size += shape[axis]; } diff --git a/arm_compute/runtime/GLES_COMPUTE/GCFunctions.h b/arm_compute/runtime/GLES_COMPUTE/GCFunctions.h index 6f338568c2..7e01480801 100644 --- a/arm_compute/runtime/GLES_COMPUTE/GCFunctions.h +++ b/arm_compute/runtime/GLES_COMPUTE/GCFunctions.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -29,6 +29,7 @@ #include "arm_compute/runtime/GLES_COMPUTE/functions/GCActivationLayer.h" #include "arm_compute/runtime/GLES_COMPUTE/functions/GCArithmeticAddition.h" #include "arm_compute/runtime/GLES_COMPUTE/functions/GCBatchNormalizationLayer.h" +#include "arm_compute/runtime/GLES_COMPUTE/functions/GCConcatenateLayer.h" #include "arm_compute/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.h" #include "arm_compute/runtime/GLES_COMPUTE/functions/GCDepthConcatenateLayer.h" #include "arm_compute/runtime/GLES_COMPUTE/functions/GCDepthwiseConvolutionLayer.h" diff --git a/arm_compute/runtime/GLES_COMPUTE/functions/GCConcatenateLayer.h b/arm_compute/runtime/GLES_COMPUTE/functions/GCConcatenateLayer.h new file mode 100644 index 0000000000..c57b282dbe --- /dev/null +++ b/arm_compute/runtime/GLES_COMPUTE/functions/GCConcatenateLayer.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2019 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __ARM_COMPUTE_GCDEPTHCONCATENATELAYER_H__ +#define __ARM_COMPUTE_GCDEPTHCONCATENATELAYER_H__ + +#include "arm_compute/core/GLES_COMPUTE/OpenGLES.h" +#include "arm_compute/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/runtime/IFunction.h" + +#include +#include + +namespace arm_compute +{ +class IGCTensor; + +/** Basic function to execute concatenate tensors along a given axis. This function calls the following kernels: + * + * @note only axis z is supported + * -# @ref GCDepthConcatenateLayerKernel + */ +class GCConcatenateLayer : public IFunction +{ +public: + /** Default constructor */ + GCConcatenateLayer(); + /** Initialise the kernel's inputs vector and output. + * + * @note Input and output tensor dimensions preconditions defer depending on the concatenation axis. + * + * @param[in,out] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: F16/F32. + * @param[out] output Output tensor. 
Data types supported: Same as @p input. + * @param[in] axis Concatenation axis. Supported underlying concatenation axis is 2. + */ + void configure(std::vector inputs_vector, IGCTensor *output, size_t axis); + + // Inherited methods overridden: + void run() override; + +private: + std::vector> _concat_kernels; + unsigned int _num_inputs; + unsigned int _axis; +}; +} // namespace arm_compute +#endif /* __ARM_COMPUTE_GCDEPTHCONCATENATELAYER_H__ */ diff --git a/arm_compute/runtime/GLES_COMPUTE/functions/GCDepthConcatenateLayer.h b/arm_compute/runtime/GLES_COMPUTE/functions/GCDepthConcatenateLayer.h index 1151399f92..307ec49952 100644 --- a/arm_compute/runtime/GLES_COMPUTE/functions/GCDepthConcatenateLayer.h +++ b/arm_compute/runtime/GLES_COMPUTE/functions/GCDepthConcatenateLayer.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -39,6 +39,7 @@ class IGCTensor; /** Basic function to execute concatenate tensors along z axis. This function calls the following kernels: * + * @deprecated This function is deprecated and will be removed in release 19.08 * -# @ref GCFillBorderKernel (executed if input's lowest two dimensions are smaller than respective output's dimensions) * -# @ref GCDepthConcatenateLayerKernel * diff --git a/docs/00_introduction.dox b/docs/00_introduction.dox index 3094573add..45eb4c696d 100644 --- a/docs/00_introduction.dox +++ b/docs/00_introduction.dox @@ -236,6 +236,15 @@ If there is more than one release in a month then an extra sequential number is @subsection S2_2_changelog Changelog +v19.05 Public major release + - Deprecated functions/interfaces + - @ref GCDepthConcatenateLayer + - @ref NEWidthConcatenateLayer + - @ref NEDepthConcatenateLayer + - @ref CLWidthConcatenateLayer + - @ref CLDepthConcatenateLayer + + v19.02 Public major release - Various bug fixes. - Various optimisations. diff --git a/src/core/CL/cl_kernels/concatenate.cl b/src/core/CL/cl_kernels/concatenate.cl index 99f4659960..23ebcf91b6 100644 --- a/src/core/CL/cl_kernels/concatenate.cl +++ b/src/core/CL/cl_kernels/concatenate.cl @@ -409,19 +409,19 @@ __kernel void concatenate_height( __kernel void concatenate_depth( TENSOR3D_DECLARATION(src), TENSOR3D_DECLARATION(dst), - int3 offsets) + int offset) { Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src); Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst); VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) - source_values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)tensor3D_offset(&src, -offsets.x, -offsets.y, 0)); + source_values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)src.ptr); #if defined(OFFSET_IN1) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_OUT) source_values = requantize(source_values, OFFSET_IN1, OFFSET_OUT, SCALE_IN1, SCALE_OUT); #endif /* defined(OFFSET_IN1) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_OUT) */ VSTORE(VEC_SIZE) - (source_values, 0, (__global DATA_TYPE *)(dst.ptr + offsets.z)); + (source_values, 0, (__global DATA_TYPE *)(dst.ptr + offset)); } #endif /* defined(DATA_TYPE) && defined(VEC_SIZE) */ diff --git a/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp b/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp index 3fccc0447d..1cae3712dc 100644 --- a/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp +++ b/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. 
* * SPDX-License-Identifier: MIT * @@ -47,19 +47,13 @@ std::pair validate_and_configure_window(ITensorInfo *input, unsi { ARM_COMPUTE_UNUSED(depth_offset); - // Configure kernel window - const int left_right = (output->dimension(0) - input->dimension(0)) / 2; - const int top_bottom = (output->dimension(1) - input->dimension(1)) / 2; - const unsigned int num_elems_processed_per_iteration = 16 / input->element_size(); - const unsigned int num_elems_read_per_iteration = 16 / input->element_size(); - const unsigned int num_rows_read_per_iteration = 1; // The window needs to be based on input as we copy all the depths of input Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration)); win.set(Window::DimZ, Window::Dimension(0, input->tensor_shape().z(), 1)); - AccessWindowRectangle input_access(input, -left_right, -top_bottom, num_elems_read_per_iteration, num_rows_read_per_iteration); + AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration); AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration); bool window_changed = update_window_and_padding(win, input_access, output_access); output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape())); @@ -74,30 +68,20 @@ Status validate_arguments(const ITensorInfo *input, unsigned int depth_offset, c ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); + ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(Window::DimX) != output->dimension(Window::DimX)); + ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(Window::DimY) != output->dimension(Window::DimY)); ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(2) + depth_offset > output->dimension(2)); - ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) > output->dimension(0)); - ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(1) > output->dimension(1)); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(3, input, output); - // The gaps between the two lowest dimensions of input and output need to be divisible by 2 - // Otherwise it is not clear how the padding should be added onto the input tensor - ARM_COMPUTE_RETURN_ERROR_ON((output->dimension(0) - input->dimension(0)) % 2); - ARM_COMPUTE_RETURN_ERROR_ON((output->dimension(1) - input->dimension(1)) % 2); - return Status{}; } } // namespace CLDepthConcatenateLayerKernel::CLDepthConcatenateLayerKernel() - : _input(nullptr), _output(nullptr), _top_bottom(0), _left_right(0), _depth_offset(0) + : _input(nullptr), _output(nullptr), _depth_offset(0) { } -BorderSize CLDepthConcatenateLayerKernel::border_size() const -{ - return BorderSize(_top_bottom, _left_right); -} - void CLDepthConcatenateLayerKernel::configure(const ICLTensor *input, unsigned int depth_offset, ICLTensor *output) { ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); @@ -124,10 +108,6 @@ void CLDepthConcatenateLayerKernel::configure(const ICLTensor *input, unsigned i // Create kernel _kernel = static_cast(CLKernelLibrary::get().create_kernel("concatenate_depth", build_opts.options())); - // Configure kernel window - _left_right = (output->info()->dimension(0) - input->info()->dimension(0)) / 2; - _top_bottom = (output->info()->dimension(1) - input->info()->dimension(1)) / 2; - // Configure kernel window auto win_config = validate_and_configure_window(input->info(), depth_offset, output->info()); ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config)); @@ -153,16 +133,8 @@ void 
CLDepthConcatenateLayerKernel::run(const Window &window, cl::CommandQueue & const int offset_to_first_elements_in_bytes = _depth_offset * _output->info()->strides_in_bytes()[2]; - unsigned int idx = 2 * num_arguments_per_3D_tensor(); // Skip the input and output parameters - const cl_int3 offsets = - { - { - static_cast(_left_right), - static_cast(_top_bottom), - static_cast(offset_to_first_elements_in_bytes), - } - }; - _kernel.setArg(idx, offsets); + unsigned int idx = 2 * num_arguments_per_3D_tensor(); // Skip the input and output parameters + _kernel.setArg(idx, offset_to_first_elements_in_bytes); do { diff --git a/src/core/GLES_COMPUTE/cs_shaders/concatenate.cs b/src/core/GLES_COMPUTE/cs_shaders/concatenate.cs index 69ac50b4d0..49b3954fca 100644 --- a/src/core/GLES_COMPUTE/cs_shaders/concatenate.cs +++ b/src/core/GLES_COMPUTE/cs_shaders/concatenate.cs @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -53,7 +53,7 @@ void main(void) Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift); Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift); - float tmp = LOAD(src_ptr, TENSOR3D_OFFSET(src_iter, -OFFSET_X, -OFFSET_Y, 0)); + float tmp = LOAD_CURRENT_ITEM(src_ptr, src_iter); STORE_CURRENT_ITEM(dst_ptr, dst_iter, tmp); } @@ -66,7 +66,7 @@ void main(void) Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift); Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift); - uvec2 tmp = LOAD(src_ptr, TENSOR3D_OFFSET(src_iter, -OFFSET_X, -OFFSET_Y, 0)); + uvec2 tmp = LOAD_CURRENT_ITEM(src_ptr, src_iter); STORE_CURRENT_ITEM(dst_ptr, dst_iter, tmp); } #endif /*DATA_TYPE_FP16*/ diff --git a/src/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.cpp b/src/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.cpp index 36d1b29bba..6f70efe5c7 100644 --- a/src/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.cpp +++ b/src/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. 
* * SPDX-License-Identifier: MIT * @@ -38,29 +38,19 @@ using namespace arm_compute; GCDepthConcatenateLayerKernel::GCDepthConcatenateLayerKernel() - : _input(nullptr), _output(nullptr), _top_bottom(0), _left_right(0), _depth_offset(0) + : _input(nullptr), _output(nullptr), _depth_offset(0) { } - -BorderSize GCDepthConcatenateLayerKernel::border_size() const -{ - return BorderSize(_top_bottom, _left_right); -} - void GCDepthConcatenateLayerKernel::configure(const IGCTensor *input, unsigned int depth_offset, IGCTensor *output) { ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32); ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); + + ARM_COMPUTE_ERROR_ON(input->info()->dimension(Window::DimX) != output->info()->dimension(Window::DimX)); + ARM_COMPUTE_ERROR_ON(input->info()->dimension(Window::DimY) != output->info()->dimension(Window::DimY)); ARM_COMPUTE_ERROR_ON(input->info()->dimension(2) + depth_offset > output->info()->dimension(2)); - ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) > output->info()->dimension(0)); - ARM_COMPUTE_ERROR_ON(input->info()->dimension(1) > output->info()->dimension(1)); ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(3, input, output); - // The gaps between the two lowest dimensions of input and output need to be divisible by 2 - // Otherwise it is not clear how the padding should be added onto the input tensor - ARM_COMPUTE_ERROR_ON((output->info()->dimension(0) - input->info()->dimension(0)) % 2); - ARM_COMPUTE_ERROR_ON((output->info()->dimension(1) - input->info()->dimension(1)) % 2); - _input = input; _output = output; _depth_offset = depth_offset; @@ -73,35 +63,20 @@ void GCDepthConcatenateLayerKernel::configure(const IGCTensor *input, unsigned i build_opts.emplace("#define LOCAL_SIZE_Y " + support::cpp11::to_string(1)); build_opts.emplace("#define LOCAL_SIZE_Z " + support::cpp11::to_string(1)); - // Configure kernel window - _left_right = (output->info()->dimension(0) - input->info()->dimension(0)) / 2; - _top_bottom = (output->info()->dimension(1) - input->info()->dimension(1)) / 2; - - build_opts.emplace("#define OFFSET_X " + support::cpp11::to_string(_left_right)); - build_opts.emplace("#define OFFSET_Y " + support::cpp11::to_string(_top_bottom)); - // Create kernel _kernel = static_cast(GCKernelLibrary::get().create_kernel("concatenate_depth", build_opts)); unsigned int num_elems_processed_per_iteration = 1; - unsigned int num_elems_read_per_iteration = 1; - if(input->info()->data_type() == DataType::F32) - { - num_elems_processed_per_iteration = 1; - num_elems_read_per_iteration = 1; - } - else if(input->info()->data_type() == DataType::F16) + if(input->info()->data_type() == DataType::F16) { num_elems_processed_per_iteration = 4; - num_elems_read_per_iteration = 4; } - const unsigned int num_rows_read_per_iteration = 1; // The window needs to be based on input as we copy all the depths of input Window win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration)); win.set(Window::DimZ, Window::Dimension(0, input->info()->tensor_shape().z(), 1)); - AccessWindowRectangle input_access(input->info(), -_left_right, -_top_bottom, num_elems_read_per_iteration, num_rows_read_per_iteration); + AccessWindowHorizontal input_access(input->info(), 0, num_elems_processed_per_iteration); AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration); update_window_and_padding(win, input_access, output_access); output_access.set_valid_region(win, ValidRegion(Coordinates(), 
output->info()->tensor_shape())); @@ -118,11 +93,9 @@ void GCDepthConcatenateLayerKernel::run(const Window &window) _output->set_needs_shifting(true); - Window slice = window.first_slice_window_3D(); Window slice_in = window.first_slice_window_3D(); Window slice_out = window.first_slice_window_3D(); - slice.shift(Window::DimX, -(_output->info()->padding()).left); slice_out.set(Window::DimZ, Window::Dimension(_depth_offset)); do @@ -133,7 +106,7 @@ void GCDepthConcatenateLayerKernel::run(const Window &window) _kernel.update_shader_params(); - enqueue(*this, slice); + enqueue(*this, slice_in); } - while(window.slide_window_slice_3D(slice) && window.slide_window_slice_3D(slice_in)); + while(window.slide_window_slice_3D(slice_in)); } diff --git a/src/core/NEON/kernels/NEDepthConcatenateLayerKernel.cpp b/src/core/NEON/kernels/NEDepthConcatenateLayerKernel.cpp index 8352c94586..b360e9e6be 100644 --- a/src/core/NEON/kernels/NEDepthConcatenateLayerKernel.cpp +++ b/src/core/NEON/kernels/NEDepthConcatenateLayerKernel.cpp @@ -42,18 +42,13 @@ using namespace arm_compute; namespace { template -void depth_concat(const ITensor *in, ITensor *out, std::pair start_xy, int depth_offset, const Window &window) +void depth_concat(const ITensor *in, ITensor *out, int depth_offset, const Window &window) { - const int start_x = start_xy.first; - const int start_y = start_xy.second; - // Offset input - const int input_offset_to_first_elements_in_bytes = in->info()->offset_first_element_in_bytes() - start_x * in->info()->strides_in_bytes()[0] - start_y * in->info()->strides_in_bytes()[1]; - uint8_t *input_ptr = in->buffer() + input_offset_to_first_elements_in_bytes; + uint8_t *input_ptr = in->buffer() + in->info()->offset_first_element_in_bytes(); // Offset output - const unsigned int output_offset_to_first_elements_in_bytes = out->info()->offset_first_element_in_bytes() + depth_offset * out->info()->strides_in_bytes()[2]; - uint8_t *output_ptr = out->buffer() + output_offset_to_first_elements_in_bytes; + uint8_t *output_ptr = out->buffer() + out->info()->offset_first_element_in_bytes() + depth_offset * out->info()->strides_in_bytes()[2]; Iterator input(in, window); Iterator output(out, window); @@ -88,19 +83,13 @@ std::pair validate_and_configure_window(ITensorInfo *input, unsi { ARM_COMPUTE_UNUSED(depth_offset); - // Configure kernel window - const int left_right = (output->dimension(0) - input->dimension(0)) / 2; - const int top_bottom = (output->dimension(1) - input->dimension(1)) / 2; - const unsigned int num_elems_processed_per_iteration = 16 / input->element_size(); - const unsigned int num_elems_read_per_iteration = 16 / input->element_size(); - const unsigned int num_rows_read_per_iteration = 1; // The window needs to be based on input as we copy all the depths of input Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration)); win.set(Window::DimZ, Window::Dimension(0, input->tensor_shape().z(), 1)); - AccessWindowRectangle input_access(input, -left_right, -top_bottom, num_elems_read_per_iteration, num_rows_read_per_iteration); + AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration); AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration); bool window_changed = update_window_and_padding(win, input_access, output_access); output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape())); @@ -116,28 +105,18 @@ Status validate_arguments(const ITensorInfo *input, unsigned int depth_offset, c 
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); + ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(Window::DimX) != output->dimension(Window::DimX)); + ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(Window::DimY) != output->dimension(Window::DimY)); ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(2) + depth_offset > output->dimension(2)); - ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) > output->dimension(0)); - ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(1) > output->dimension(1)); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(3, input, output); - // The gaps between the two lowest dimensions of input and output need to be divisible by 2 - // Otherwise it is not clear how the padding should be added onto the input tensor - ARM_COMPUTE_RETURN_ERROR_ON((output->dimension(0) - input->dimension(0)) % 2); - ARM_COMPUTE_RETURN_ERROR_ON((output->dimension(1) - input->dimension(1)) % 2); - return Status{}; } } // namespace NEDepthConcatenateLayerKernel::NEDepthConcatenateLayerKernel() - : _func(nullptr), _input(nullptr), _output(nullptr), _top_bottom(0), _left_right(0), _depth_offset(0) -{ -} - -BorderSize NEDepthConcatenateLayerKernel::border_size() const + : _func(nullptr), _input(nullptr), _output(nullptr), _depth_offset(0) { - return BorderSize(_top_bottom, _left_right); } void NEDepthConcatenateLayerKernel::configure(const ITensor *input, unsigned int depth_offset, ITensor *output) @@ -149,8 +128,6 @@ void NEDepthConcatenateLayerKernel::configure(const ITensor *input, unsigned int _input = input; _output = output; _depth_offset = depth_offset; - _left_right = (output->info()->dimension(0) - input->info()->dimension(0)) / 2; - _top_bottom = (output->info()->dimension(1) - input->info()->dimension(1)) / 2; switch(input->info()->data_type()) { @@ -190,5 +167,5 @@ void NEDepthConcatenateLayerKernel::run(const Window &window, const ThreadInfo & ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window); ARM_COMPUTE_ERROR_ON(_func == nullptr); - (*_func)(_input, _output, std::make_pair(_left_right, _top_bottom), _depth_offset, window); + (*_func)(_input, _output, _depth_offset, window); } diff --git a/src/graph/nodes/ConcatenateLayerNode.cpp b/src/graph/nodes/ConcatenateLayerNode.cpp index 48da8b6e9e..ff515c4427 100644 --- a/src/graph/nodes/ConcatenateLayerNode.cpp +++ b/src/graph/nodes/ConcatenateLayerNode.cpp @@ -68,6 +68,7 @@ TensorDescriptor ConcatenateLayerNode::compute_output_descriptor(const std::vect TensorDescriptor output_descriptor = input_descriptors[0]; const int axis_idx = get_dimension_idx(output_descriptor.layout, axis); + ARM_COMPUTE_ERROR_ON_MSG(axis_idx > 2, "Unsupported concatenation axis!"); // Extract shapes std::vector shapes; @@ -76,18 +77,7 @@ TensorDescriptor ConcatenateLayerNode::compute_output_descriptor(const std::vect shapes.emplace_back(&input_descriptor.shape); } - if(axis_idx < 2) - { - output_descriptor.shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(shapes, axis_idx); - } - else if(axis_idx == 2) - { - output_descriptor.shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(shapes); - } - else - { - ARM_COMPUTE_ERROR("Unsupported concatenation axis!"); - } + output_descriptor.shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(shapes, axis_idx); return output_descriptor; } diff --git a/src/runtime/CL/functions/CLConcatenateLayer.cpp 
b/src/runtime/CL/functions/CLConcatenateLayer.cpp index b9b3c5bb80..b8224d2cce 100644 --- a/src/runtime/CL/functions/CLConcatenateLayer.cpp +++ b/src/runtime/CL/functions/CLConcatenateLayer.cpp @@ -56,15 +56,7 @@ void CLConcatenateLayer::configure(const std::vector &inputs_vector ARM_COMPUTE_ERROR_ON_NULLPTR(t); return t->info(); }); - TensorShape output_shape{}; - if(_axis == Window::DimZ) - { - output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector); - } - else - { - output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, _axis); - } + TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, _axis); // Output auto inizialitation if not yet initialized auto_init_if_empty(*output->info(), output_shape, 1, inputs_vector[0]->info()->data_type()); @@ -143,19 +135,6 @@ Status CLConcatenateLayer::validate(const std::vector &inputs_vec ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output); ARM_COMPUTE_RETURN_ERROR_ON(num_inputs < 2); - // Output auto inizialitation if not yet initialized - TensorInfo tmp_output_info = *output->clone(); - TensorShape output_shape{}; - if(axis == Window::DimZ) - { - output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector); - } - else - { - output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, axis); - } - auto_init_if_empty(tmp_output_info, output_shape, 1, inputs_vector[0]->data_type()); - unsigned int offset = 0; switch(axis) { @@ -166,19 +145,19 @@ Status CLConcatenateLayer::validate(const std::vector &inputs_vec case 2: // Validate WidthConcatenate2Tensors kernels if there are 2 inputs ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(inputs_vector[0], inputs_vector[1]); - ARM_COMPUTE_RETURN_ON_ERROR(CLWidthConcatenate2TensorsKernel::validate(inputs_vector[0], inputs_vector[1], &tmp_output_info)); + ARM_COMPUTE_RETURN_ON_ERROR(CLWidthConcatenate2TensorsKernel::validate(inputs_vector[0], inputs_vector[1], output)); break; case 4: // Validate WidthConcatenate4Tensors kernels if there are 4 inputs ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(inputs_vector[0], inputs_vector[1], inputs_vector[2], inputs_vector[3]); - ARM_COMPUTE_RETURN_ON_ERROR(CLWidthConcatenate4TensorsKernel::validate(inputs_vector[0], inputs_vector[1], inputs_vector[2], inputs_vector[3], &tmp_output_info)); + ARM_COMPUTE_RETURN_ON_ERROR(CLWidthConcatenate4TensorsKernel::validate(inputs_vector[0], inputs_vector[1], inputs_vector[2], inputs_vector[3], output)); break; default: // Validate generic case of WidthConcatenate kernel for(const auto &input : inputs_vector) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input); - ARM_COMPUTE_RETURN_ON_ERROR(CLWidthConcatenateLayerKernel::validate(input, offset, &tmp_output_info)); + ARM_COMPUTE_RETURN_ON_ERROR(CLWidthConcatenateLayerKernel::validate(input, offset, output)); offset += input->dimension(axis); } break; @@ -189,7 +168,7 @@ Status CLConcatenateLayer::validate(const std::vector &inputs_vec { for(const auto &input : inputs_vector) { - ARM_COMPUTE_RETURN_ON_ERROR(CLHeightConcatenateLayerKernel::validate(input, offset, &tmp_output_info)); + ARM_COMPUTE_RETURN_ON_ERROR(CLHeightConcatenateLayerKernel::validate(input, offset, output)); offset += input->dimension(axis); } break; @@ -198,7 +177,7 @@ Status CLConcatenateLayer::validate(const std::vector &inputs_vec { for(const auto &input : inputs_vector) { - 
ARM_COMPUTE_RETURN_ON_ERROR(CLDepthConcatenateLayerKernel::validate(input, offset, &tmp_output_info)); + ARM_COMPUTE_RETURN_ON_ERROR(CLDepthConcatenateLayerKernel::validate(input, offset, output)); offset += input->dimension(axis); } break; @@ -207,6 +186,12 @@ Status CLConcatenateLayer::validate(const std::vector &inputs_vec ARM_COMPUTE_ERROR("Axis not supported"); } + if(output->total_size() != 0) + { + TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, axis); + ARM_COMPUTE_RETURN_ERROR_ON(output_shape.total_size() != output->tensor_shape().total_size()); + } + return Status{}; } diff --git a/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp b/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp index e46647a20c..4a5f845631 100644 --- a/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp +++ b/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp @@ -56,7 +56,7 @@ void CLDepthConcatenateLayer::configure(const std::vector &inputs_v _concat_kernels_vector = arm_compute::support::cpp14::make_unique(_num_inputs); _border_handlers_vector = arm_compute::support::cpp14::make_unique(_num_inputs); - TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector_info); + TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector_info, Window::DimZ); // Output auto inizialitation if not yet initialized auto_init_if_empty(*output->info(), output_shape, 1, inputs_vector[0]->info()->data_type()); @@ -82,7 +82,7 @@ Status CLDepthConcatenateLayer::validate(const std::vector &input // Output auto inizialitation if not yet initialized TensorInfo tmp_output_info = *output->clone(); - TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector); + TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, Window::DimZ); auto_init_if_empty(tmp_output_info, output_shape, 1, inputs_vector[0]->data_type()); unsigned int depth_offset = 0; diff --git a/src/runtime/GLES_COMPUTE/functions/GCConcatenateLayer.cpp b/src/runtime/GLES_COMPUTE/functions/GCConcatenateLayer.cpp new file mode 100644 index 0000000000..506f648171 --- /dev/null +++ b/src/runtime/GLES_COMPUTE/functions/GCConcatenateLayer.cpp @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2019 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "arm_compute/runtime/GLES_COMPUTE/functions/GCConcatenateLayer.h" + +#include "arm_compute/core/Error.h" +#include "arm_compute/core/GLES_COMPUTE/IGCTensor.h" +#include "arm_compute/core/PixelValue.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/core/utils/misc/ShapeCalculator.h" +#include "arm_compute/runtime/GLES_COMPUTE/GCScheduler.h" +#include "support/ToolchainSupport.h" + +namespace arm_compute +{ +GCConcatenateLayer::GCConcatenateLayer() + : _concat_kernels(), + _num_inputs(0), + _axis(Window::DimZ) +{ +} + +void GCConcatenateLayer::configure(std::vector inputs_vector, IGCTensor *output, size_t axis) +{ + ARM_COMPUTE_ERROR_ON(inputs_vector.size() < 2); + + _num_inputs = inputs_vector.size(); + _axis = axis; + + TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, axis); + + // Output auto inizialitation if not yet initialized + auto_init_if_empty(*output->info(), output_shape, 1, inputs_vector[0]->info()->data_type()); + + unsigned int offset = 0; + switch(axis) + { + case Window::DimZ: + { + for(unsigned int i = 0; i < _num_inputs; ++i) + { + auto kernel = support::cpp14::make_unique(); + kernel->configure(inputs_vector.at(i), offset, output); + offset += inputs_vector.at(i)->info()->dimension(axis); + _concat_kernels.emplace_back(std::move(kernel)); + } + break; + } + default: + ARM_COMPUTE_ERROR("Axis not supported"); + } +} + +void GCConcatenateLayer::run() +{ + for(auto &kernel : _concat_kernels) + { + GCScheduler::get().dispatch(*kernel, true); + } +} +} // namespace arm_compute diff --git a/src/runtime/NEON/functions/NEConcatenateLayer.cpp b/src/runtime/NEON/functions/NEConcatenateLayer.cpp index e02c0c2c7a..b8cfa2b8f2 100644 --- a/src/runtime/NEON/functions/NEConcatenateLayer.cpp +++ b/src/runtime/NEON/functions/NEConcatenateLayer.cpp @@ -56,15 +56,7 @@ void NEConcatenateLayer::configure(const std::vector &inputs_vector, ARM_COMPUTE_ERROR_ON_NULLPTR(inputs_vector.at(i)); inputs_vector_info.emplace_back(inputs_vector.at(i)->info()); } - TensorShape output_shape{}; - if(_axis == Window::DimZ) - { - output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector); - } - else - { - output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, _axis); - } + TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, _axis); // Output auto inizialitation if not yet initialized auto_init_if_empty(*output->info(), output_shape, 1, inputs_vector[0]->info()->data_type()); @@ -109,19 +101,6 @@ Status NEConcatenateLayer::validate(const std::vector &inputs_vec ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output); ARM_COMPUTE_RETURN_ERROR_ON(inputs_vector.size() < 2); - // Output auto inizialitation if not yet initialized - TensorInfo tmp_output_info = *output->clone(); - TensorShape output_shape{}; - if(axis == Window::DimZ) - { - output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector); - } - else - { - output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, axis); - } - auto_init_if_empty(tmp_output_info, output_shape, 1, inputs_vector[0]->data_type()); - unsigned int offset = 0; for(const auto &input : inputs_vector) { @@ -130,17 +109,17 @@ Status NEConcatenateLayer::validate(const std::vector &inputs_vec { case Window::DimX: { - ARM_COMPUTE_RETURN_ON_ERROR(NEWidthConcatenateLayerKernel::validate(input, offset, 
&tmp_output_info)); + ARM_COMPUTE_RETURN_ON_ERROR(NEWidthConcatenateLayerKernel::validate(input, offset, output)); break; } case Window::DimY: { - ARM_COMPUTE_RETURN_ON_ERROR(NEHeightConcatenateLayerKernel::validate(input, offset, &tmp_output_info)); + ARM_COMPUTE_RETURN_ON_ERROR(NEHeightConcatenateLayerKernel::validate(input, offset, output)); break; } case Window::DimZ: { - ARM_COMPUTE_RETURN_ON_ERROR(NEDepthConcatenateLayerKernel::validate(input, offset, &tmp_output_info)); + ARM_COMPUTE_RETURN_ON_ERROR(NEDepthConcatenateLayerKernel::validate(input, offset, output)); break; } default: @@ -149,6 +128,12 @@ Status NEConcatenateLayer::validate(const std::vector &inputs_vec offset += input->dimension(axis); } + if(output->total_size() != 0) + { + TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, axis); + ARM_COMPUTE_RETURN_ERROR_ON(output_shape.total_size() != output->tensor_shape().total_size()); + } + return Status{}; } diff --git a/src/runtime/NEON/functions/NEDepthConcatenateLayer.cpp b/src/runtime/NEON/functions/NEDepthConcatenateLayer.cpp index 49db855f21..b814bffa96 100644 --- a/src/runtime/NEON/functions/NEDepthConcatenateLayer.cpp +++ b/src/runtime/NEON/functions/NEDepthConcatenateLayer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -54,7 +54,7 @@ void NEDepthConcatenateLayer::configure(const std::vector &inputs_vec { inputs_vector_info.emplace_back(inputs_vector.at(i)->info()); } - TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector_info); + TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector_info, Window::DimZ); // Output auto inizialitation if not yet initialized auto_init_if_empty(*output->info(), output_shape, 1, inputs_vector[0]->info()->data_type()); @@ -80,7 +80,7 @@ Status NEDepthConcatenateLayer::validate(const std::vector &input // Output auto inizialitation if not yet initialized TensorInfo tmp_output_info = *output->clone(); - TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector); + TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, Window::DimZ); auto_init_if_empty(tmp_output_info, output_shape, 1, inputs_vector[0]->data_type()); unsigned int depth_offset = 0; diff --git a/tests/benchmark/fixtures/DepthConcatenateLayerFixture.h b/tests/benchmark/fixtures/DepthConcatenateLayerFixture.h index 7f7ed8b20b..4db27e707a 100644 --- a/tests/benchmark/fixtures/DepthConcatenateLayerFixture.h +++ b/tests/benchmark/fixtures/DepthConcatenateLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. 
* * SPDX-License-Identifier: MIT * @@ -100,7 +100,7 @@ public: src_ptrs.emplace_back(&_srcs.back()); } - TensorShape dst_shape = misc::shape_calculator::calculate_depth_concatenate_shape(src_ptrs); + TensorShape dst_shape = misc::shape_calculator::calculate_concatenate_shape(src_ptrs, Window::DimZ); _dst = create_tensor(dst_shape, data_type, 1); _depth_concat.configure(src_ptrs, &_dst); diff --git a/tests/validation/CL/DepthConcatenateLayer.cpp b/tests/validation/CL/DepthConcatenateLayer.cpp index 5da8a34351..beda637ef3 100644 --- a/tests/validation/CL/DepthConcatenateLayer.cpp +++ b/tests/validation/CL/DepthConcatenateLayer.cpp @@ -83,8 +83,8 @@ TEST_CASE(Configuration, framework::DatasetMode::ALL) { // Create tensors CLTensor src1 = create_tensor(TensorShape(128U, 32U, 32U), DataType::F32, 1); - CLTensor src2 = create_tensor(TensorShape(32U, 32U, 32U), DataType::F32, 1); - CLTensor src3 = create_tensor(TensorShape(16U, 32U, 32U), DataType::F32, 1); + CLTensor src2 = create_tensor(TensorShape(128U, 32U, 32U), DataType::F32, 1); + CLTensor src3 = create_tensor(TensorShape(128U, 32U, 32U), DataType::F32, 1); CLTensor dst; ARM_COMPUTE_EXPECT(src1.info()->is_resizable(), framework::LogLevel::ERRORS); diff --git a/tests/validation/CL/LSTMLayer.cpp b/tests/validation/CL/LSTMLayer.cpp index ea20bd6c2c..71a9383d93 100644 --- a/tests/validation/CL/LSTMLayer.cpp +++ b/tests/validation/CL/LSTMLayer.cpp @@ -109,7 +109,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zi TensorInfo(TensorShape(16U, 2U), 1, DataType::F32), TensorInfo(TensorShape(16U, 2U), 1, DataType::F32), TensorInfo(TensorShape(16U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(11U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 2U), 1, DataType::F32), TensorInfo(TensorShape(16U, 2U), 1, DataType::F32), })), framework::dataset::make("ScratchInfo", { TensorInfo(TensorShape(64U, 2U), 1, DataType::F32), diff --git a/tests/validation/GLES_COMPUTE/DepthConcatenateLayer.cpp b/tests/validation/GLES_COMPUTE/DepthConcatenateLayer.cpp index 7af3050c1d..04e91d63ae 100644 --- a/tests/validation/GLES_COMPUTE/DepthConcatenateLayer.cpp +++ b/tests/validation/GLES_COMPUTE/DepthConcatenateLayer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -24,14 +24,14 @@ #include "arm_compute/core/Types.h" #include "arm_compute/runtime/GLES_COMPUTE/GCTensor.h" #include "arm_compute/runtime/GLES_COMPUTE/GCTensorAllocator.h" -#include "arm_compute/runtime/GLES_COMPUTE/functions/GCDepthConcatenateLayer.h" +#include "arm_compute/runtime/GLES_COMPUTE/functions/GCConcatenateLayer.h" #include "tests/GLES_COMPUTE/GCAccessor.h" #include "tests/datasets/ShapeDatasets.h" #include "tests/framework/Asserts.h" #include "tests/framework/Macros.h" #include "tests/framework/datasets/Datasets.h" #include "tests/validation/Validation.h" -#include "tests/validation/fixtures/DepthConcatenateLayerFixture.h" +#include "tests/validation/fixtures/ConcatenateLayerFixture.h" namespace arm_compute { @@ -42,21 +42,23 @@ namespace validation TEST_SUITE(GC) TEST_SUITE(DepthConcatenateLayer) -//TODO(COMPMID-415): Add configuration test? 
- template -using GCDepthConcatenateLayerFixture = DepthConcatenateLayerValidationFixture; +using GCDepthConcatenateLayerFixture = ConcatenateLayerValidationFixture; TEST_SUITE(Float) TEST_SUITE(FP16) -FIXTURE_DATA_TEST_CASE(RunSmall, GCDepthConcatenateLayerFixture, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("DataType", - DataType::F16))) +FIXTURE_DATA_TEST_CASE(RunSmall, GCDepthConcatenateLayerFixture, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::Small3DShapes(), + framework::dataset::make("DataType", + DataType::F16)), + framework::dataset::make("Axis", 2))) { // Validate output validate(GCAccessor(_target), _reference); } -FIXTURE_DATA_TEST_CASE(RunLarge, GCDepthConcatenateLayerFixture, framework::DatasetMode::NIGHTLY, combine(datasets::Large2DShapes(), framework::dataset::make("DataType", - DataType::F16))) +FIXTURE_DATA_TEST_CASE(RunLarge, GCDepthConcatenateLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::Large3DShapes(), + framework::dataset::make("DataType", + DataType::F16)), + framework::dataset::make("Axis", 2))) { // Validate output validate(GCAccessor(_target), _reference); @@ -64,14 +66,17 @@ FIXTURE_DATA_TEST_CASE(RunLarge, GCDepthConcatenateLayerFixture, framework TEST_SUITE_END() TEST_SUITE(FP32) -FIXTURE_DATA_TEST_CASE(RunSmall, GCDepthConcatenateLayerFixture, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("DataType", - DataType::F32))) +FIXTURE_DATA_TEST_CASE(RunSmall, GCDepthConcatenateLayerFixture, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::Small3DShapes(), + framework::dataset::make("DataType", + DataType::F32)), + framework::dataset::make("Axis", 2))) { // Validate output validate(GCAccessor(_target), _reference); } -FIXTURE_DATA_TEST_CASE(RunLarge, GCDepthConcatenateLayerFixture, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateLayerShapes(), framework::dataset::make("DataType", - DataType::F32))) +FIXTURE_DATA_TEST_CASE(RunLarge, GCDepthConcatenateLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::Large3DShapes(), framework::dataset::make("DataType", + DataType::F32)), + framework::dataset::make("Axis", 2))) { // Validate output validate(GCAccessor(_target), _reference); diff --git a/tests/validation/Helpers.cpp b/tests/validation/Helpers.cpp index e9612f2223..504dbfd3a4 100644 --- a/tests/validation/Helpers.cpp +++ b/tests/validation/Helpers.cpp @@ -74,46 +74,6 @@ void fill_mask_from_pattern(uint8_t *mask, int cols, int rows, MatrixPattern pat } } -TensorShape calculate_depth_concatenate_shape(const std::vector &input_shapes) -{ - ARM_COMPUTE_ERROR_ON(input_shapes.empty()); - - TensorShape out_shape = input_shapes[0]; - - size_t max_x = 0; - size_t max_y = 0; - size_t depth = 0; - - for(const auto &shape : input_shapes) - { - max_x = std::max(shape.x(), max_x); - max_y = std::max(shape.y(), max_y); - depth += shape.z(); - } - - out_shape.set(0, max_x); - out_shape.set(1, max_y); - out_shape.set(2, depth); - - return out_shape; -} - -TensorShape calculate_concatenate_shape(const std::vector &input_shapes, size_t axis) -{ - ARM_COMPUTE_ERROR_ON(input_shapes.empty()); - TensorShape out_shape = input_shapes[0]; - ARM_COMPUTE_ERROR_ON(axis >= out_shape.num_dimensions()); - - const int new_size = std::accumulate(input_shapes.begin(), input_shapes.end(), 0, [&](int sum, const TensorShape & shape) - { - ARM_COMPUTE_ERROR_ON(axis >= shape.num_dimensions()); - return sum + 
shape[axis]; - }); - out_shape.set(axis, new_size); - - return out_shape; -} - HarrisCornersParameters harris_corners_parameters() { HarrisCornersParameters params; diff --git a/tests/validation/NEON/LSTMLayer.cpp b/tests/validation/NEON/LSTMLayer.cpp index 5dfd32b8e5..b27dfae8fa 100644 --- a/tests/validation/NEON/LSTMLayer.cpp +++ b/tests/validation/NEON/LSTMLayer.cpp @@ -109,7 +109,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zi TensorInfo(TensorShape(16U, 2U), 1, DataType::F32), TensorInfo(TensorShape(16U, 2U), 1, DataType::F32), TensorInfo(TensorShape(16U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(11U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 2U), 1, DataType::F32), TensorInfo(TensorShape(16U, 2U), 1, DataType::F32), })), framework::dataset::make("ScratchInfo", { TensorInfo(TensorShape(64U, 2U), 1, DataType::F32), diff --git a/tests/validation/fixtures/ConcatenateLayerFixture.h b/tests/validation/fixtures/ConcatenateLayerFixture.h index 39d4f9f95d..d1eed63d41 100644 --- a/tests/validation/fixtures/ConcatenateLayerFixture.h +++ b/tests/validation/fixtures/ConcatenateLayerFixture.h @@ -73,7 +73,7 @@ public: // Generate more shapes based on the input for(auto &s : shapes) { - // Randomly change the first dimension + // Randomly change the dimension if(mutate_dis(gen)) { // Decrease the dimension by a small percentage. Don't increase @@ -144,18 +144,20 @@ protected: return dst; } - SimpleTensor compute_reference(const std::vector &shapes, const std::vector &qinfo, DataType data_type, unsigned int axis) + SimpleTensor compute_reference(std::vector &shapes, const std::vector &qinfo, DataType data_type, unsigned int axis) { std::vector> srcs; + std::vector src_ptrs; // Create and fill tensors for(size_t j = 0; j < shapes.size(); ++j) { srcs.emplace_back(shapes[j], data_type, 1, qinfo[j]); fill(srcs.back(), j); + src_ptrs.emplace_back(&shapes[j]); } - const TensorShape dst_shape = calculate_concatenate_shape(shapes, axis); + const TensorShape dst_shape = misc::shape_calculator::calculate_concatenate_shape(src_ptrs, axis); SimpleTensor dst{ dst_shape, data_type, 1, qinfo[shapes.size()] }; return reference::concatenate_layer(srcs, dst, axis); } diff --git a/tests/validation/fixtures/DepthConcatenateLayerFixture.h b/tests/validation/fixtures/DepthConcatenateLayerFixture.h deleted file mode 100644 index edeefa228a..0000000000 --- a/tests/validation/fixtures/DepthConcatenateLayerFixture.h +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Copyright (c) 2017-2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef ARM_COMPUTE_TEST_DEPTHCONCATENATE_LAYER_FIXTURE -#define ARM_COMPUTE_TEST_DEPTHCONCATENATE_LAYER_FIXTURE - -#include "arm_compute/core/TensorShape.h" -#include "arm_compute/core/Types.h" -#include "arm_compute/core/utils/misc/ShapeCalculator.h" -#include "tests/AssetsLibrary.h" -#include "tests/Globals.h" -#include "tests/IAccessor.h" -#include "tests/framework/Asserts.h" -#include "tests/framework/Fixture.h" -#include "tests/validation/Helpers.h" -#include "tests/validation/reference/DepthConcatenateLayer.h" - -#include - -namespace arm_compute -{ -namespace test -{ -namespace validation -{ -template -class DepthConcatenateLayerValidationFixture : public framework::Fixture -{ -public: - template - void setup(TensorShape shape, DataType data_type) - { - // Create input shapes - std::mt19937 gen(library->seed()); - std::uniform_int_distribution<> num_dis(2, 4); - std::uniform_int_distribution<> offset_dis(0, 20); - - const int num_tensors = num_dis(gen); - - std::vector shapes(num_tensors, shape); - - // vector holding the quantization info: - // the last element is the output quantization info - // all other elements are the quantization info for the input tensors - std::vector qinfo(num_tensors + 1, QuantizationInfo()); - - for(auto &qi : qinfo) - { - qi = QuantizationInfo(1.f / 255.f, offset_dis(gen)); - } - - std::uniform_int_distribution<> depth_dis(1, 3); - std::bernoulli_distribution mutate_dis(0.5f); - std::uniform_real_distribution<> change_dis(-0.25f, 0.f); - - // Generate more shapes based on the input - for(auto &s : shapes) - { - // Set the depth of the tensor - s.set(2, depth_dis(gen)); - - // Randomly change the first dimension - if(mutate_dis(gen)) - { - // Decrease the dimension by a small percentage. Don't increase - // as that could make tensor too large. Also the change must be - // an even number. Otherwise out depth concatenate fails. 
- s.set(0, s[0] + 2 * static_cast(s[0] * change_dis(gen))); - } - - // Repeat the same as above for the second dimension - if(mutate_dis(gen)) - { - s.set(1, s[1] + 2 * static_cast(s[1] * change_dis(gen))); - } - } - - _target = compute_target(shapes, qinfo, data_type); - _reference = compute_reference(shapes, qinfo, data_type); - } - -protected: - template - void fill(U &&tensor, int i) - { - library->fill_tensor_uniform(tensor, i); - } - - TensorType compute_target(std::vector shapes, const std::vector &qinfo, DataType data_type) - { - std::vector srcs; - std::vector src_ptrs; - - // Create tensors - srcs.reserve(shapes.size()); - - for(size_t j = 0; j < shapes.size(); ++j) - { - srcs.emplace_back(create_tensor(shapes[j], data_type, 1, qinfo[j])); - src_ptrs.emplace_back(&srcs.back()); - } - - TensorShape dst_shape = misc::shape_calculator::calculate_depth_concatenate_shape(src_ptrs); - TensorType dst = create_tensor(dst_shape, data_type, 1, qinfo[shapes.size()]); - - // Create and configure function - FunctionType depth_concat; - depth_concat.configure(src_ptrs, &dst); - - for(auto &src : srcs) - { - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - } - - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); - - // Allocate tensors - for(auto &src : srcs) - { - src.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - } - - dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); - - // Fill tensors - int i = 0; - for(auto &src : srcs) - { - fill(AccessorType(src), i++); - } - - // Compute function - depth_concat.run(); - - return dst; - } - - SimpleTensor compute_reference(std::vector shapes, const std::vector &qinfo, DataType data_type) - { - std::vector> srcs; - - // Create and fill tensors - for(size_t j = 0; j < shapes.size(); ++j) - { - srcs.emplace_back(shapes[j], data_type, 1, qinfo[j]); - fill(srcs.back(), j); - } - - const TensorShape dst_shape = calculate_depth_concatenate_shape(shapes); - SimpleTensor dst{ dst_shape, data_type, 1, qinfo[shapes.size()] }; - - return reference::depthconcatenate_layer(srcs, dst); - } - - TensorType _target{}; - SimpleTensor _reference{}; -}; -} // namespace validation -} // namespace test -} // namespace arm_compute -#endif /* ARM_COMPUTE_TEST_DEPTHCONCATENATE_LAYER_FIXTURE */ -- cgit v1.2.1
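
The core of this change is that the depth-specific helper calculate_depth_concatenate_shape() is removed and every caller now goes through the single calculate_concatenate_shape(inputs, axis) routine (depth concatenation simply passes Window::DimZ), which checks that all non-axis dimensions match and sums the input sizes along the requested axis. Below is a minimal standalone sketch of that same logic; it deliberately uses plain standard-library types instead of the library's TensorShape/ITensorInfo, so the names Shape and concatenate_shape are illustrative only and not part of the Arm Compute API.

    // Standalone illustration of the refactored shape calculation (not the library code).
    #include <cassert>
    #include <cstddef>
    #include <iostream>
    #include <vector>

    using Shape = std::vector<std::size_t>;

    Shape concatenate_shape(const std::vector<Shape> &inputs, std::size_t axis)
    {
        assert(!inputs.empty());
        Shape out = inputs[0];
        assert(axis < out.size());

        // All dimensions must match except the concatenation axis.
        for(std::size_t i = 0; i < out.size(); ++i)
        {
            if(i == axis)
            {
                continue;
            }
            for(const Shape &s : inputs)
            {
                assert(s.size() == out.size() && s[i] == out[i]);
            }
        }

        // The output size along the axis is the sum of the input sizes along that axis.
        std::size_t new_size = 0;
        for(const Shape &s : inputs)
        {
            new_size += s[axis];
        }
        out[axis] = new_size;
        return out;
    }

    int main()
    {
        // Depth concatenation (axis 2), as the depth functions now request via Window::DimZ.
        const std::vector<Shape> inputs = { { 128, 32, 32 }, { 128, 32, 16 }, { 128, 32, 8 } };
        const Shape out = concatenate_shape(inputs, 2);
        std::cout << out[0] << " x " << out[1] << " x " << out[2] << "\n"; // 128 x 32 x 56
        return 0;
    }

This also explains why the kernels above lose their border/offset handling: since non-axis dimensions must now be equal, inputs no longer need to be centred (padded) inside a larger output plane, so the left/right and top/bottom offsets and BorderSize overrides become dead code.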