From a9c4472188abef421adb589e2a6fef52727d465f Mon Sep 17 00:00:00 2001 From: Michalis Spyrou Date: Fri, 5 Apr 2019 17:18:36 +0100 Subject: COMPMID-2051 Refactor shape_calculator::calculate_concatenate_shape Change-Id: Ibf316718d11fa975d75f226925747b21c4efd127 Signed-off-by: Michalis Spyrou Reviewed-on: https://review.mlplatform.org/c/974 Comments-Addressed: Arm Jenkins Tested-by: Arm Jenkins Reviewed-by: Michele Di Giorgio --- src/core/CL/cl_kernels/concatenate.cl | 6 +- .../CL/kernels/CLDepthConcatenateLayerKernel.cpp | 42 ++--------- src/core/GLES_COMPUTE/cs_shaders/concatenate.cs | 6 +- .../kernels/GCDepthConcatenateLayerKernel.cpp | 45 +++--------- .../NEON/kernels/NEDepthConcatenateLayerKernel.cpp | 39 +++-------- src/graph/nodes/ConcatenateLayerNode.cpp | 14 +--- src/runtime/CL/functions/CLConcatenateLayer.cpp | 39 ++++------- .../CL/functions/CLDepthConcatenateLayer.cpp | 4 +- .../GLES_COMPUTE/functions/GCConcatenateLayer.cpp | 81 ++++++++++++++++++++++ src/runtime/NEON/functions/NEConcatenateLayer.cpp | 35 +++------- .../NEON/functions/NEDepthConcatenateLayer.cpp | 6 +- 11 files changed, 140 insertions(+), 177 deletions(-) create mode 100644 src/runtime/GLES_COMPUTE/functions/GCConcatenateLayer.cpp (limited to 'src') diff --git a/src/core/CL/cl_kernels/concatenate.cl b/src/core/CL/cl_kernels/concatenate.cl index 99f4659960..23ebcf91b6 100644 --- a/src/core/CL/cl_kernels/concatenate.cl +++ b/src/core/CL/cl_kernels/concatenate.cl @@ -409,19 +409,19 @@ __kernel void concatenate_height( __kernel void concatenate_depth( TENSOR3D_DECLARATION(src), TENSOR3D_DECLARATION(dst), - int3 offsets) + int offset) { Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src); Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst); VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) - source_values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)tensor3D_offset(&src, -offsets.x, -offsets.y, 0)); + source_values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)src.ptr); #if defined(OFFSET_IN1) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_OUT) source_values = requantize(source_values, OFFSET_IN1, OFFSET_OUT, SCALE_IN1, SCALE_OUT); #endif /* defined(OFFSET_IN1) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_OUT) */ VSTORE(VEC_SIZE) - (source_values, 0, (__global DATA_TYPE *)(dst.ptr + offsets.z)); + (source_values, 0, (__global DATA_TYPE *)(dst.ptr + offset)); } #endif /* defined(DATA_TYPE) && defined(VEC_SIZE) */ diff --git a/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp b/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp index 3fccc0447d..1cae3712dc 100644 --- a/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp +++ b/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. 
* * SPDX-License-Identifier: MIT * @@ -47,19 +47,13 @@ std::pair validate_and_configure_window(ITensorInfo *input, unsi { ARM_COMPUTE_UNUSED(depth_offset); - // Configure kernel window - const int left_right = (output->dimension(0) - input->dimension(0)) / 2; - const int top_bottom = (output->dimension(1) - input->dimension(1)) / 2; - const unsigned int num_elems_processed_per_iteration = 16 / input->element_size(); - const unsigned int num_elems_read_per_iteration = 16 / input->element_size(); - const unsigned int num_rows_read_per_iteration = 1; // The window needs to be based on input as we copy all the depths of input Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration)); win.set(Window::DimZ, Window::Dimension(0, input->tensor_shape().z(), 1)); - AccessWindowRectangle input_access(input, -left_right, -top_bottom, num_elems_read_per_iteration, num_rows_read_per_iteration); + AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration); AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration); bool window_changed = update_window_and_padding(win, input_access, output_access); output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape())); @@ -74,30 +68,20 @@ Status validate_arguments(const ITensorInfo *input, unsigned int depth_offset, c ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); + ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(Window::DimX) != output->dimension(Window::DimX)); + ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(Window::DimY) != output->dimension(Window::DimY)); ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(2) + depth_offset > output->dimension(2)); - ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) > output->dimension(0)); - ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(1) > output->dimension(1)); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(3, input, output); - // The gaps between the two lowest dimensions of input and output need to be divisible by 2 - // Otherwise it is not clear how the padding should be added onto the input tensor - ARM_COMPUTE_RETURN_ERROR_ON((output->dimension(0) - input->dimension(0)) % 2); - ARM_COMPUTE_RETURN_ERROR_ON((output->dimension(1) - input->dimension(1)) % 2); - return Status{}; } } // namespace CLDepthConcatenateLayerKernel::CLDepthConcatenateLayerKernel() - : _input(nullptr), _output(nullptr), _top_bottom(0), _left_right(0), _depth_offset(0) + : _input(nullptr), _output(nullptr), _depth_offset(0) { } -BorderSize CLDepthConcatenateLayerKernel::border_size() const -{ - return BorderSize(_top_bottom, _left_right); -} - void CLDepthConcatenateLayerKernel::configure(const ICLTensor *input, unsigned int depth_offset, ICLTensor *output) { ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); @@ -124,10 +108,6 @@ void CLDepthConcatenateLayerKernel::configure(const ICLTensor *input, unsigned i // Create kernel _kernel = static_cast(CLKernelLibrary::get().create_kernel("concatenate_depth", build_opts.options())); - // Configure kernel window - _left_right = (output->info()->dimension(0) - input->info()->dimension(0)) / 2; - _top_bottom = (output->info()->dimension(1) - input->info()->dimension(1)) / 2; - // Configure kernel window auto win_config = validate_and_configure_window(input->info(), depth_offset, output->info()); ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config)); @@ -153,16 +133,8 @@ void 
CLDepthConcatenateLayerKernel::run(const Window &window, cl::CommandQueue & const int offset_to_first_elements_in_bytes = _depth_offset * _output->info()->strides_in_bytes()[2]; - unsigned int idx = 2 * num_arguments_per_3D_tensor(); // Skip the input and output parameters - const cl_int3 offsets = - { - { - static_cast(_left_right), - static_cast(_top_bottom), - static_cast(offset_to_first_elements_in_bytes), - } - }; - _kernel.setArg(idx, offsets); + unsigned int idx = 2 * num_arguments_per_3D_tensor(); // Skip the input and output parameters + _kernel.setArg(idx, offset_to_first_elements_in_bytes); do { diff --git a/src/core/GLES_COMPUTE/cs_shaders/concatenate.cs b/src/core/GLES_COMPUTE/cs_shaders/concatenate.cs index 69ac50b4d0..49b3954fca 100644 --- a/src/core/GLES_COMPUTE/cs_shaders/concatenate.cs +++ b/src/core/GLES_COMPUTE/cs_shaders/concatenate.cs @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -53,7 +53,7 @@ void main(void) Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift); Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift); - float tmp = LOAD(src_ptr, TENSOR3D_OFFSET(src_iter, -OFFSET_X, -OFFSET_Y, 0)); + float tmp = LOAD_CURRENT_ITEM(src_ptr, src_iter); STORE_CURRENT_ITEM(dst_ptr, dst_iter, tmp); } @@ -66,7 +66,7 @@ void main(void) Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift); Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift); - uvec2 tmp = LOAD(src_ptr, TENSOR3D_OFFSET(src_iter, -OFFSET_X, -OFFSET_Y, 0)); + uvec2 tmp = LOAD_CURRENT_ITEM(src_ptr, src_iter); STORE_CURRENT_ITEM(dst_ptr, dst_iter, tmp); } #endif /*DATA_TYPE_FP16*/ diff --git a/src/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.cpp b/src/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.cpp index 36d1b29bba..6f70efe5c7 100644 --- a/src/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.cpp +++ b/src/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. 
* * SPDX-License-Identifier: MIT * @@ -38,29 +38,19 @@ using namespace arm_compute; GCDepthConcatenateLayerKernel::GCDepthConcatenateLayerKernel() - : _input(nullptr), _output(nullptr), _top_bottom(0), _left_right(0), _depth_offset(0) + : _input(nullptr), _output(nullptr), _depth_offset(0) { } - -BorderSize GCDepthConcatenateLayerKernel::border_size() const -{ - return BorderSize(_top_bottom, _left_right); -} - void GCDepthConcatenateLayerKernel::configure(const IGCTensor *input, unsigned int depth_offset, IGCTensor *output) { ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32); ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); + + ARM_COMPUTE_ERROR_ON(input->info()->dimension(Window::DimX) != output->info()->dimension(Window::DimX)); + ARM_COMPUTE_ERROR_ON(input->info()->dimension(Window::DimY) != output->info()->dimension(Window::DimY)); ARM_COMPUTE_ERROR_ON(input->info()->dimension(2) + depth_offset > output->info()->dimension(2)); - ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) > output->info()->dimension(0)); - ARM_COMPUTE_ERROR_ON(input->info()->dimension(1) > output->info()->dimension(1)); ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(3, input, output); - // The gaps between the two lowest dimensions of input and output need to be divisible by 2 - // Otherwise it is not clear how the padding should be added onto the input tensor - ARM_COMPUTE_ERROR_ON((output->info()->dimension(0) - input->info()->dimension(0)) % 2); - ARM_COMPUTE_ERROR_ON((output->info()->dimension(1) - input->info()->dimension(1)) % 2); - _input = input; _output = output; _depth_offset = depth_offset; @@ -73,35 +63,20 @@ void GCDepthConcatenateLayerKernel::configure(const IGCTensor *input, unsigned i build_opts.emplace("#define LOCAL_SIZE_Y " + support::cpp11::to_string(1)); build_opts.emplace("#define LOCAL_SIZE_Z " + support::cpp11::to_string(1)); - // Configure kernel window - _left_right = (output->info()->dimension(0) - input->info()->dimension(0)) / 2; - _top_bottom = (output->info()->dimension(1) - input->info()->dimension(1)) / 2; - - build_opts.emplace("#define OFFSET_X " + support::cpp11::to_string(_left_right)); - build_opts.emplace("#define OFFSET_Y " + support::cpp11::to_string(_top_bottom)); - // Create kernel _kernel = static_cast(GCKernelLibrary::get().create_kernel("concatenate_depth", build_opts)); unsigned int num_elems_processed_per_iteration = 1; - unsigned int num_elems_read_per_iteration = 1; - if(input->info()->data_type() == DataType::F32) - { - num_elems_processed_per_iteration = 1; - num_elems_read_per_iteration = 1; - } - else if(input->info()->data_type() == DataType::F16) + if(input->info()->data_type() == DataType::F16) { num_elems_processed_per_iteration = 4; - num_elems_read_per_iteration = 4; } - const unsigned int num_rows_read_per_iteration = 1; // The window needs to be based on input as we copy all the depths of input Window win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration)); win.set(Window::DimZ, Window::Dimension(0, input->info()->tensor_shape().z(), 1)); - AccessWindowRectangle input_access(input->info(), -_left_right, -_top_bottom, num_elems_read_per_iteration, num_rows_read_per_iteration); + AccessWindowHorizontal input_access(input->info(), 0, num_elems_processed_per_iteration); AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration); update_window_and_padding(win, input_access, output_access); output_access.set_valid_region(win, ValidRegion(Coordinates(), 
output->info()->tensor_shape())); @@ -118,11 +93,9 @@ void GCDepthConcatenateLayerKernel::run(const Window &window) _output->set_needs_shifting(true); - Window slice = window.first_slice_window_3D(); Window slice_in = window.first_slice_window_3D(); Window slice_out = window.first_slice_window_3D(); - slice.shift(Window::DimX, -(_output->info()->padding()).left); slice_out.set(Window::DimZ, Window::Dimension(_depth_offset)); do @@ -133,7 +106,7 @@ void GCDepthConcatenateLayerKernel::run(const Window &window) _kernel.update_shader_params(); - enqueue(*this, slice); + enqueue(*this, slice_in); } - while(window.slide_window_slice_3D(slice) && window.slide_window_slice_3D(slice_in)); + while(window.slide_window_slice_3D(slice_in)); } diff --git a/src/core/NEON/kernels/NEDepthConcatenateLayerKernel.cpp b/src/core/NEON/kernels/NEDepthConcatenateLayerKernel.cpp index 8352c94586..b360e9e6be 100644 --- a/src/core/NEON/kernels/NEDepthConcatenateLayerKernel.cpp +++ b/src/core/NEON/kernels/NEDepthConcatenateLayerKernel.cpp @@ -42,18 +42,13 @@ using namespace arm_compute; namespace { template -void depth_concat(const ITensor *in, ITensor *out, std::pair start_xy, int depth_offset, const Window &window) +void depth_concat(const ITensor *in, ITensor *out, int depth_offset, const Window &window) { - const int start_x = start_xy.first; - const int start_y = start_xy.second; - // Offset input - const int input_offset_to_first_elements_in_bytes = in->info()->offset_first_element_in_bytes() - start_x * in->info()->strides_in_bytes()[0] - start_y * in->info()->strides_in_bytes()[1]; - uint8_t *input_ptr = in->buffer() + input_offset_to_first_elements_in_bytes; + uint8_t *input_ptr = in->buffer() + in->info()->offset_first_element_in_bytes(); // Offset output - const unsigned int output_offset_to_first_elements_in_bytes = out->info()->offset_first_element_in_bytes() + depth_offset * out->info()->strides_in_bytes()[2]; - uint8_t *output_ptr = out->buffer() + output_offset_to_first_elements_in_bytes; + uint8_t *output_ptr = out->buffer() + out->info()->offset_first_element_in_bytes() + depth_offset * out->info()->strides_in_bytes()[2]; Iterator input(in, window); Iterator output(out, window); @@ -88,19 +83,13 @@ std::pair validate_and_configure_window(ITensorInfo *input, unsi { ARM_COMPUTE_UNUSED(depth_offset); - // Configure kernel window - const int left_right = (output->dimension(0) - input->dimension(0)) / 2; - const int top_bottom = (output->dimension(1) - input->dimension(1)) / 2; - const unsigned int num_elems_processed_per_iteration = 16 / input->element_size(); - const unsigned int num_elems_read_per_iteration = 16 / input->element_size(); - const unsigned int num_rows_read_per_iteration = 1; // The window needs to be based on input as we copy all the depths of input Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration)); win.set(Window::DimZ, Window::Dimension(0, input->tensor_shape().z(), 1)); - AccessWindowRectangle input_access(input, -left_right, -top_bottom, num_elems_read_per_iteration, num_rows_read_per_iteration); + AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration); AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration); bool window_changed = update_window_and_padding(win, input_access, output_access); output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape())); @@ -116,28 +105,18 @@ Status validate_arguments(const ITensorInfo *input, unsigned int depth_offset, c 
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); + ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(Window::DimX) != output->dimension(Window::DimX)); + ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(Window::DimY) != output->dimension(Window::DimY)); ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(2) + depth_offset > output->dimension(2)); - ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) > output->dimension(0)); - ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(1) > output->dimension(1)); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(3, input, output); - // The gaps between the two lowest dimensions of input and output need to be divisible by 2 - // Otherwise it is not clear how the padding should be added onto the input tensor - ARM_COMPUTE_RETURN_ERROR_ON((output->dimension(0) - input->dimension(0)) % 2); - ARM_COMPUTE_RETURN_ERROR_ON((output->dimension(1) - input->dimension(1)) % 2); - return Status{}; } } // namespace NEDepthConcatenateLayerKernel::NEDepthConcatenateLayerKernel() - : _func(nullptr), _input(nullptr), _output(nullptr), _top_bottom(0), _left_right(0), _depth_offset(0) -{ -} - -BorderSize NEDepthConcatenateLayerKernel::border_size() const + : _func(nullptr), _input(nullptr), _output(nullptr), _depth_offset(0) { - return BorderSize(_top_bottom, _left_right); } void NEDepthConcatenateLayerKernel::configure(const ITensor *input, unsigned int depth_offset, ITensor *output) @@ -149,8 +128,6 @@ void NEDepthConcatenateLayerKernel::configure(const ITensor *input, unsigned int _input = input; _output = output; _depth_offset = depth_offset; - _left_right = (output->info()->dimension(0) - input->info()->dimension(0)) / 2; - _top_bottom = (output->info()->dimension(1) - input->info()->dimension(1)) / 2; switch(input->info()->data_type()) { @@ -190,5 +167,5 @@ void NEDepthConcatenateLayerKernel::run(const Window &window, const ThreadInfo & ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window); ARM_COMPUTE_ERROR_ON(_func == nullptr); - (*_func)(_input, _output, std::make_pair(_left_right, _top_bottom), _depth_offset, window); + (*_func)(_input, _output, _depth_offset, window); } diff --git a/src/graph/nodes/ConcatenateLayerNode.cpp b/src/graph/nodes/ConcatenateLayerNode.cpp index 48da8b6e9e..ff515c4427 100644 --- a/src/graph/nodes/ConcatenateLayerNode.cpp +++ b/src/graph/nodes/ConcatenateLayerNode.cpp @@ -68,6 +68,7 @@ TensorDescriptor ConcatenateLayerNode::compute_output_descriptor(const std::vect TensorDescriptor output_descriptor = input_descriptors[0]; const int axis_idx = get_dimension_idx(output_descriptor.layout, axis); + ARM_COMPUTE_ERROR_ON_MSG(axis_idx > 2, "Unsupported concatenation axis!"); // Extract shapes std::vector shapes; @@ -76,18 +77,7 @@ TensorDescriptor ConcatenateLayerNode::compute_output_descriptor(const std::vect shapes.emplace_back(&input_descriptor.shape); } - if(axis_idx < 2) - { - output_descriptor.shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(shapes, axis_idx); - } - else if(axis_idx == 2) - { - output_descriptor.shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(shapes); - } - else - { - ARM_COMPUTE_ERROR("Unsupported concatenation axis!"); - } + output_descriptor.shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(shapes, axis_idx); return output_descriptor; } diff --git a/src/runtime/CL/functions/CLConcatenateLayer.cpp 
b/src/runtime/CL/functions/CLConcatenateLayer.cpp index b9b3c5bb80..b8224d2cce 100644 --- a/src/runtime/CL/functions/CLConcatenateLayer.cpp +++ b/src/runtime/CL/functions/CLConcatenateLayer.cpp @@ -56,15 +56,7 @@ void CLConcatenateLayer::configure(const std::vector &inputs_vector ARM_COMPUTE_ERROR_ON_NULLPTR(t); return t->info(); }); - TensorShape output_shape{}; - if(_axis == Window::DimZ) - { - output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector); - } - else - { - output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, _axis); - } + TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, _axis); // Output auto inizialitation if not yet initialized auto_init_if_empty(*output->info(), output_shape, 1, inputs_vector[0]->info()->data_type()); @@ -143,19 +135,6 @@ Status CLConcatenateLayer::validate(const std::vector &inputs_vec ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output); ARM_COMPUTE_RETURN_ERROR_ON(num_inputs < 2); - // Output auto inizialitation if not yet initialized - TensorInfo tmp_output_info = *output->clone(); - TensorShape output_shape{}; - if(axis == Window::DimZ) - { - output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector); - } - else - { - output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, axis); - } - auto_init_if_empty(tmp_output_info, output_shape, 1, inputs_vector[0]->data_type()); - unsigned int offset = 0; switch(axis) { @@ -166,19 +145,19 @@ Status CLConcatenateLayer::validate(const std::vector &inputs_vec case 2: // Validate WidthConcatenate2Tensors kernels if there are 2 inputs ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(inputs_vector[0], inputs_vector[1]); - ARM_COMPUTE_RETURN_ON_ERROR(CLWidthConcatenate2TensorsKernel::validate(inputs_vector[0], inputs_vector[1], &tmp_output_info)); + ARM_COMPUTE_RETURN_ON_ERROR(CLWidthConcatenate2TensorsKernel::validate(inputs_vector[0], inputs_vector[1], output)); break; case 4: // Validate WidthConcatenate4Tensors kernels if there are 4 inputs ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(inputs_vector[0], inputs_vector[1], inputs_vector[2], inputs_vector[3]); - ARM_COMPUTE_RETURN_ON_ERROR(CLWidthConcatenate4TensorsKernel::validate(inputs_vector[0], inputs_vector[1], inputs_vector[2], inputs_vector[3], &tmp_output_info)); + ARM_COMPUTE_RETURN_ON_ERROR(CLWidthConcatenate4TensorsKernel::validate(inputs_vector[0], inputs_vector[1], inputs_vector[2], inputs_vector[3], output)); break; default: // Validate generic case of WidthConcatenate kernel for(const auto &input : inputs_vector) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input); - ARM_COMPUTE_RETURN_ON_ERROR(CLWidthConcatenateLayerKernel::validate(input, offset, &tmp_output_info)); + ARM_COMPUTE_RETURN_ON_ERROR(CLWidthConcatenateLayerKernel::validate(input, offset, output)); offset += input->dimension(axis); } break; @@ -189,7 +168,7 @@ Status CLConcatenateLayer::validate(const std::vector &inputs_vec { for(const auto &input : inputs_vector) { - ARM_COMPUTE_RETURN_ON_ERROR(CLHeightConcatenateLayerKernel::validate(input, offset, &tmp_output_info)); + ARM_COMPUTE_RETURN_ON_ERROR(CLHeightConcatenateLayerKernel::validate(input, offset, output)); offset += input->dimension(axis); } break; @@ -198,7 +177,7 @@ Status CLConcatenateLayer::validate(const std::vector &inputs_vec { for(const auto &input : inputs_vector) { - 
ARM_COMPUTE_RETURN_ON_ERROR(CLDepthConcatenateLayerKernel::validate(input, offset, &tmp_output_info)); + ARM_COMPUTE_RETURN_ON_ERROR(CLDepthConcatenateLayerKernel::validate(input, offset, output)); offset += input->dimension(axis); } break; @@ -207,6 +186,12 @@ Status CLConcatenateLayer::validate(const std::vector &inputs_vec ARM_COMPUTE_ERROR("Axis not supported"); } + if(output->total_size() != 0) + { + TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, axis); + ARM_COMPUTE_RETURN_ERROR_ON(output_shape.total_size() != output->tensor_shape().total_size()); + } + return Status{}; } diff --git a/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp b/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp index e46647a20c..4a5f845631 100644 --- a/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp +++ b/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp @@ -56,7 +56,7 @@ void CLDepthConcatenateLayer::configure(const std::vector &inputs_v _concat_kernels_vector = arm_compute::support::cpp14::make_unique(_num_inputs); _border_handlers_vector = arm_compute::support::cpp14::make_unique(_num_inputs); - TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector_info); + TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector_info, Window::DimZ); // Output auto inizialitation if not yet initialized auto_init_if_empty(*output->info(), output_shape, 1, inputs_vector[0]->info()->data_type()); @@ -82,7 +82,7 @@ Status CLDepthConcatenateLayer::validate(const std::vector &input // Output auto inizialitation if not yet initialized TensorInfo tmp_output_info = *output->clone(); - TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector); + TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, Window::DimZ); auto_init_if_empty(tmp_output_info, output_shape, 1, inputs_vector[0]->data_type()); unsigned int depth_offset = 0; diff --git a/src/runtime/GLES_COMPUTE/functions/GCConcatenateLayer.cpp b/src/runtime/GLES_COMPUTE/functions/GCConcatenateLayer.cpp new file mode 100644 index 0000000000..506f648171 --- /dev/null +++ b/src/runtime/GLES_COMPUTE/functions/GCConcatenateLayer.cpp @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2019 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "arm_compute/runtime/GLES_COMPUTE/functions/GCConcatenateLayer.h" + +#include "arm_compute/core/Error.h" +#include "arm_compute/core/GLES_COMPUTE/IGCTensor.h" +#include "arm_compute/core/PixelValue.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/core/utils/misc/ShapeCalculator.h" +#include "arm_compute/runtime/GLES_COMPUTE/GCScheduler.h" +#include "support/ToolchainSupport.h" + +namespace arm_compute +{ +GCConcatenateLayer::GCConcatenateLayer() + : _concat_kernels(), + _num_inputs(0), + _axis(Window::DimZ) +{ +} + +void GCConcatenateLayer::configure(std::vector inputs_vector, IGCTensor *output, size_t axis) +{ + ARM_COMPUTE_ERROR_ON(inputs_vector.size() < 2); + + _num_inputs = inputs_vector.size(); + _axis = axis; + + TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, axis); + + // Output auto inizialitation if not yet initialized + auto_init_if_empty(*output->info(), output_shape, 1, inputs_vector[0]->info()->data_type()); + + unsigned int offset = 0; + switch(axis) + { + case Window::DimZ: + { + for(unsigned int i = 0; i < _num_inputs; ++i) + { + auto kernel = support::cpp14::make_unique(); + kernel->configure(inputs_vector.at(i), offset, output); + offset += inputs_vector.at(i)->info()->dimension(axis); + _concat_kernels.emplace_back(std::move(kernel)); + } + break; + } + default: + ARM_COMPUTE_ERROR("Axis not supported"); + } +} + +void GCConcatenateLayer::run() +{ + for(auto &kernel : _concat_kernels) + { + GCScheduler::get().dispatch(*kernel, true); + } +} +} // namespace arm_compute diff --git a/src/runtime/NEON/functions/NEConcatenateLayer.cpp b/src/runtime/NEON/functions/NEConcatenateLayer.cpp index e02c0c2c7a..b8cfa2b8f2 100644 --- a/src/runtime/NEON/functions/NEConcatenateLayer.cpp +++ b/src/runtime/NEON/functions/NEConcatenateLayer.cpp @@ -56,15 +56,7 @@ void NEConcatenateLayer::configure(const std::vector &inputs_vector, ARM_COMPUTE_ERROR_ON_NULLPTR(inputs_vector.at(i)); inputs_vector_info.emplace_back(inputs_vector.at(i)->info()); } - TensorShape output_shape{}; - if(_axis == Window::DimZ) - { - output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector); - } - else - { - output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, _axis); - } + TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, _axis); // Output auto inizialitation if not yet initialized auto_init_if_empty(*output->info(), output_shape, 1, inputs_vector[0]->info()->data_type()); @@ -109,19 +101,6 @@ Status NEConcatenateLayer::validate(const std::vector &inputs_vec ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output); ARM_COMPUTE_RETURN_ERROR_ON(inputs_vector.size() < 2); - // Output auto inizialitation if not yet initialized - TensorInfo tmp_output_info = *output->clone(); - TensorShape output_shape{}; - if(axis == Window::DimZ) - { - output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector); - } - else - { - output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, axis); - } - auto_init_if_empty(tmp_output_info, output_shape, 1, inputs_vector[0]->data_type()); - unsigned int offset = 0; for(const auto &input : inputs_vector) { @@ -130,17 +109,17 @@ Status NEConcatenateLayer::validate(const std::vector &inputs_vec { case Window::DimX: { - ARM_COMPUTE_RETURN_ON_ERROR(NEWidthConcatenateLayerKernel::validate(input, offset, 
&tmp_output_info)); + ARM_COMPUTE_RETURN_ON_ERROR(NEWidthConcatenateLayerKernel::validate(input, offset, output)); break; } case Window::DimY: { - ARM_COMPUTE_RETURN_ON_ERROR(NEHeightConcatenateLayerKernel::validate(input, offset, &tmp_output_info)); + ARM_COMPUTE_RETURN_ON_ERROR(NEHeightConcatenateLayerKernel::validate(input, offset, output)); break; } case Window::DimZ: { - ARM_COMPUTE_RETURN_ON_ERROR(NEDepthConcatenateLayerKernel::validate(input, offset, &tmp_output_info)); + ARM_COMPUTE_RETURN_ON_ERROR(NEDepthConcatenateLayerKernel::validate(input, offset, output)); break; } default: @@ -149,6 +128,12 @@ Status NEConcatenateLayer::validate(const std::vector &inputs_vec offset += input->dimension(axis); } + if(output->total_size() != 0) + { + TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, axis); + ARM_COMPUTE_RETURN_ERROR_ON(output_shape.total_size() != output->tensor_shape().total_size()); + } + return Status{}; } diff --git a/src/runtime/NEON/functions/NEDepthConcatenateLayer.cpp b/src/runtime/NEON/functions/NEDepthConcatenateLayer.cpp index 49db855f21..b814bffa96 100644 --- a/src/runtime/NEON/functions/NEDepthConcatenateLayer.cpp +++ b/src/runtime/NEON/functions/NEDepthConcatenateLayer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -54,7 +54,7 @@ void NEDepthConcatenateLayer::configure(const std::vector &inputs_vec { inputs_vector_info.emplace_back(inputs_vector.at(i)->info()); } - TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector_info); + TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector_info, Window::DimZ); // Output auto inizialitation if not yet initialized auto_init_if_empty(*output->info(), output_shape, 1, inputs_vector[0]->info()->data_type()); @@ -80,7 +80,7 @@ Status NEDepthConcatenateLayer::validate(const std::vector &input // Output auto inizialitation if not yet initialized TensorInfo tmp_output_info = *output->clone(); - TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector); + TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, Window::DimZ); auto_init_if_empty(tmp_output_info, output_shape, 1, inputs_vector[0]->data_type()); unsigned int depth_offset = 0; -- cgit v1.2.1
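Editor's note: the change above replaces the special-cased calculate_depth_concatenate_shape with the generic shape_calculator::calculate_concatenate_shape(shapes, axis) everywhere, and tightens the kernels to require equal X/Y dimensions instead of centring smaller inputs with padding. The helper itself lives in the ShapeCalculator utilities these files include and is not shown in this diff, so the following is only a minimal, self-contained sketch of the shape rule the refactor implies; Shape and concat_shape are hypothetical stand-ins, not the library's API.

// Minimal sketch of the concatenate shape rule, NOT the library's actual
// ShapeCalculator code. 'Shape' and 'concat_shape' are hypothetical stand-ins
// for arm_compute::TensorShape and shape_calculator::calculate_concatenate_shape.
#include <cassert>
#include <cstddef>
#include <vector>

using Shape = std::vector<std::size_t>; // dimension extents, e.g. {W, H, C}

Shape concat_shape(const std::vector<Shape> &inputs, std::size_t axis)
{
    assert(!inputs.empty() && axis < inputs[0].size());
    Shape out = inputs[0];
    for(std::size_t i = 1; i < inputs.size(); ++i)
    {
        const Shape &in = inputs[i];
        assert(in.size() == out.size());
        for(std::size_t d = 0; d < out.size(); ++d)
        {
            if(d == axis)
            {
                out[d] += in[d]; // the concatenation axis accumulates
            }
            else
            {
                assert(in[d] == out[d]); // every other dimension must match exactly
            }
        }
    }
    return out;
}

int main()
{
    // Depth concatenation (axis 2) of two 8x8 feature maps with 3 and 5 channels.
    const Shape out = concat_shape({{8, 8, 3}, {8, 8, 5}}, 2);
    assert(out[0] == 8 && out[1] == 8 && out[2] == 8);
    return 0;
}

The point of the sketch is that the depth case is just the generic rule with axis == 2 (Window::DimZ), which is exactly the substitution made in CLDepthConcatenateLayer, NEDepthConcatenateLayer and the new GCConcatenateLayer above.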
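A second, smaller consequence of the refactor: the depth-concatenation kernels now receive a single byte offset computed as depth_offset * output_stride_z (see offset_to_first_elements_in_bytes in the CL and NEON kernels above) rather than the old int3 of x/y/z offsets. The sketch below only illustrates that placement arithmetic with made-up tensor sizes; it is not code from the library.

// Hedged illustration of the placement arithmetic the refactored kernels use;
// sizes and element type are example values, not taken from the patch.
#include <cstddef>
#include <cstdio>
#include <vector>

int main()
{
    // Three hypothetical inputs of 8x8 pixels with 3, 5 and 4 channels, FP32
    // elements, concatenated along the channel (Z) axis.
    const std::size_t width = 8, height = 8, elem_size = 4;
    const std::vector<std::size_t> channels{3, 5, 4};

    // Bytes per output channel plane, i.e. the output's Z stride for a dense layout.
    const std::size_t out_stride_z = width * height * elem_size;

    std::size_t depth_offset = 0; // running channel offset into the output
    for(std::size_t i = 0; i < channels.size(); ++i)
    {
        const std::size_t byte_offset = depth_offset * out_stride_z;
        std::printf("input %zu writes from byte %zu of the output buffer\n", i, byte_offset);
        depth_offset += channels[i]; // mirrors 'offset += input->dimension(axis)'
    }
    return 0;
}

The running depth_offset here mirrors the 'offset += input->dimension(axis)' accumulation in the configure() and validate() loops of the concatenate functions above.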