From ae54e026c86aec7d6819ee3ef76372c1a3c92467 Mon Sep 17 00:00:00 2001 From: Georgios Pinitas Date: Mon, 16 Jul 2018 15:41:27 +0100 Subject: COMPMID-1364: Add support for NHWC in NEDepthConcatenateLayer Change-Id: I4f8e46d1c79afa9284f2c6dc00383c453a8e7bd5 Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/140165 Reviewed-by: Giorgio Arena Reviewed-by: Pablo Tello Tested-by: Jenkins --- .../NEON/kernels/NEDepthConcatenateLayerKernel.cpp | 107 ++++++++++-------- .../NEON/kernels/NEWidthConcatenateLayerKernel.cpp | 125 +++++++++++++++++++++ src/runtime/CL/functions/CLConcatenateLayer.cpp | 6 +- .../CL/functions/CLDepthConcatenateLayer.cpp | 2 +- src/runtime/NEON/functions/NEConcatenateLayer.cpp | 90 +++++++++++++++ .../NEON/functions/NEDepthConcatenateLayer.cpp | 27 ++++- .../NEON/functions/NEWidthConcatenateLayer.cpp | 96 ++++++++++++++++ 7 files changed, 402 insertions(+), 51 deletions(-) create mode 100644 src/core/NEON/kernels/NEWidthConcatenateLayerKernel.cpp create mode 100644 src/runtime/NEON/functions/NEConcatenateLayer.cpp create mode 100644 src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp (limited to 'src') diff --git a/src/core/NEON/kernels/NEDepthConcatenateLayerKernel.cpp b/src/core/NEON/kernels/NEDepthConcatenateLayerKernel.cpp index 38443ca4a8..1b937b5be8 100644 --- a/src/core/NEON/kernels/NEDepthConcatenateLayerKernel.cpp +++ b/src/core/NEON/kernels/NEDepthConcatenateLayerKernel.cpp @@ -28,37 +28,18 @@ #include "arm_compute/core/IAccessWindow.h" #include "arm_compute/core/ITensor.h" #include "arm_compute/core/NEON/NEFixedPoint.h" +#include "arm_compute/core/NEON/wrapper/wrapper.h" #include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/Utils.h" #include "arm_compute/core/Validate.h" #include "arm_compute/core/Window.h" -#include #include using namespace arm_compute; namespace { -// Overloads of 128-bit vector loads -uint16x8_t loadq(const uint16_t *ptr) -{ - return vld1q_u16(ptr); -} -uint32x4_t loadq(const uint32_t *ptr) -{ - return vld1q_u32(ptr); -} -// Overloads of 128-bit vector stores -void storeq(uint16_t *ptr, uint16x8_t val) -{ - return vst1q_u16(ptr, val); -} -void storeq(uint32_t *ptr, uint32x4_t val) -{ - return vst1q_u32(ptr, val); -} - template void depth_concat(const ITensor *in, ITensor *out, std::pair start_xy, int depth_offset, const Window &window) { @@ -81,10 +62,54 @@ void depth_concat(const ITensor *in, ITensor *out, std::pair start_xy, const auto in_ptr = reinterpret_cast(input_ptr + input.offset()); const auto out_ptr = reinterpret_cast(output_ptr + output.offset()); - storeq(out_ptr, loadq(in_ptr)); + wrapper::vstore(out_ptr, wrapper::vloadq(in_ptr)); }, input, output); } + +std::pair validate_and_configure_window(ITensorInfo *input, unsigned int depth_offset, ITensorInfo *output) +{ + ARM_COMPUTE_UNUSED(depth_offset); + + // Configure kernel window + const int left_right = (output->dimension(0) - input->dimension(0)) / 2; + const int top_bottom = (output->dimension(1) - input->dimension(1)) / 2; + + const unsigned int num_elems_processed_per_iteration = 16 / input->element_size(); + const unsigned int num_elems_read_per_iteration = 16 / input->element_size(); + const unsigned int num_rows_read_per_iteration = 1; + + // The window needs to be based on input as we copy all the depths of input + Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration)); + win.set(Window::DimZ, Window::Dimension(0, input->tensor_shape().z(), 1)); + + AccessWindowRectangle input_access(input, -left_right, -top_bottom, 
num_elems_read_per_iteration, num_rows_read_per_iteration); + AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration); + bool window_changed = update_window_and_padding(win, input_access, output_access); + output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape())); + + Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{}; + return std::make_pair(err, win); +} + +Status validate_arguments(const ITensorInfo *input, unsigned int depth_offset, const ITensorInfo *output) +{ + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); + + ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(2) + depth_offset > output->dimension(2)); + ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) > output->dimension(0)); + ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(1) > output->dimension(1)); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(3, input, output); + + // The gaps between the two lowest dimensions of input and output need to be divisible by 2 + // Otherwise it is not clear how the padding should be added onto the input tensor + ARM_COMPUTE_RETURN_ERROR_ON((output->dimension(0) - input->dimension(0)) % 2); + ARM_COMPUTE_RETURN_ERROR_ON((output->dimension(1) - input->dimension(1)) % 2); + + return Status{}; +} } // namespace NEDepthConcatenateLayerKernel::NEDepthConcatenateLayerKernel() @@ -99,17 +124,8 @@ BorderSize NEDepthConcatenateLayerKernel::border_size() const void NEDepthConcatenateLayerKernel::configure(const ITensor *input, unsigned int depth_offset, ITensor *output) { - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32); - ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); - ARM_COMPUTE_ERROR_ON(input->info()->dimension(2) + depth_offset > output->info()->dimension(2)); - ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) > output->info()->dimension(0)); - ARM_COMPUTE_ERROR_ON(input->info()->dimension(1) > output->info()->dimension(1)); - ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(3, input, output); - - // The gaps between the two lowest dimensions of input and output need to be divisible by 2 - // Otherwise it is not clear how the padding should be added onto the input tensor - ARM_COMPUTE_ERROR_ON((output->info()->dimension(0) - input->info()->dimension(0)) % 2); - ARM_COMPUTE_ERROR_ON((output->info()->dimension(1) - input->info()->dimension(1)) % 2); + ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), depth_offset, output->info())); _func = nullptr; _input = input; @@ -120,6 +136,9 @@ void NEDepthConcatenateLayerKernel::configure(const ITensor *input, unsigned int switch(input->info()->data_type()) { + case DataType::QASYMM8: + _func = &depth_concat; + break; case DataType::F16: _func = &depth_concat; break; @@ -130,20 +149,20 @@ void NEDepthConcatenateLayerKernel::configure(const ITensor *input, unsigned int ARM_COMPUTE_ERROR("Unsupported data type."); } - const unsigned int num_elems_processed_per_iteration = 16 / input->info()->element_size(); - const unsigned int num_elems_read_per_iteration = 16 / input->info()->element_size(); - const unsigned int num_rows_read_per_iteration = 1; + // Configure kernel window + auto win_config = validate_and_configure_window(input->info(), depth_offset, 
output->info()); + ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config)); - // The window needs to be based on input as we copy all the depths of input - Window win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration)); - win.set(Window::DimZ, Window::Dimension(0, input->info()->tensor_shape().z(), 1)); - - AccessWindowRectangle input_access(input->info(), -_left_right, -_top_bottom, num_elems_read_per_iteration, num_rows_read_per_iteration); - AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration); - update_window_and_padding(win, input_access, output_access); - output_access.set_valid_region(win, ValidRegion(Coordinates(), output->info()->tensor_shape())); + INEKernel::configure(std::get<1>(win_config)); +} - INEKernel::configure(win); +Status NEDepthConcatenateLayerKernel::validate(const arm_compute::ITensorInfo *input, + unsigned int depth_offset, + const arm_compute::ITensorInfo *output) +{ + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, depth_offset, output)); + ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), depth_offset, output->clone().get()).first); + return Status{}; } void NEDepthConcatenateLayerKernel::run(const Window &window, const ThreadInfo &info) diff --git a/src/core/NEON/kernels/NEWidthConcatenateLayerKernel.cpp b/src/core/NEON/kernels/NEWidthConcatenateLayerKernel.cpp new file mode 100644 index 0000000000..1b38677991 --- /dev/null +++ b/src/core/NEON/kernels/NEWidthConcatenateLayerKernel.cpp @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "arm_compute/core/NEON/kernels/NEWidthConcatenateLayerKernel.h" + +#include "arm_compute/core/Error.h" +#include "arm_compute/core/Helpers.h" +#include "arm_compute/core/IAccessWindow.h" +#include "arm_compute/core/ITensor.h" +#include "arm_compute/core/NEON/wrapper/wrapper.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/Utils.h" +#include "arm_compute/core/Validate.h" +#include "arm_compute/core/Window.h" + +#include + +using namespace arm_compute; + +namespace +{ +std::pair validate_and_configure_window(ITensorInfo *input, unsigned int width_offset, ITensorInfo *output) +{ + const unsigned int num_elems_processed_per_iteration = 16 / output->element_size(); + + // The window needs to be based on input as we copy all the widths of input + Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration)); + AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration); + AccessWindowHorizontal output_access(output, width_offset, num_elems_processed_per_iteration); + bool window_changed = update_window_and_padding(win, input_access, output_access); + + Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{}; + return std::make_pair(err, win); +} + +Status validate_arguments(const ITensorInfo *input, unsigned int width_offset, const ITensorInfo *output) +{ + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, + DataType::U8, DataType::S8, DataType::QASYMM8, + DataType::U16, DataType::S16, DataType::F16, + DataType::U32, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); + ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) + width_offset > output->dimension(0)); + + for(size_t i = 1; i < Coordinates::num_max_dimensions; ++i) + { + ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(i) != output->dimension(i)); + } + ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 3); + + return Status{}; +} +} // namespace + +NEWidthConcatenateLayerKernel::NEWidthConcatenateLayerKernel() + : _input(nullptr), _output(nullptr), _width_offset(0) +{ +} + +void NEWidthConcatenateLayerKernel::configure(const ITensor *input, unsigned int width_offset, ITensor *output) +{ + ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), width_offset, output->info())); + + _input = input; + _output = output; + _width_offset = width_offset; + + // Configure kernel window + auto win_config = validate_and_configure_window(input->info(), width_offset, output->info()); + ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config)); + + INEKernel::configure(std::get<1>(win_config)); +} + +Status NEWidthConcatenateLayerKernel::validate(const ITensorInfo *input, unsigned int width_offset, const ITensorInfo *output) +{ + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, width_offset, output)); + ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), width_offset, output->clone().get()).first); + return Status{}; +} + +void NEWidthConcatenateLayerKernel::run(const Window &window, const ThreadInfo &info) +{ + ARM_COMPUTE_UNUSED(info); + ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); + ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window); + + // Offset output pointer to the correct position + uint8_t *output_ptr = _output->buffer() + _output->info()->offset_first_element_in_bytes() + _width_offset * 
_output->info()->strides_in_bytes()[0]; + + // Create iterators + Iterator input(_input, window); + Iterator output(_output, window); + + execute_window_loop(window, [&](const Coordinates & id) + { + const auto in_ptr = input.ptr(); + const auto out_ptr = output_ptr + output.offset(); + + wrapper::vstore(out_ptr, wrapper::vloadq(in_ptr)); + }, + input, output); +} diff --git a/src/runtime/CL/functions/CLConcatenateLayer.cpp b/src/runtime/CL/functions/CLConcatenateLayer.cpp index f4bc1ff4ac..018c674c83 100644 --- a/src/runtime/CL/functions/CLConcatenateLayer.cpp +++ b/src/runtime/CL/functions/CLConcatenateLayer.cpp @@ -39,7 +39,7 @@ CLConcatenateLayer::CLConcatenateLayer() { } -void CLConcatenateLayer::configure(std::vector inputs_vector, ICLTensor *output, DataLayoutDimension axis) +void CLConcatenateLayer::configure(const std::vector &inputs_vector, ICLTensor *output, DataLayoutDimension axis) { ARM_COMPUTE_ERROR_ON(output == nullptr); @@ -48,14 +48,14 @@ void CLConcatenateLayer::configure(std::vector inputs_vector, ICLTe case 0: { auto func = support::cpp14::make_unique(); - func->configure(std::move(inputs_vector), output); + func->configure(inputs_vector, output); _concat_function = std::move(func); break; } case 2: { auto func = support::cpp14::make_unique(); - func->configure(std::move(inputs_vector), output); + func->configure(inputs_vector, output); _concat_function = std::move(func); break; } diff --git a/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp b/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp index 174be94410..b5e8fd96d0 100644 --- a/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp +++ b/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp @@ -43,7 +43,7 @@ CLDepthConcatenateLayer::CLDepthConcatenateLayer() // NOLINT { } -void CLDepthConcatenateLayer::configure(std::vector inputs_vector, ICLTensor *output) // NOLINT +void CLDepthConcatenateLayer::configure(const std::vector &inputs_vector, ICLTensor *output) // NOLINT { _num_inputs = inputs_vector.size(); diff --git a/src/runtime/NEON/functions/NEConcatenateLayer.cpp b/src/runtime/NEON/functions/NEConcatenateLayer.cpp new file mode 100644 index 0000000000..21ab47d3fe --- /dev/null +++ b/src/runtime/NEON/functions/NEConcatenateLayer.cpp @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "arm_compute/runtime/NEON/functions/NEConcatenateLayer.h" + +#include "arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h" +#include "arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h" + +#include "arm_compute/core/Error.h" +#include "arm_compute/core/ITensor.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/Types.h" +#include "support/ToolchainSupport.h" + +namespace arm_compute +{ +NEConcatenateLayer::NEConcatenateLayer() + : _concat_function(nullptr) +{ +} + +void NEConcatenateLayer::configure(const std::vector &inputs_vector, ITensor *output, DataLayoutDimension axis) +{ + ARM_COMPUTE_ERROR_ON(output == nullptr); + + switch(get_data_layout_dimension_index(output->info()->data_layout(), axis)) + { + case 0: + { + auto func = support::cpp14::make_unique(); + func->configure(inputs_vector, output); + _concat_function = std::move(func); + break; + } + case 2: + { + auto func = support::cpp14::make_unique(); + func->configure(inputs_vector, output); + _concat_function = std::move(func); + break; + } + default: + ARM_COMPUTE_ERROR("Concatenation is supported across width and depth only!"); + } +} + +Status NEConcatenateLayer::validate(const std::vector &inputs_vector, const ITensorInfo *output, DataLayoutDimension axis) +{ + ARM_COMPUTE_RETURN_ERROR_ON(output == nullptr); + + switch(get_data_layout_dimension_index(output->data_layout(), axis)) + { + case 0: + ARM_COMPUTE_RETURN_ON_ERROR(NEWidthConcatenateLayer::validate(inputs_vector, output)); + break; + case 2: + ARM_COMPUTE_RETURN_ON_ERROR(NEDepthConcatenateLayer::validate(inputs_vector, output)); + break; + default: + ARM_COMPUTE_RETURN_ERROR_MSG("Concatenation is supported across width and depth only!"); + } + return Status{}; +} + +void NEConcatenateLayer::run() +{ + ARM_COMPUTE_ERROR_ON(_concat_function == nullptr); + _concat_function->run(); +} +} // namespace arm_compute diff --git a/src/runtime/NEON/functions/NEDepthConcatenateLayer.cpp b/src/runtime/NEON/functions/NEDepthConcatenateLayer.cpp index cb0157664b..49db855f21 100644 --- a/src/runtime/NEON/functions/NEDepthConcatenateLayer.cpp +++ b/src/runtime/NEON/functions/NEDepthConcatenateLayer.cpp @@ -27,6 +27,7 @@ #include "arm_compute/core/Helpers.h" #include "arm_compute/core/ITensor.h" #include "arm_compute/core/PixelValue.h" +#include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/Types.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "arm_compute/runtime/NEON/NEScheduler.h" @@ -42,10 +43,8 @@ NEDepthConcatenateLayer::NEDepthConcatenateLayer() // NOLINT { } -void NEDepthConcatenateLayer::configure(std::vector inputs_vector, ITensor *output) // NOLINT +void NEDepthConcatenateLayer::configure(const std::vector &inputs_vector, ITensor *output) // NOLINT { - ARM_COMPUTE_ERROR_ON(inputs_vector.size() < 2); - _num_inputs = inputs_vector.size(); _concat_kernels_vector = arm_compute::support::cpp14::make_unique(_num_inputs); _border_handlers_vector = arm_compute::support::cpp14::make_unique(_num_inputs); @@ -59,6 +58,7 @@ void NEDepthConcatenateLayer::configure(std::vector inputs_vector, IT // Output auto inizialitation if not yet initialized auto_init_if_empty(*output->info(), output_shape, 1, inputs_vector[0]->info()->data_type()); + ARM_COMPUTE_ERROR_THROW_ON(NEDepthConcatenateLayer::validate(inputs_vector_info, output->info())); unsigned int depth_offset = 0; for(unsigned int i = 0; i < _num_inputs; ++i) @@ -73,6 +73,27 @@ void NEDepthConcatenateLayer::configure(std::vector inputs_vector, IT 
output->info()->set_valid_region(ValidRegion(Coordinates(), output_shape)); } +Status NEDepthConcatenateLayer::validate(const std::vector &inputs_vector, const ITensorInfo *output) +{ + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output); + ARM_COMPUTE_RETURN_ERROR_ON(inputs_vector.size() < 2); + + // Output auto inizialitation if not yet initialized + TensorInfo tmp_output_info = *output->clone(); + TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector); + auto_init_if_empty(tmp_output_info, output_shape, 1, inputs_vector[0]->data_type()); + + unsigned int depth_offset = 0; + for(const auto &input : inputs_vector) + { + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input); + ARM_COMPUTE_RETURN_ON_ERROR(NEDepthConcatenateLayerKernel::validate(input, depth_offset, &tmp_output_info)); + depth_offset += input->dimension(2); + } + + return Status{}; +} + void NEDepthConcatenateLayer::run() { for(unsigned i = 0; i < _num_inputs; ++i) diff --git a/src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp b/src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp new file mode 100644 index 0000000000..097605c062 --- /dev/null +++ b/src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h" + +#include "arm_compute/core/Error.h" +#include "arm_compute/core/Helpers.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/core/utils/misc/ShapeCalculator.h" +#include "arm_compute/runtime/NEON/NEScheduler.h" +#include "arm_compute/runtime/Tensor.h" +#include "support/ToolchainSupport.h" + +using namespace arm_compute; + +NEWidthConcatenateLayer::NEWidthConcatenateLayer() + : _concat_kernels_vector(), + _num_inputs(0) +{ +} + +Status NEWidthConcatenateLayer::validate(const std::vector &inputs_vector, const ITensorInfo *output) +{ + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output); + ARM_COMPUTE_RETURN_ERROR_ON(inputs_vector.size() < 2); + + // Output auto inizialitation if not yet initialized + TensorInfo tmp_output_info = *output->clone(); + TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_width_concatenate_shape(inputs_vector); + auto_init_if_empty(tmp_output_info, output_shape, 1, inputs_vector[0]->data_type()); + + unsigned int width_offset = 0; + for(const auto &input : inputs_vector) + { + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input); + ARM_COMPUTE_RETURN_ON_ERROR(NEWidthConcatenateLayerKernel::validate(input, width_offset, &tmp_output_info)); + width_offset += input->dimension(0); + } + + return Status{}; +} + +void NEWidthConcatenateLayer::configure(std::vector inputs_vector, ITensor *output) +{ + _num_inputs = inputs_vector.size(); + + std::vector inputs_vector_info; + for(unsigned int i = 0; i < _num_inputs; i++) + { + inputs_vector_info.emplace_back(inputs_vector.at(i)->info()); + } + TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_width_concatenate_shape(inputs_vector); + + // Output auto inizialitation if not yet initialized + auto_init_if_empty(*output->info(), output_shape, 1, inputs_vector[0]->info()->data_type()); + ARM_COMPUTE_ERROR_THROW_ON(NEWidthConcatenateLayer::validate(inputs_vector_info, output->info())); + + unsigned int width_offset = 0; + + _concat_kernels_vector = arm_compute::support::cpp14::make_unique(_num_inputs); + + for(unsigned int i = 0; i < _num_inputs; i++) + { + _concat_kernels_vector[i].configure(inputs_vector.at(i), width_offset, output); + width_offset += inputs_vector.at(i)->info()->dimension(0); + } +} + +void NEWidthConcatenateLayer::run() +{ + for(unsigned i = 0; i < _num_inputs; i++) + { + NEScheduler::get().schedule(_concat_kernels_vector.get() + i, Window::DimY); + } +} -- cgit v1.2.1
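
Usage note (illustrative, not part of the change above): the sketch below shows one way the new NEConcatenateLayer entry point introduced by this patch could be driven for an NHWC channel concatenation. It assumes the standard Tensor/TensorAllocator flow and the TensorInfo::set_data_layout() setter from contemporaneous Compute Library releases; the tensor names, shapes and data type are hypothetical. Per the dispatch switch added in NEConcatenateLayer.cpp, concatenating along CHANNEL for NHWC tensors resolves to dimension index 0 and is handled by the new NEWidthConcatenateLayer, while NCHW channel concatenation continues to use NEDepthConcatenateLayer.

    // Illustrative sketch only -- not part of the patch. Shapes and names are hypothetical.
    #include "arm_compute/core/Error.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEConcatenateLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        Tensor src0{}, src1{}, dst{};

        // Two NHWC inputs that differ only in the channel dimension.
        // Shapes are given innermost-dimension first, so an NHWC shape starts with channels.
        TensorInfo info0(TensorShape(16U, 32U, 32U), 1, DataType::F32);
        TensorInfo info1(TensorShape(24U, 32U, 32U), 1, DataType::F32);
        TensorInfo info_dst(TensorShape(40U, 32U, 32U), 1, DataType::F32); // 16 + 24 channels
        info0.set_data_layout(DataLayout::NHWC);
        info1.set_data_layout(DataLayout::NHWC);
        info_dst.set_data_layout(DataLayout::NHWC);
        src0.allocator()->init(info0);
        src1.allocator()->init(info1);
        dst.allocator()->init(info_dst);

        // Optional static check before configuring, using the validate() added in this patch.
        Status s = NEConcatenateLayer::validate({ src0.info(), src1.info() }, dst.info(), DataLayoutDimension::CHANNEL);
        if(s.error_code() != ErrorCode::OK)
        {
            return 1;
        }

        // Concatenate along the channel axis; with NHWC tensors this resolves to
        // dimension index 0 and is served by the new width-concatenation kernel.
        NEConcatenateLayer concat;
        concat.configure({ &src0, &src1 }, &dst, DataLayoutDimension::CHANNEL);

        src0.allocator()->allocate();
        src1.allocator()->allocate();
        dst.allocator()->allocate();

        // ... fill src0 and src1 with data (omitted) ...

        concat.run();
        return 0;
    }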