diff options
author | Pablo Tello <pablo.tello@arm.com> | 2019-03-04 14:14:02 +0000 |
---|---|---|
committer | Gian Marco Iodice <gianmarco.iodice@arm.com> | 2019-03-19 10:18:10 +0000 |
commit | 3dd5b6884a65c06bcb9d15589ee2dc2978e3b336 (patch) | |
tree | e45ccae66b69c8db853ac883080c1c6358a57aec /src | |
parent | 2f7c149f36fa3e6296aba6de666962947f032558 (diff) | |
download | ComputeLibrary-3dd5b6884a65c06bcb9d15589ee2dc2978e3b336.tar.gz |
COMPMID-1933: Implement NEHeightConcatenateLayer.
Added support to concatenate tensors along the Y axis in NEConcatenateLayer.
Change-Id: Ib714bfcf9954cc35918efa7d52fc9164bb08bdf6
Signed-off-by: Pablo Tello <pablo.tello@arm.com>
Reviewed-on: https://review.mlplatform.org/c/841
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src')
5 files changed, 236 insertions, 15 deletions
diff --git a/src/core/NEON/kernels/NEHeightConcatenateLayerKernel.cpp b/src/core/NEON/kernels/NEHeightConcatenateLayerKernel.cpp new file mode 100644 index 0000000000..b8e204cfd8 --- /dev/null +++ b/src/core/NEON/kernels/NEHeightConcatenateLayerKernel.cpp @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2019 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "arm_compute/core/NEON/kernels/NEHeightConcatenateLayerKernel.h" + +#include "arm_compute/core/Error.h" +#include "arm_compute/core/Helpers.h" +#include "arm_compute/core/IAccessWindow.h" +#include "arm_compute/core/ITensor.h" +#include "arm_compute/core/NEON/NEAsymm.h" +#include "arm_compute/core/NEON/wrapper/wrapper.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/Utils.h" +#include "arm_compute/core/Validate.h" +#include "arm_compute/core/Window.h" + +#include <cstdint> + +using namespace arm_compute; + +namespace +{ +std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output) +{ + const unsigned int num_elems_processed_per_iteration = 16 / output->element_size(); + + // The window needs to be based on input as we copy all the widths of input + Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration)); + AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration); + AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration); + bool window_changed = update_window_and_padding(win, input_access, output_access); + + Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{}; + return std::make_pair(err, win); +} + +Status validate_arguments(const ITensorInfo *input, unsigned int height_offset, const ITensorInfo *output) +{ + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + // Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use NEON FP16 instructions. 
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, + DataType::U8, DataType::S8, DataType::QASYMM8, + DataType::U16, DataType::S16, DataType::F16, + DataType::U32, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); + ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(Window::DimX) != output->dimension(Window::DimX)); + ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(Window::DimY) + height_offset > output->dimension(Window::DimY)); + for(size_t i = 2; i < Coordinates::num_max_dimensions; ++i) + { + ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(i) != output->dimension(i)); + } + + return Status{}; +} +} // namespace + +NEHeightConcatenateLayerKernel::NEHeightConcatenateLayerKernel() + : _input(nullptr), _output(nullptr), _height_offset(0) +{ +} + +void NEHeightConcatenateLayerKernel::configure(const ITensor *input, unsigned int height_offset, ITensor *output) +{ + ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), height_offset, output->info())); + + _input = input; + _output = output; + _height_offset = height_offset; + + // Configure kernel window + auto win_config = validate_and_configure_window(input->info(), output->info()); + ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config)); + + INEKernel::configure(std::get<1>(win_config)); +} + +Status NEHeightConcatenateLayerKernel::validate(const ITensorInfo *input, unsigned int height_offset, const ITensorInfo *output) +{ + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, height_offset, output)); + ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get()).first); + return Status{}; +} + +void NEHeightConcatenateLayerKernel::run(const Window &window, const ThreadInfo &info) +{ + ARM_COMPUTE_UNUSED(info); + ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); + ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window); + + // Offset output pointer to the correct position + 
uint8_t *output_ptr = _output->buffer() + _output->info()->offset_first_element_in_bytes() + _height_offset * _output->info()->strides_in_bytes()[Window::DimY]; + + // Create iterators + Iterator input(_input, window); + Iterator output(_output, window); + const DataType dt = _input->info()->data_type(); + const QuantizationInfo &input_qinfo = _input->info()->quantization_info(); + const QuantizationInfo &output_qinfo = _output->info()->quantization_info(); + if(dt == DataType::QASYMM8 && input_qinfo != output_qinfo) + { + execute_window_loop(window, [&](const Coordinates &) + { + vst1q_u8(output_ptr + output.offset(), vquantize(vdequantize(vld1q_u8(input.ptr()), input_qinfo), output_qinfo)); + }, + input, output); + } + else + { + execute_window_loop(window, [&](const Coordinates &) + { + const auto in_ptr = input.ptr(); + const auto out_ptr = output_ptr + output.offset(); + + wrapper::vstore(out_ptr, wrapper::vloadq(in_ptr)); + }, + input, output); + } +} diff --git a/src/graph/nodes/ConcatenateLayerNode.cpp b/src/graph/nodes/ConcatenateLayerNode.cpp index ade3f6e1a9..3ce09d0073 100644 --- a/src/graph/nodes/ConcatenateLayerNode.cpp +++ b/src/graph/nodes/ConcatenateLayerNode.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 ARM Limited. + * Copyright (c) 2018-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -71,10 +71,9 @@ TensorDescriptor ConcatenateLayerNode::compute_output_descriptor(const std::vect shapes.emplace_back(&input_descriptor.shape); } - // Calculate output shape - if(axis_idx == 0) + if(axis_idx < 2) { - output_descriptor.shape = arm_compute::misc::shape_calculator::calculate_width_concatenate_shape(shapes); + output_descriptor.shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(shapes, axis_idx); } else if(axis_idx == 2) { @@ -138,4 +137,4 @@ void ConcatenateLayerNode::accept(INodeVisitor &v) v.visit(*this); } } // namespace graph -} // namespace arm_compute
\ No newline at end of file +} // namespace arm_compute diff --git a/src/runtime/CL/functions/CLWidthConcatenateLayer.cpp b/src/runtime/CL/functions/CLWidthConcatenateLayer.cpp index d0801a6768..6e42377a07 100644 --- a/src/runtime/CL/functions/CLWidthConcatenateLayer.cpp +++ b/src/runtime/CL/functions/CLWidthConcatenateLayer.cpp @@ -51,7 +51,7 @@ Status CLWidthConcatenateLayer::validate(const std::vector<ITensorInfo *> &input // Output auto inizialitation if not yet initialized TensorInfo tmp_output_info = *output->clone(); - const TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_width_concatenate_shape(inputs_vector); + const TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, Window::DimX); auto_init_if_empty(tmp_output_info, output_shape, 1, inputs_vector[0]->data_type()); switch(num_inputs) @@ -90,7 +90,7 @@ void CLWidthConcatenateLayer::configure(std::vector<ICLTensor *> inputs_vector, { inputs_vector_info.emplace_back(inputs_vector.at(i)->info()); } - const TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_width_concatenate_shape(inputs_vector); + const TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, Window::DimX); // Output auto inizialitation if not yet initialized auto_init_if_empty(*output->info(), output_shape, 1, inputs_vector[0]->info()->data_type()); diff --git a/src/runtime/NEON/functions/NEConcatenateLayer.cpp b/src/runtime/NEON/functions/NEConcatenateLayer.cpp index 21ab47d3fe..f764a126a0 100644 --- a/src/runtime/NEON/functions/NEConcatenateLayer.cpp +++ b/src/runtime/NEON/functions/NEConcatenateLayer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 ARM Limited. + * Copyright (c) 2018-2019 ARM Limited. 
* * SPDX-License-Identifier: MIT * @@ -26,6 +26,9 @@ #include "arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h" #include "arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h" +#include "arm_compute/core/utils/misc/ShapeCalculator.h" +#include "arm_compute/runtime/NEON/NEScheduler.h" + #include "arm_compute/core/Error.h" #include "arm_compute/core/ITensor.h" #include "arm_compute/core/TensorInfo.h" @@ -35,15 +38,66 @@ namespace arm_compute { NEConcatenateLayer::NEConcatenateLayer() - : _concat_function(nullptr) + : _concat_function(nullptr), + _hconcat_kernels(), + _num_inputs(0), + _axis(Window::DimX) { } +Status NEConcatenateLayer::validate_h_concatenate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output) +{ + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output); + ARM_COMPUTE_RETURN_ERROR_ON(inputs_vector.size() < 2); + + // Output auto inizialitation if not yet initialized + TensorInfo tmp_output_info = *output->clone(); + TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, Window::DimY); + auto_init_if_empty(tmp_output_info, output_shape, 1, inputs_vector[0]->data_type()); + + unsigned int offset = 0; + for(const auto &input : inputs_vector) + { + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input); + ARM_COMPUTE_RETURN_ON_ERROR(NEHeightConcatenateLayerKernel::validate(input, offset, &tmp_output_info)); + offset += input->dimension(Window::DimY); + } + + return Status{}; +} + +void NEConcatenateLayer::configure_h_concatenate(std::vector<ITensor *> inputs_vector, ITensor *output) +{ + _num_inputs = inputs_vector.size(); + + std::vector<ITensorInfo *> inputs_vector_info; + for(unsigned int i = 0; i < _num_inputs; ++i) + { + ARM_COMPUTE_ERROR_ON_NULLPTR(inputs_vector.at(i)); + inputs_vector_info.emplace_back(inputs_vector.at(i)->info()); + } + TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, Window::DimY); + + // Output auto 
inizialitation if not yet initialized + auto_init_if_empty(*output->info(), output_shape, 1, inputs_vector[0]->info()->data_type()); + ARM_COMPUTE_ERROR_THROW_ON(validate_h_concatenate(inputs_vector_info, output->info())); + + unsigned int offset = 0; + + _hconcat_kernels = arm_compute::support::cpp14::make_unique<NEHeightConcatenateLayerKernel[]>(_num_inputs); + + for(unsigned int i = 0; i < _num_inputs; ++i) + { + _hconcat_kernels[i].configure(inputs_vector.at(i), offset, output); + offset += inputs_vector.at(i)->info()->dimension(Window::DimY); + } +} + void NEConcatenateLayer::configure(const std::vector<ITensor *> &inputs_vector, ITensor *output, DataLayoutDimension axis) { ARM_COMPUTE_ERROR_ON(output == nullptr); - - switch(get_data_layout_dimension_index(output->info()->data_layout(), axis)) + _axis = get_data_layout_dimension_index(output->info()->data_layout(), axis); + switch(_axis) { case 0: { @@ -52,6 +106,11 @@ void NEConcatenateLayer::configure(const std::vector<ITensor *> &inputs_vector, _concat_function = std::move(func); break; } + case 1: + { + configure_h_concatenate(inputs_vector, output); + break; + } case 2: { auto func = support::cpp14::make_unique<NEDepthConcatenateLayer>(); @@ -73,6 +132,9 @@ Status NEConcatenateLayer::validate(const std::vector<ITensorInfo *> &inputs_vec case 0: ARM_COMPUTE_RETURN_ON_ERROR(NEWidthConcatenateLayer::validate(inputs_vector, output)); break; + case 1: + ARM_COMPUTE_RETURN_ON_ERROR(NEConcatenateLayer::validate_h_concatenate(inputs_vector, output)); + break; case 2: ARM_COMPUTE_RETURN_ON_ERROR(NEDepthConcatenateLayer::validate(inputs_vector, output)); break; @@ -84,7 +146,28 @@ Status NEConcatenateLayer::validate(const std::vector<ITensorInfo *> &inputs_vec void NEConcatenateLayer::run() { - ARM_COMPUTE_ERROR_ON(_concat_function == nullptr); - _concat_function->run(); + switch(_axis) + { + case 0: + case 2: + { + ARM_COMPUTE_ERROR_ON(_concat_function == nullptr); + _concat_function->run(); + break; + } + case 1: 
+ { + for(unsigned i = 0; i < _num_inputs; ++i) + { + NEScheduler::get().schedule(_hconcat_kernels.get() + i, Window::DimY); + } + break; + } + default: + { + ARM_COMPUTE_ERROR("Axis not supported."); + break; + } + } } } // namespace arm_compute diff --git a/src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp b/src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp index 17c352b8f3..9fce13cbd7 100644 --- a/src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp +++ b/src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp @@ -48,7 +48,7 @@ inline Status NEWidthConcatenateLayer::validate_internal(const std::vector<Tenso // Output auto inizialitation if not yet initialized TensorInfo tmp_output_info = *output->clone(); - TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_width_concatenate_shape(inputs_vector); + TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, Window::DimX); auto_init_if_empty(tmp_output_info, output_shape, 1, inputs_vector[0]->data_type()); unsigned int width_offset = 0; @@ -71,7 +71,7 @@ inline void NEWidthConcatenateLayer::configure_internal(std::vector<TensorType * { inputs_vector_info.emplace_back(inputs_vector.at(i)->info()); } - TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_width_concatenate_shape(inputs_vector); + TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, Window::DimX); // Output auto inizialitation if not yet initialized auto_init_if_empty(*output->info(), output_shape, 1, inputs_vector[0]->info()->data_type()); |