author     Pablo Tello <pablo.tello@arm.com>    2019-03-04 14:14:02 +0000
committer  Gian Marco Iodice <gianmarco.iodice@arm.com>    2019-03-19 10:18:10 +0000
commit     3dd5b6884a65c06bcb9d15589ee2dc2978e3b336 (patch)
tree       e45ccae66b69c8db853ac883080c1c6358a57aec /src/runtime/NEON/functions
parent     2f7c149f36fa3e6296aba6de666962947f032558 (diff)
download   ComputeLibrary-3dd5b6884a65c06bcb9d15589ee2dc2978e3b336.tar.gz
COMPMID-1933: Implement NEHeightConcatenateLayer.
Added support to concatenate tensors along the Y axis in NEConcatenateLayer.

Change-Id: Ib714bfcf9954cc35918efa7d52fc9164bb08bdf6
Signed-off-by: Pablo Tello <pablo.tello@arm.com>
Reviewed-on: https://review.mlplatform.org/c/841
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
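For context, a minimal usage sketch of the new path (not part of this commit): concatenating two tensors along the Y axis through NEConcatenateLayer. The shapes, data type and surrounding main() below are illustrative assumptions; only the configure()/run() calls and DataLayoutDimension::HEIGHT come from the library API.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEConcatenateLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Illustrative shapes: two 8x4 F32 tensors stacked along Y give an 8x8 output.
    Tensor input0, input1, output;
    input0.allocator()->init(TensorInfo(TensorShape(8U, 4U), 1, DataType::F32));
    input1.allocator()->init(TensorInfo(TensorShape(8U, 4U), 1, DataType::F32));
    output.allocator()->init(TensorInfo(TensorShape(8U, 8U), 1, DataType::F32));

    NEConcatenateLayer concat;
    // With the default NCHW layout, DataLayoutDimension::HEIGHT resolves to axis 1,
    // which selects the height-concatenation kernels added by this patch.
    concat.configure({ &input0, &input1 }, &output, DataLayoutDimension::HEIGHT);

    input0.allocator()->allocate();
    input1.allocator()->allocate();
    output.allocator()->allocate();
    // ... fill input0 and input1 before running ...

    concat.run();
    return 0;
}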
Diffstat (limited to 'src/runtime/NEON/functions')
-rw-r--r--   src/runtime/NEON/functions/NEConcatenateLayer.cpp        95
-rw-r--r--   src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp    4
2 files changed, 91 insertions, 8 deletions
diff --git a/src/runtime/NEON/functions/NEConcatenateLayer.cpp b/src/runtime/NEON/functions/NEConcatenateLayer.cpp
index 21ab47d3fe..f764a126a0 100644
--- a/src/runtime/NEON/functions/NEConcatenateLayer.cpp
+++ b/src/runtime/NEON/functions/NEConcatenateLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -26,6 +26,9 @@
#include "arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h"
#include "arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/runtime/NEON/NEScheduler.h"
+
#include "arm_compute/core/Error.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/TensorInfo.h"
@@ -35,15 +38,66 @@
namespace arm_compute
{
NEConcatenateLayer::NEConcatenateLayer()
- : _concat_function(nullptr)
+ : _concat_function(nullptr),
+ _hconcat_kernels(),
+ _num_inputs(0),
+ _axis(Window::DimX)
{
}
+Status NEConcatenateLayer::validate_h_concatenate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
+ ARM_COMPUTE_RETURN_ERROR_ON(inputs_vector.size() < 2);
+
+ // Output auto initialization if not yet initialized
+ TensorInfo tmp_output_info = *output->clone();
+ TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, Window::DimY);
+ auto_init_if_empty(tmp_output_info, output_shape, 1, inputs_vector[0]->data_type());
+
+ unsigned int offset = 0;
+ for(const auto &input : inputs_vector)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
+ ARM_COMPUTE_RETURN_ON_ERROR(NEHeightConcatenateLayerKernel::validate(input, offset, &tmp_output_info));
+ offset += input->dimension(Window::DimY);
+ }
+
+ return Status{};
+}
+
+void NEConcatenateLayer::configure_h_concatenate(std::vector<ITensor *> inputs_vector, ITensor *output)
+{
+ _num_inputs = inputs_vector.size();
+
+ std::vector<ITensorInfo *> inputs_vector_info;
+ for(unsigned int i = 0; i < _num_inputs; ++i)
+ {
+ ARM_COMPUTE_ERROR_ON_NULLPTR(inputs_vector.at(i));
+ inputs_vector_info.emplace_back(inputs_vector.at(i)->info());
+ }
+ TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, Window::DimY);
+
+ // Output auto initialization if not yet initialized
+ auto_init_if_empty(*output->info(), output_shape, 1, inputs_vector[0]->info()->data_type());
+ ARM_COMPUTE_ERROR_THROW_ON(validate_h_concatenate(inputs_vector_info, output->info()));
+
+ unsigned int offset = 0;
+
+ _hconcat_kernels = arm_compute::support::cpp14::make_unique<NEHeightConcatenateLayerKernel[]>(_num_inputs);
+
+ for(unsigned int i = 0; i < _num_inputs; ++i)
+ {
+ _hconcat_kernels[i].configure(inputs_vector.at(i), offset, output);
+ offset += inputs_vector.at(i)->info()->dimension(Window::DimY);
+ }
+}
+
void NEConcatenateLayer::configure(const std::vector<ITensor *> &inputs_vector, ITensor *output, DataLayoutDimension axis)
{
ARM_COMPUTE_ERROR_ON(output == nullptr);
-
- switch(get_data_layout_dimension_index(output->info()->data_layout(), axis))
+ _axis = get_data_layout_dimension_index(output->info()->data_layout(), axis);
+ switch(_axis)
{
case 0:
{
@@ -52,6 +106,11 @@ void NEConcatenateLayer::configure(const std::vector<ITensor *> &inputs_vector,
_concat_function = std::move(func);
break;
}
+ case 1:
+ {
+ configure_h_concatenate(inputs_vector, output);
+ break;
+ }
case 2:
{
auto func = support::cpp14::make_unique<NEDepthConcatenateLayer>();
@@ -73,6 +132,9 @@ Status NEConcatenateLayer::validate(const std::vector<ITensorInfo *> &inputs_vec
case 0:
ARM_COMPUTE_RETURN_ON_ERROR(NEWidthConcatenateLayer::validate(inputs_vector, output));
break;
+ case 1:
+ ARM_COMPUTE_RETURN_ON_ERROR(NEConcatenateLayer::validate_h_concatenate(inputs_vector, output));
+ break;
case 2:
ARM_COMPUTE_RETURN_ON_ERROR(NEDepthConcatenateLayer::validate(inputs_vector, output));
break;
@@ -84,7 +146,28 @@ Status NEConcatenateLayer::validate(const std::vector<ITensorInfo *> &inputs_vec
void NEConcatenateLayer::run()
{
- ARM_COMPUTE_ERROR_ON(_concat_function == nullptr);
- _concat_function->run();
+ switch(_axis)
+ {
+ case 0:
+ case 2:
+ {
+ ARM_COMPUTE_ERROR_ON(_concat_function == nullptr);
+ _concat_function->run();
+ break;
+ }
+ case 1:
+ {
+ for(unsigned i = 0; i < _num_inputs; ++i)
+ {
+ NEScheduler::get().schedule(_hconcat_kernels.get() + i, Window::DimY);
+ }
+ break;
+ }
+ default:
+ {
+ ARM_COMPUTE_ERROR("Axis not supported.");
+ break;
+ }
+ }
}
} // namespace arm_compute
diff --git a/src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp b/src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp
index 17c352b8f3..9fce13cbd7 100644
--- a/src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp
+++ b/src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp
@@ -48,7 +48,7 @@ inline Status NEWidthConcatenateLayer::validate_internal(const std::vector<Tenso
// Output auto initialization if not yet initialized
TensorInfo tmp_output_info = *output->clone();
- TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_width_concatenate_shape(inputs_vector);
+ TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, Window::DimX);
auto_init_if_empty(tmp_output_info, output_shape, 1, inputs_vector[0]->data_type());
unsigned int width_offset = 0;
@@ -71,7 +71,7 @@ inline void NEWidthConcatenateLayer::configure_internal(std::vector<TensorType *
{
inputs_vector_info.emplace_back(inputs_vector.at(i)->info());
}
- TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_width_concatenate_shape(inputs_vector);
+ TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, Window::DimX);
// Output auto initialization if not yet initialized
auto_init_if_empty(*output->info(), output_shape, 1, inputs_vector[0]->info()->data_type());
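Both files now derive the output shape from the generalised calculate_concatenate_shape(inputs, dimension) helper rather than the width-only calculate_width_concatenate_shape. A rough standalone sketch of that arithmetic follows (the real helper lives in arm_compute/core/utils/misc/ShapeCalculator.h; the function below is illustrative, not the library implementation): the output copies the first input's shape and replaces the concatenation dimension with the sum of all inputs' extents along it.

#include <cstddef>
#include <vector>

// Illustrative stand-in for the concatenate-shape arithmetic: every output
// dimension matches the first input, except the concatenation axis, which is
// the sum of the inputs' extents along that axis.
std::vector<size_t> concat_output_shape(const std::vector<std::vector<size_t>> &shapes, size_t axis)
{
    std::vector<size_t> out = shapes.front();
    out[axis] = 0;
    for(const auto &shape : shapes)
    {
        out[axis] += shape[axis];
    }
    return out;
}

// Example: two 8x4 inputs concatenated on axis 1 (height) -> 8x8.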