path: root/src/runtime/NEON
author    Georgios Pinitas <georgios.pinitas@arm.com>  2019-04-12 13:15:58 +0100
committer Georgios Pinitas <georgios.pinitas@arm.com>  2019-04-15 16:52:22 +0000
commit    9e4824c909b14dbaf7106e9527b0ffa22ef09bdc (patch)
tree      b1cc8f6a8b275a7e227e305f1b02870d5e0f30ec /src/runtime/NEON
parent    d66094e37ecd747e85f30130e1a678bdbaf30788 (diff)
download  ComputeLibrary-9e4824c909b14dbaf7106e9527b0ffa22ef09bdc.tar.gz
COMPMID-2111: ConcatenateLayer API should accept an index instead of an enum
Alters the concatenate layer to be layout agnostic and accept an index as the concatenation axis instead of a typed, layout-dependent enumeration.

Change-Id: I0eaaf919f66a1ba1b09bbfb47c171fc1d4045530
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/994
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/runtime/NEON')
-rw-r--r--  src/runtime/NEON/functions/NEConcatenateLayer.cpp  15
-rw-r--r--  src/runtime/NEON/functions/NEPadLayer.cpp            2
2 files changed, 8 insertions(+), 9 deletions(-)
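For context, a minimal usage sketch of the new configure() signature introduced by this change. The tensor names and the chosen axis value are illustrative assumptions, not part of the patch:

#include "arm_compute/core/ITensor.h"
#include "arm_compute/runtime/NEON/functions/NEConcatenateLayer.h"

using namespace arm_compute;

void concat_depth(ITensor *a, ITensor *b, ITensor *dst)
{
    NEConcatenateLayer concat;
    // The axis is now a plain dimension index: 2 corresponds to Window::DimZ
    // (a depth concatenation), independent of NCHW/NHWC data layout.
    concat.configure({ a, b }, dst, 2);
    concat.run();
}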
diff --git a/src/runtime/NEON/functions/NEConcatenateLayer.cpp b/src/runtime/NEON/functions/NEConcatenateLayer.cpp
index fa7b91c3ca..e02c0c2c7a 100644
--- a/src/runtime/NEON/functions/NEConcatenateLayer.cpp
+++ b/src/runtime/NEON/functions/NEConcatenateLayer.cpp
@@ -44,10 +44,10 @@ NEConcatenateLayer::NEConcatenateLayer()
{
}
-void NEConcatenateLayer::configure(const std::vector<ITensor *> &inputs_vector, ITensor *output, DataLayoutDimension axis)
+void NEConcatenateLayer::configure(const std::vector<ITensor *> &inputs_vector, ITensor *output, size_t axis)
{
ARM_COMPUTE_ERROR_ON(output == nullptr);
- _axis = get_data_layout_dimension_index(output->info()->data_layout(), axis);
+ _axis = axis;
_num_inputs = inputs_vector.size();
std::vector<ITensorInfo *> inputs_vector_info;
@@ -104,22 +104,21 @@ void NEConcatenateLayer::configure(const std::vector<ITensor *> &inputs_vector,
}
}
-Status NEConcatenateLayer::validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output, DataLayoutDimension axis)
+Status NEConcatenateLayer::validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output, size_t axis)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
ARM_COMPUTE_RETURN_ERROR_ON(inputs_vector.size() < 2);
- const unsigned int _axis = get_data_layout_dimension_index(inputs_vector[0]->data_layout(), axis);
// Output auto inizialitation if not yet initialized
TensorInfo tmp_output_info = *output->clone();
TensorShape output_shape{};
- if(_axis == Window::DimZ)
+ if(axis == Window::DimZ)
{
output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector);
}
else
{
- output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, _axis);
+ output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, axis);
}
auto_init_if_empty(tmp_output_info, output_shape, 1, inputs_vector[0]->data_type());
@@ -127,7 +126,7 @@ Status NEConcatenateLayer::validate(const std::vector<ITensorInfo *> &inputs_vec
for(const auto &input : inputs_vector)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
- switch(_axis)
+ switch(axis)
{
case Window::DimX:
{
@@ -147,7 +146,7 @@ Status NEConcatenateLayer::validate(const std::vector<ITensorInfo *> &inputs_vec
default:
ARM_COMPUTE_ERROR("Axis not supported");
}
- offset += input->dimension(_axis);
+ offset += input->dimension(axis);
}
return Status{};
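Callers that still reason in terms of a layout dimension can resolve the index themselves at the call site before invoking the new API. A hedged sketch, assuming get_data_layout_dimension_index() is reachable via arm_compute/core/Helpers.h in this version of the library:

#include "arm_compute/core/Helpers.h"
#include "arm_compute/runtime/NEON/functions/NEConcatenateLayer.h"

using namespace arm_compute;

void concat_on_channel(const std::vector<ITensor *> &inputs, ITensor *dst)
{
    // Map the layout-dependent CHANNEL dimension to a plain index
    // (2 for NCHW, 0 for NHWC) and pass that index directly.
    const size_t axis = get_data_layout_dimension_index(dst->info()->data_layout(),
                                                        DataLayoutDimension::CHANNEL);
    NEConcatenateLayer concat;
    concat.configure(inputs, dst, axis);
}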
diff --git a/src/runtime/NEON/functions/NEPadLayer.cpp b/src/runtime/NEON/functions/NEPadLayer.cpp
index 62a7d4559b..6af2ee8868 100644
--- a/src/runtime/NEON/functions/NEPadLayer.cpp
+++ b/src/runtime/NEON/functions/NEPadLayer.cpp
@@ -182,7 +182,7 @@ void NEPadLayer::configure_reflect_symmetric_mode(ITensor *input, ITensor *outpu
}
// Concatenate the padding before and after with the input.
ITensor *out = (i == _num_dimensions - 1) ? output : &_concat_results[i];
- _concat_functions[i].configure(concat_vector, out, get_index_data_layout_dimension(input->info()->data_layout(), i));
+ _concat_functions[i].configure(concat_vector, out, i);
if(i != _num_dimensions - 1)
{
_concat_results[i].allocator()->allocate();