author     Sang-Hoon Park <sang-hoon.park@arm.com>  2019-10-15 16:49:24 +0100
committer  Sang-Hoon Park <sang-hoon.park@arm.com>  2019-10-30 14:49:34 +0000
commit     2697fd8fa42425f7bfdd60dd486d4c2132b06523 (patch)
tree       098450f7f60211c7e5bfbd41eb1a7a10c1c0437f /src/runtime/NEON/functions/NEReductionOperation.cpp
parent     df4cf57c7394265b27d051cb1cf0152c53659126 (diff)
download   ComputeLibrary-2697fd8fa42425f7bfdd60dd486d4c2132b06523.tar.gz
COMPMID-2707: add keep_dims parameter to Reduction Operation
The added parameter decides whether or not to keep the target dimension of the reduction operation. ArgMinMax operations will always remove the reduced dimension. The following are updated to support the parameter:

- [CL/NEON] functions and reference kernel
- [CL/NEON] ArgMinMax function to use the ReductionOperation function
- [CL/NEON] validation test suites for Reduction and ArgMinMax operations to validate the added parameter
- ReductionOperationFixture is modified NOT to pre-populate the output tensor and now relies on the underlying kernel/function
- CL validation test suite for Reduction operation adjusted to remove excessive test cases with axis values beyond the input tensor's dimensions

Change-Id: I3e24d276ed469a4201f323001708f0c525f11c4f
Signed-off-by: Sang-Hoon Park <sang-hoon.park@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2167
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
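For illustration, a minimal usage sketch of the new keep_dims parameter (not part of the patch; the tensor shape, the SUM operation and the explicit nullptr memory manager are assumptions made for this example):

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/NEON/functions/NEReductionOperation.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Hypothetical 2D input of shape (width = 8, height = 4), reduced along axis 0.
    Tensor src;
    Tensor dst;
    src.allocator()->init(TensorInfo(TensorShape(8U, 4U), 1, DataType::F32));

    // keep_dims = false drops the reduced axis, so dst is expected to come out as
    // shape (4) rather than (1, 4); the function auto-initialises the empty dst.
    NEReductionOperation reduction(nullptr); // no memory manager needed for this sketch
    reduction.configure(&src, &dst, 0 /* axis */, ReductionOperation::SUM, false /* keep_dims */);

    src.allocator()->allocate();
    dst.allocator()->allocate();
    reduction.run();
    return 0;
}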
Diffstat (limited to 'src/runtime/NEON/functions/NEReductionOperation.cpp')
-rw-r--r--  src/runtime/NEON/functions/NEReductionOperation.cpp | 80
1 file changed, 72 insertions(+), 8 deletions(-)
diff --git a/src/runtime/NEON/functions/NEReductionOperation.cpp b/src/runtime/NEON/functions/NEReductionOperation.cpp
index dc6cf59019..09cd765d4b 100644
--- a/src/runtime/NEON/functions/NEReductionOperation.cpp
+++ b/src/runtime/NEON/functions/NEReductionOperation.cpp
@@ -24,6 +24,7 @@
#include "arm_compute/runtime/NEON/functions/NEReductionOperation.h"
#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
namespace arm_compute
@@ -52,25 +53,78 @@ size_t reduction_window_split_dimension(unsigned int axis)
}
} // namespace
-NEReductionOperation::NEReductionOperation()
- : _reduction_kernel(), _fill_border_kernel(), _window_split(0), _reduction_axis()
+NEReductionOperation::NEReductionOperation(std::shared_ptr<IMemoryManager> memory_manager)
+ : _memory_group(memory_manager), _reduction_kernel(), _fill_border_kernel(), _reshape_kernel(), _output_internal(), _window_split(0), _reduction_axis(), _is_reshape_required(false)
{
}
-Status NEReductionOperation::validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op)
+Status NEReductionOperation::validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op, bool keep_dims)
{
- ARM_COMPUTE_RETURN_ON_ERROR(NEReductionOperationKernel::validate(input, output, axis, op));
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions, "Reduction axis greater than max number of dimensions");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis > 3, "Unsupported reduction axis");
+
+ const auto is_reshape_required = !keep_dims;
+
+ auto *output_internal = output;
+
+ TensorInfo info_before_reshape;
+
+ if(is_reshape_required)
+ {
+ const TensorInfo expected_output_shape = output->clone()->set_tensor_shape(arm_compute::misc::shape_calculator::compute_reduced_shape(input->tensor_shape(), axis, keep_dims));
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&expected_output_shape, output);
+
+ auto shape_before_reshape = input->tensor_shape();
+ shape_before_reshape.set(axis, 1);
+
+ const auto input_num_channels = input->num_channels();
+ const auto input_qinfo = input->quantization_info();
+ const auto is_arg_min_max = (op == ReductionOperation::ARG_IDX_MAX) || (op == ReductionOperation::ARG_IDX_MIN);
+ const auto output_data_type = is_arg_min_max ? DataType::U32 : output->data_type();
+
+ info_before_reshape.set_data_type(output_data_type).set_tensor_shape(shape_before_reshape).set_num_channels(input_num_channels).set_quantization_info(input_qinfo);
+
+ output_internal = &info_before_reshape;
+ }
+
+ ARM_COMPUTE_RETURN_ON_ERROR(NEReductionOperationKernel::validate(input, output_internal, axis, op));
+
+ if(is_reshape_required)
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(NEReshapeLayerKernel::validate(output_internal, output));
+ }
return Status{};
}
-void NEReductionOperation::configure(ITensor *input, ITensor *output, unsigned int axis, ReductionOperation op)
+void NEReductionOperation::configure(ITensor *input, ITensor *output, unsigned int axis, ReductionOperation op, bool keep_dims)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_ERROR_THROW_ON(NEReductionOperation::validate(input->info(), output->info(), axis, op));
+
+ _is_reshape_required = !keep_dims;
+
+ auto *output_internal = output;
+ const auto is_arg_min_max = (op == ReductionOperation::ARG_IDX_MAX) || (op == ReductionOperation::ARG_IDX_MIN);
+
+ if(_is_reshape_required)
+ {
+ const auto output_internal_shape = arm_compute::misc::shape_calculator::compute_reduced_shape(input->info()->tensor_shape(), axis);
+ const auto output_external_shape = arm_compute::misc::shape_calculator::compute_reduced_shape(input->info()->tensor_shape(), axis, false);
+ const auto output_data_type = is_arg_min_max ? DataType::U32 : input->info()->data_type();
+ const auto num_channels = input->info()->num_channels();
+ const auto qinfo = input->info()->quantization_info();
+
+ _output_internal.allocator()->init(input->info()->clone()->set_data_type(output_data_type).set_tensor_shape(output_internal_shape).reset_padding().set_is_resizable(true).set_num_channels(
+ num_channels).set_quantization_info(qinfo));
+ _memory_group.manage(&_output_internal);
+ output_internal = &_output_internal;
+ auto_init_if_empty(*output->info(), input->info()->clone()->set_data_type(output_data_type).set_tensor_shape(output_external_shape).reset_padding().set_is_resizable(true));
+ }
+
+ ARM_COMPUTE_ERROR_THROW_ON(NEReductionOperation::validate(input->info(), output->info(), axis, op, keep_dims));
// Configure reduction kernel
- _reduction_kernel.configure(input, output, axis, op);
+ _reduction_kernel.configure(input, output_internal, axis, op);
_window_split = reduction_window_split_dimension(axis);
_reduction_axis = axis;
@@ -150,7 +204,13 @@ void NEReductionOperation::configure(ITensor *input, ITensor *output, unsigned i
default:
ARM_COMPUTE_ERROR("Reduction Operation unsupported");
}
- _fill_border_kernel.configure(input, fill_border_size, BorderMode::CONSTANT, pixelValue);
+ _fill_border_kernel.configure(input, fill_border_size, (is_arg_min_max ? BorderMode::REPLICATE : BorderMode::CONSTANT), pixelValue);
+ }
+
+ if(_is_reshape_required)
+ {
+ _reshape_kernel.configure(output_internal, output);
+ _output_internal.allocator()->allocate();
}
}
@@ -161,5 +221,9 @@ void NEReductionOperation::run()
NEScheduler::get().schedule(&_fill_border_kernel, Window::DimY);
}
NEScheduler::get().schedule(&_reduction_kernel, _window_split);
+ if(_is_reshape_required)
+ {
+ NEScheduler::get().schedule(&_reshape_kernel, Window::DimY);
+ }
}
} // namespace arm_compute
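For reference, a standalone sketch of the shape helper used above (shapes chosen arbitrarily for illustration): compute_reduced_shape() keeps the reduced axis as a dimension of size 1 by default and removes it when keep_dims is false, which is how configure() derives the internal and external output shapes.

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include <iostream>

using namespace arm_compute;

int main()
{
    const TensorShape  input_shape(8U, 4U, 2U); // hypothetical 3D input
    const unsigned int axis = 1;

    // Shape fed to the reduction kernel: axis kept as 1 -> (8, 1, 2)
    const TensorShape kept    = misc::shape_calculator::compute_reduced_shape(input_shape, axis);
    // Shape produced after the reshape kernel: axis removed -> (8, 2)
    const TensorShape dropped = misc::shape_calculator::compute_reduced_shape(input_shape, axis, false);

    std::cout << kept.num_dimensions() << " vs " << dropped.num_dimensions() << std::endl; // prints "3 vs 2"
    return 0;
}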