aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPablo Tello <pablo.tello@arm.com>2019-12-11 13:04:34 +0000
committerSiCongLi <sicong.li@arm.com>2019-12-11 19:18:07 +0000
commit99d619561755a74f205188c1857de0ec3406c34c (patch)
tree58057b319f70b24ecd9be366028d255e1bc26357
parent6f58d1ec864b9a4960181ceeab177cd0db54e2e1 (diff)
downloadComputeLibrary-99d619561755a74f205188c1857de0ec3406c34c.tar.gz
COMPMID-2855: CLReduceMean throws error for invalid configs
Signed-off-by: Pablo Tello <pablo.tello@arm.com> Reviewed-on: https://review.mlplatform.org/c/2452 Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com> Comments-Addressed: Arm Jenkins <bsgcomp@arm.com> Tested-by: Arm Jenkins <bsgcomp@arm.com> Change-Id: I7cda1b67aa6c3541fdd7781be12288c8fc36ffeb
-rw-r--r--arm_compute/core/utils/misc/ShapeCalculator.h36
-rw-r--r--arm_compute/runtime/CL/functions/CLReduceMean.h2
-rw-r--r--src/runtime/CL/functions/CLReduceMean.cpp120
-rw-r--r--src/runtime/NEON/functions/NEReduceMean.cpp12
-rw-r--r--tests/validation/CL/ReduceMean.cpp22
5 files changed, 130 insertions, 62 deletions
diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h
index 65a2a1edf4..698a2b7a45 100644
--- a/arm_compute/core/utils/misc/ShapeCalculator.h
+++ b/arm_compute/core/utils/misc/ShapeCalculator.h
@@ -39,6 +39,42 @@ namespace misc
{
namespace shape_calculator
{
+/** Calculate the output tensor shape for the reduce mean operation
+ *
+ * @param[in] input Input tensor shape
+ * @param[in] reduction_axis Reduction axis
+ * @param[in] keep_dims Flag to indicate if dimensions are kept
+ *
+ * @return the calculated shape
+ */
+inline TensorShape calculate_reduce_mean_shape(ITensor *input, const Coordinates &reduction_axis, bool keep_dims)
+{
+ const int reduction_ops = reduction_axis.num_dimensions();
+ Coordinates axis_local = reduction_axis;
+ const int input_dims = input->info()->num_dimensions();
+ convert_negative_axis(axis_local, input_dims);
+ TensorShape out_shape = input->info()->tensor_shape();
+ // Configure reshape layer if we want to drop the dimensions
+ if(!keep_dims)
+ {
+ // We have to sort the reduction axis vectors in order for remove_dimension
+ // to work properly
+ std::sort(axis_local.begin(), axis_local.begin() + reduction_ops);
+ for(int i = 0; i < reduction_ops; ++i)
+ {
+ out_shape.remove_dimension(axis_local[i] - i);
+ }
+ return out_shape;
+ }
+ else
+ {
+ for(int i = 0; i < reduction_ops; ++i)
+ {
+ out_shape.set(axis_local[i], 1);
+ }
+ return out_shape;
+ }
+}
/** Calculate the output tensor shape of a vector input given the convolution dimensions
*
* @param[in] input Input tensor shape
diff --git a/arm_compute/runtime/CL/functions/CLReduceMean.h b/arm_compute/runtime/CL/functions/CLReduceMean.h
index 9c087eadf1..6836ba3f58 100644
--- a/arm_compute/runtime/CL/functions/CLReduceMean.h
+++ b/arm_compute/runtime/CL/functions/CLReduceMean.h
@@ -71,7 +71,7 @@ private:
std::vector<CLReductionOperation> _reduction_kernels;
std::vector<CLTensor> _reduced_outs;
CLReshapeLayer _reshape;
- unsigned int _reduction_ops;
+ int _reduction_ops;
bool _keep_dims;
};
} // namespace arm_compute
diff --git a/src/runtime/CL/functions/CLReduceMean.cpp b/src/runtime/CL/functions/CLReduceMean.cpp
index a3634cd46e..c5de43da35 100644
--- a/src/runtime/CL/functions/CLReduceMean.cpp
+++ b/src/runtime/CL/functions/CLReduceMean.cpp
@@ -26,20 +26,81 @@
#include "arm_compute/core/CL/CLValidate.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/CL/kernels/CLReductionOperationKernel.h"
+#include "arm_compute/core/Error.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/helpers/tensor_transform.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "support/ToolchainSupport.h"
namespace arm_compute
{
+namespace
+{
+Status validate_config(const ITensorInfo *input, const Coordinates &reduction_axis, bool keep_dims, const ITensorInfo *output)
+{
+ ARM_COMPUTE_UNUSED(keep_dims);
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON(reduction_axis.num_dimensions() < 1);
+ ARM_COMPUTE_RETURN_ERROR_ON(reduction_axis.num_dimensions() > input->num_dimensions());
+
+ const unsigned int reduction_ops = reduction_axis.num_dimensions();
+ const int input_dims = input->num_dimensions();
+ Coordinates axis_local = reduction_axis;
+
+ for(unsigned int i = 0; i < axis_local.num_dimensions(); ++i)
+ {
+ //axis: The dimensions to reduce. Must be in the range [-rank(input_tensor), rank(input_tensor)).
+ ARM_COMPUTE_RETURN_ERROR_ON(axis_local[i] < (-static_cast<int>(input->num_dimensions())));
+ ARM_COMPUTE_RETURN_ERROR_ON(axis_local[i] >= static_cast<int>(input->num_dimensions()));
+ }
+
+ if(output->tensor_shape().total_size() != 0)
+ {
+ // Only validate if not using auto_init for the output tensor
+ TensorShape out_shape = input->tensor_shape();
+ // Validate output_shape only if not using auto_init
+ convert_negative_axis(axis_local, input_dims);
+ std::sort(axis_local.begin(), axis_local.begin() + reduction_ops);
+ for(unsigned int i = 0; i < reduction_ops; ++i)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON(axis_local[i] > 3);
+ ARM_COMPUTE_RETURN_ERROR_ON(static_cast<unsigned int>(axis_local[i]) > input->num_dimensions() - 1);
+ if(output->total_size() > 0 && keep_dims)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(axis_local[i]) != 1);
+ }
+ if(keep_dims)
+ {
+ out_shape.set(axis_local[i], 1);
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON(i > static_cast<unsigned int>(axis_local[i]));
+ const unsigned int remove_index = axis_local[i] - i;
+ ARM_COMPUTE_RETURN_ERROR_ON(remove_index >= out_shape.num_dimensions());
+ out_shape.remove_dimension(remove_index);
+ }
+ }
+ const TensorInfo out_info = input->clone()->set_tensor_shape(out_shape);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &out_info);
+ }
+ return Status{};
+}
+}
CLReduceMean::CLReduceMean(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(std::move(memory_manager)), _reduction_kernels(), _reduced_outs(), _reshape(), _reduction_ops(), _keep_dims()
{
}
void CLReduceMean::configure(ICLTensor *input, const Coordinates &reduction_axis, bool keep_dims, ICLTensor *output)
{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input);
+ // Perform validate step
+ ARM_COMPUTE_ERROR_THROW_ON(CLReduceMean::validate(input->info(), reduction_axis, keep_dims, output->info()));
+ // Output auto initialization if not yet initialized
+ const TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_reduce_mean_shape(input, reduction_axis, keep_dims);
+ auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape));
_reduction_ops = reduction_axis.num_dimensions();
_reduction_kernels.resize(_reduction_ops);
@@ -49,14 +110,10 @@ void CLReduceMean::configure(ICLTensor *input, const Coordinates &reduction_axis
Coordinates axis_local = reduction_axis;
const int input_dims = input->info()->num_dimensions();
- // Convert negative axis
- for(unsigned int i = 0; i < _reduction_ops; ++i)
- {
- axis_local[i] = wrap_around(axis_local[i], input_dims);
- }
+ convert_negative_axis(axis_local, input_dims);
// Perform reduction for every axis
- for(unsigned int i = 0; i < _reduction_ops; ++i)
+ for(int i = 0; i < _reduction_ops; ++i)
{
TensorShape out_shape = i == 0 ? input->info()->tensor_shape() : (&_reduced_outs[i - 1])->info()->tensor_shape();
out_shape.set(axis_local[i], 1);
@@ -75,7 +132,7 @@ void CLReduceMean::configure(ICLTensor *input, const Coordinates &reduction_axis
}
// Allocate intermediate tensors
- for(unsigned int i = 0; i < _reduction_ops - (keep_dims ? 1 : 0); ++i)
+ for(int i = 0; i < _reduction_ops - (keep_dims ? 1 : 0); ++i)
{
_reduced_outs[i].allocator()->allocate();
}
@@ -88,7 +145,7 @@ void CLReduceMean::configure(ICLTensor *input, const Coordinates &reduction_axis
// We have to sort the reduction axis vectors in order for remove_dimension
// to work properly
std::sort(axis_local.begin(), axis_local.begin() + _reduction_ops);
- for(unsigned int i = 0; i < _reduction_ops; ++i)
+ for(int i = 0; i < _reduction_ops; ++i)
{
out_shape.remove_dimension(axis_local[i] - i);
}
@@ -99,55 +156,16 @@ void CLReduceMean::configure(ICLTensor *input, const Coordinates &reduction_axis
Status CLReduceMean::validate(const ITensorInfo *input, const Coordinates &reduction_axis, bool keep_dims, const ITensorInfo *output)
{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON(reduction_axis.num_dimensions() > input->num_dimensions());
-
- TensorShape out_shape = input->tensor_shape();
-
- Coordinates axis_sorted = reduction_axis;
- const unsigned int reduction_ops = reduction_axis.num_dimensions();
- const int input_dims = input->num_dimensions();
-
- // Convert negative axis
- for(unsigned int i = 0; i < reduction_ops; ++i)
- {
- axis_sorted[i] = wrap_around(axis_sorted[i], input_dims);
- }
-
- std::sort(axis_sorted.begin(), axis_sorted.begin() + reduction_ops);
- for(unsigned int i = 0; i < reduction_ops; ++i)
- {
- ARM_COMPUTE_RETURN_ERROR_ON(axis_sorted[i] > 3);
- ARM_COMPUTE_RETURN_ERROR_ON(static_cast<unsigned int>(axis_sorted[i]) > input->num_dimensions() - 1);
- if(output->total_size() > 0 && keep_dims)
- {
- ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(axis_sorted[i]) != 1);
- }
- if(keep_dims)
- {
- out_shape.set(axis_sorted[i], 1);
- }
- else
- {
- out_shape.remove_dimension(axis_sorted[i] - i);
- }
- }
-
- const TensorInfo out_info = input->clone()->set_tensor_shape(out_shape);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &out_info);
-
- return Status{};
+ return validate_config(input, reduction_axis, keep_dims, output);
}
void CLReduceMean::run()
{
MemoryGroupResourceScope scope_mg(_memory_group);
- for(unsigned int i = 0; i < _reduction_ops; ++i)
+ for(auto &kernel : _reduction_kernels)
{
- _reduction_kernels[i].run();
+ kernel.run();
}
if(!_keep_dims)
diff --git a/src/runtime/NEON/functions/NEReduceMean.cpp b/src/runtime/NEON/functions/NEReduceMean.cpp
index 4547a1f9b0..96ec8b8587 100644
--- a/src/runtime/NEON/functions/NEReduceMean.cpp
+++ b/src/runtime/NEON/functions/NEReduceMean.cpp
@@ -26,9 +26,11 @@
#include "arm_compute/core/CPP/Validate.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
-using namespace arm_compute;
+namespace arm_compute
+{
NEReduceMean::NEReduceMean(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(std::move(memory_manager)), _reduction_kernels(), _reduced_outs(), _reshape(), _reduction_ops(), _keep_dims()
@@ -95,7 +97,11 @@ Status NEReduceMean::validate(const ITensorInfo *input, const Coordinates &reduc
void NEReduceMean::configure(ITensor *input, const Coordinates &reduction_axis, bool keep_dims, ITensor *output)
{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input);
+ // Perform validate step
+ ARM_COMPUTE_ERROR_THROW_ON(NEReduceMean::validate(input->info(), reduction_axis, keep_dims, output->info()));
+ // Output auto initialization if not yet initialized
+ const TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_reduce_mean_shape(input, reduction_axis, keep_dims);
+ auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape));
_reduction_ops = reduction_axis.num_dimensions();
_reduction_kernels.resize(_reduction_ops);
@@ -161,3 +167,5 @@ void NEReduceMean::run()
_reshape.run();
}
}
+
+} // namespace arm_compute
diff --git a/tests/validation/CL/ReduceMean.cpp b/tests/validation/CL/ReduceMean.cpp
index cfd4a2730c..1b7400bf53 100644
--- a/tests/validation/CL/ReduceMean.cpp
+++ b/tests/validation/CL/ReduceMean.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -55,20 +55,26 @@ TEST_SUITE(ReduceMean)
// *INDENT-OFF*
// clang-format off
-DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 3U, 16U, 2U), 1, DataType::F32), // Invalid axis
TensorInfo(TensorShape(27U, 3U, 16U, 2U), 1, DataType::F32), // Invalid output shape
- TensorInfo(TensorShape(32U, 16U, 16U, 2U), 1, DataType::F32)
+ TensorInfo(TensorShape(32U, 16U, 16U, 2U), 1, DataType::F32),// OK
+ TensorInfo(TensorShape{228U, 19U, 2U, 2U}, 1, DataType::F32),// OK
+ TensorInfo(TensorShape{228U, 19U, 2U, 1U}, 1, DataType::F32) // Invalid: axis 3 is out of range for this input
}),
framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(27U, 3U, 1U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(27U, 3U, 1U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 16U, 1U, 2U), 1, DataType::F32)
+ TensorInfo(TensorShape(32U, 16U, 1U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(19U), 1, DataType::F32),
+ TensorInfo(TensorShape(19U), 1, DataType::F32)
+
})),
- framework::dataset::make("Axis", { Coordinates(4), Coordinates(0,2), Coordinates(2) })),
- framework::dataset::make("Expected", { false, false, true })),
- input_info, output_info, axis, expected)
+ framework::dataset::make("Axis", { Coordinates(4), Coordinates(0,2), Coordinates(2), Coordinates(3,2,0), Coordinates(3,2,0) })),
+ framework::dataset::make("Keep", { true, true, true, false, false })),
+ framework::dataset::make("Expected", { false, false, true, true, false })),
+ input_info, output_info, axis, keep, expected)
{
- const Status status = CLReduceMean::validate(&input_info.clone()->set_is_resizable(false), axis, true, &output_info.clone()->set_is_resizable(false));
+ const Status status = CLReduceMean::validate(&input_info.clone()->set_is_resizable(false), axis, keep, &output_info.clone()->set_is_resizable(false));
ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on