From 2697fd8fa42425f7bfdd60dd486d4c2132b06523 Mon Sep 17 00:00:00 2001
From: Sang-Hoon Park
Date: Tue, 15 Oct 2019 16:49:24 +0100
Subject: COMPMID-2707: add keep_dims parameter to Reduction Operation

The added parameter is used to decide whether or not to keep the target
dimension of the reduction operation. ArgMinMax operations will always
remove the reduced dimension.

The following are updated to support the parameter:
- [CL/NEON] functions and reference kernel
- [CL/NEON] ArgMinMax function to use ReductionOperation function
- [CL/NEON] validation test suite for Reduction and ArgMinMax operations
  to validate the added parameter
- ReductionOperationFixture is modified NOT to pre-populate the output
  tensor and now relies on the underlying kernel/function.
- Adjust the CL validation test suite for the Reduction operation to remove
  excessive test cases with axis values beyond the input tensor's dimensions.

Change-Id: I3e24d276ed469a4201f323001708f0c525f11c4f
Signed-off-by: Sang-Hoon Park
Reviewed-on: https://review.mlplatform.org/c/2167
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Reviewed-by: Georgios Pinitas
---
 tests/validation/NEON/ArgMinMax.cpp          | 16 +++++++------
 tests/validation/NEON/ReductionOperation.cpp | 36 +++++++++++++++++-----------
 2 files changed, 31 insertions(+), 21 deletions(-)

(limited to 'tests/validation/NEON')
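For orientation before the diffs, here is a minimal sketch of the behaviour the new keep_dims parameter is being tested for, written against the NEON runtime interfaces exercised below. It assumes that NEReductionOperation::configure() takes the same trailing keep_dims argument that NEReductionOperation::validate() shows in the diff; the shapes, variable names, and the wrapper function are illustrative only, not taken from the library.

    // Illustrative sketch only -- not part of this patch. Shapes and names are made up.
    #include "arm_compute/core/Types.h"
    #include "arm_compute/core/utils/misc/ShapeCalculator.h"
    #include "arm_compute/runtime/NEON/functions/NEArgMinMaxLayer.h"
    #include "arm_compute/runtime/NEON/functions/NEReductionOperation.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void keep_dims_example()
    {
        const TensorShape  input_shape(27U, 3U, 2U); // illustrative 3D input
        const unsigned int axis = 1;

        Tensor src;
        src.allocator()->init(TensorInfo(input_shape, 1, DataType::F32));

        // keep_dims = true: the reduced axis is kept with size 1 -> output shape (27, 1, 2).
        Tensor               sum_kept;
        NEReductionOperation reduce_keep;
        reduce_keep.configure(&src, &sum_kept, axis, ReductionOperation::SUM, /* keep_dims */ true);

        // keep_dims = false: the reduced axis is removed -> output shape (27, 2).
        Tensor               sum_dropped;
        NEReductionOperation reduce_drop;
        reduce_drop.configure(&src, &sum_dropped, axis, ReductionOperation::SUM, /* keep_dims */ false);

        // ArgMinMax always removes the reduced dimension, regardless of keep_dims.
        Tensor           argmax_idx;
        NEArgMinMaxLayer argmax;
        argmax.configure(&src, axis, &argmax_idx, ReductionOperation::ARG_IDX_MAX);

        // The updated tests derive the expected output shapes the same way:
        const TensorShape kept    = misc::shape_calculator::compute_reduced_shape(input_shape, axis, true);  // (27, 1, 2)
        const TensorShape dropped = misc::shape_calculator::compute_reduced_shape(input_shape, axis, false); // (27, 2)
    }

With keep_dims = true the reduced axis survives with size 1; with keep_dims = false it is removed, which is also the only behaviour NEArgMinMaxLayer provides. That is why the ArgMinMax test below computes its expected shape with compute_reduced_shape(shape, axis, false).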
diff --git a/tests/validation/NEON/ArgMinMax.cpp b/tests/validation/NEON/ArgMinMax.cpp
index 71fb39a30d..642a69ba5f 100644
--- a/tests/validation/NEON/ArgMinMax.cpp
+++ b/tests/validation/NEON/ArgMinMax.cpp
@@ -24,9 +24,11 @@
 #include "arm_compute/core/Types.h"
 #include "arm_compute/core/utils/misc/Traits.h"
 #include "arm_compute/runtime/NEON/functions/NEArgMinMaxLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEReductionOperation.h"
 #include "arm_compute/runtime/Tensor.h"
 #include "arm_compute/runtime/TensorAllocator.h"
 
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "tests/NEON/Accessor.h"
 #include "tests/datasets/ShapeDatasets.h"
 #include "tests/datasets/SplitDataset.h"
@@ -54,7 +56,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
         }),
         framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(27U, 3U, 1U, 2U), 1, DataType::F32),
                                                  TensorInfo(TensorShape(27U, 3U, 1U, 2U), 1, DataType::F32),
-                                                TensorInfo(TensorShape(32U, 16U, 1U, 2U), 1, DataType::U32),
+                                                TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::U32),
                                                  TensorInfo(TensorShape(32U, 16U, 1U, 2U), 1, DataType::F32)
         })),
         framework::dataset::make("Axis", { 4, 0, 2, 0 })),
@@ -74,17 +76,17 @@ DATA_TEST_CASE(Configuration,
                shape, data_type)
 {
     // Create tensors
-    Tensor ref_src = create_tensor<Tensor>(shape, data_type);
-    Tensor dst;
+    Tensor    ref_src = create_tensor<Tensor>(shape, data_type);
+    Tensor    dst;
+    const int axis = 1;
 
     // Create and Configure function
     NEArgMinMaxLayer arg_min_max_layer;
-    arg_min_max_layer.configure(&ref_src, 1, &dst, ReductionOperation::ARG_IDX_MAX);
+    arg_min_max_layer.configure(&ref_src, axis, &dst, ReductionOperation::ARG_IDX_MAX);
 
     // Validate valid region
-    TensorShape output_shape = shape;
-    output_shape.set(1, 1);
-    const ValidRegion valid_region = shape_to_valid_region(output_shape);
+    const auto        expected_output_shape = arm_compute::misc::shape_calculator::compute_reduced_shape(shape, axis, false);
+    const ValidRegion valid_region          = shape_to_valid_region(expected_output_shape);
 
     validate(dst.info()->valid_region(), valid_region);
 }
diff --git a/tests/validation/NEON/ReductionOperation.cpp b/tests/validation/NEON/ReductionOperation.cpp
index 5b697a5efa..3a7f707d23 100644
--- a/tests/validation/NEON/ReductionOperation.cpp
+++ b/tests/validation/NEON/ReductionOperation.cpp
@@ -66,6 +66,8 @@ const auto QuantizationInfos = framework::dataset::make("QuantizationInfo",
 
 const auto Axises = framework::dataset::make("Axis", { 0, 1, 2, 3 });
 
+const auto KeepDims = framework::dataset::make("KeepDims", { true, false });
+
 } // namespace
 TEST_SUITE(NEON)
 
@@ -73,27 +75,31 @@ TEST_SUITE(ReductionOperation)
 
 // *INDENT-OFF*
 // clang-format off
-DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
        framework::dataset::make("InputInfo",  { TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Mismatching data type input/output
                                                 TensorInfo(TensorShape(128U, 64U), 2, DataType::F32), // Number of Input channels != 1
                                                 TensorInfo(TensorShape(128U, 64U), 1, DataType::S16), // DataType != F32
                                                 TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Axis >= num_max_dimensions
-                                               TensorInfo(TensorShape(128U, 64U), 1, DataType::F32)
+                                               TensorInfo(TensorShape(128U, 64U), 1, DataType::F32),
+                                               TensorInfo(TensorShape(128U, 64U), 1, DataType::F32) // Kept dimension when keep_dims = false
        }),
        framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(1U, 64U), 1, DataType::F16),
                                                 TensorInfo(TensorShape(1U, 64U), 1, DataType::F32),
                                                 TensorInfo(TensorShape(1U, 64U), 1, DataType::S16),
                                                 TensorInfo(TensorShape(1U, 64U), 1, DataType::F32),
+                                               TensorInfo(TensorShape(1U, 64U), 1, DataType::F32),
                                                 TensorInfo(TensorShape(1U, 64U), 1, DataType::F32)
        })),
-       framework::dataset::make("Axis",     { 0U, 0U, 0U, static_cast<unsigned int>(TensorShape::num_max_dimensions), 0U })),
-       framework::dataset::make("Expected", { false, false, false, false, true })),
-       input_info, output_info, axis, expected)
+       framework::dataset::make("Axis",     { 0U, 0U, 0U, static_cast<unsigned int>(TensorShape::num_max_dimensions), 0U, 0U })),
+       framework::dataset::make("KeepDims", { true, true, true, true, true, false})),
+       framework::dataset::make("Expected", { false, false, false, false, true, false })),
+       input_info, output_info, axis, keep_dims, expected)
 {
     bool is_valid = bool(NEReductionOperation::validate(&input_info.clone()->set_is_resizable(false),
                                                         &output_info.clone()->set_is_resizable(true),
                                                         axis,
-                                                        ReductionOperation::SUM_SQUARE));
+                                                        ReductionOperation::SUM_SQUARE,
+                                                        keep_dims));
     ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
 }
 // clang-format on
@@ -104,13 +110,13 @@ using NEReductionOperationFixture = ReductionOperationFixture<Tensor, Accessor, NEReductionOperation, T>
 
 TEST_SUITE(FP32)
 FIXTURE_DATA_TEST_CASE(RunSmall, NEReductionOperationFixture<float>, framework::DatasetMode::PRECOMMIT,
-                       combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::F32)), Axises), ReductionOperations))
+                       combine(combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::F32)), Axises), ReductionOperations), KeepDims))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_f32);
 }
 FIXTURE_DATA_TEST_CASE(RunLarge, NEReductionOperationFixture<float>, framework::DatasetMode::NIGHTLY,
-                       combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::F32)), Axises), ReductionOperations))
+                       combine(combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::F32)), Axises), ReductionOperations), KeepDims))
 {
     // Validate output
     validate(Accessor(_target), _reference, rel_tolerance_f32, 0, tolerance_f32);
@@ -122,17 +128,19 @@ using NEReductionOperationQuantizedFixture = ReductionOperationQuantizedFixture<Tensor, Accessor, NEReductionOperation, T>
 
 TEST_SUITE(QASYMM8)
 FIXTURE_DATA_TEST_CASE(RunSmall, NEReductionOperationQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
-                       combine(combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), Axises),
-                                       ReductionOperations),
-                               QuantizationInfos))
+                       combine(combine(combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), Axises),
+                                               ReductionOperations),
+                                       QuantizationInfos),
+                               KeepDims))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qasymm8);
 }
 FIXTURE_DATA_TEST_CASE(RunLarge, NEReductionOperationQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
-                       combine(combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), Axises),
-                                       ReductionOperations),
-                               QuantizationInfos))
+                       combine(combine(combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), Axises),
+                                               ReductionOperations),
+                                       QuantizationInfos),
+                               KeepDims))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qasymm8);
--
cgit v1.2.1