From 77b8859688c333bca35ebc8ca4d0b2652f480c4a Mon Sep 17 00:00:00 2001
From: Manuel Bottini
Date: Mon, 4 May 2020 18:42:32 +0100
Subject: COMPMID-3443: Android R CTS 1.3 QASYMM8_SIGNED failure with MEAN on CpuAcc

- Properly perform the division of the sum of the elements by the size of
  the reduced dimension of the tensor

Change-Id: I0a30be4e5e6cfc1bd24a17eb4307dd2acf98db3e
Signed-off-by: Manuel Bottini
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3145
Tested-by: Arm Jenkins
Comments-Addressed: Arm Jenkins
Reviewed-by: Georgios Pinitas
---
 src/core/NEON/kernels/NEReductionOperationKernel.cpp |  3 +--
 tests/validation/NEON/ReduceMean.cpp                 | 17 +++++++++--------
 2 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/src/core/NEON/kernels/NEReductionOperationKernel.cpp b/src/core/NEON/kernels/NEReductionOperationKernel.cpp
index 9b8c971a35..afe58ed07d 100644
--- a/src/core/NEON/kernels/NEReductionOperationKernel.cpp
+++ b/src/core/NEON/kernels/NEReductionOperationKernel.cpp
@@ -724,14 +724,13 @@ struct RedOpX_quantized
 
         if(op == ReductionOperation::MEAN_SUM)
         {
-            res /= in_info.dimension(0);
+            res /= static_cast<int32_t>(in_info.dimension(0));
         }
         else
         {
             // Subtract accumulated offsets
             res -= (in_info.dimension(0) - 1) * iq_info.offset;
         }
-
         *reinterpret_cast<T *>(output.ptr()) = utils::cast::saturate_cast<T>(res);
     }
 }
diff --git a/tests/validation/NEON/ReduceMean.cpp b/tests/validation/NEON/ReduceMean.cpp
index fade3613da..821171a603 100644
--- a/tests/validation/NEON/ReduceMean.cpp
+++ b/tests/validation/NEON/ReduceMean.cpp
@@ -44,9 +44,10 @@
 {
 constexpr AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for 32-bit floating-point type */
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-constexpr AbsoluteTolerance<float> tolerance_f16(0.03f);     /**< Tolerance value for comparing reference's output against implementation's output for 16-bit floating-point type */
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-constexpr AbsoluteTolerance<uint8_t> tolerance_quantized(1); /**< Tolerance value for comparing reference's output against implementation's output for 8-bit asymmetric quantized type */
+constexpr AbsoluteTolerance<float> tolerance_f16(0.03f); /**< Tolerance value for comparing reference's output against implementation's output for 16-bit floating-point type */
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+constexpr AbsoluteTolerance<uint8_t> tolerance_u8(1);    /**< Tolerance value for comparing reference's output against implementation's output for unsigned 8-bit asymmetric quantized type */
+constexpr AbsoluteTolerance<int8_t> tolerance_s8(1);     /**< Tolerance value for comparing reference's output against implementation's output for signed 8-bit asymmetric quantized type */
 
 const auto axis_keep = combine(framework::dataset::make("Axis", { Coordinates(0), Coordinates(1, 0), Coordinates(1, 2), Coordinates(0, 2), Coordinates(1, 3), Coordinates(0, 1, 2, 3) }),
                                framework::dataset::make("KeepDims", { true }));
@@ -162,7 +163,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall,
                        combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 5) })))
 {
     // Validate output
-    validate(Accessor(_target), _reference, tolerance_quantized);
+    validate(Accessor(_target), _reference, tolerance_u8);
 }
 
 FIXTURE_DATA_TEST_CASE(RunLarge,
@@ -171,7 +172,7 @@
                        combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 5) })))
 {
     // Validate output
-    validate(Accessor(_target), _reference, tolerance_quantized);
+    validate(Accessor(_target), _reference, tolerance_u8);
 }
 TEST_SUITE_END() // QASYMM8
 
@@ -179,10 +180,10 @@ TEST_SUITE(QASYMM8_SIGNED)
 FIXTURE_DATA_TEST_CASE(RunSmall,
                        NEReduceMeanQuantizedFixture<int8_t>,
                        framework::DatasetMode::PRECOMMIT,
-                       combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 127, 0) })))
+                       combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 127, -10), QuantizationInfo(1.f / 250, -20) })))
 {
     // Validate output
-    validate(Accessor(_target), _reference, tolerance_quantized);
+    validate(Accessor(_target), _reference, tolerance_s8);
 }
 FIXTURE_DATA_TEST_CASE(RunLarge,
                        NEReduceMeanQuantizedFixture<int8_t>,
@@ -190,7 +191,7 @@
                        combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 127, 0) })))
 {
     // Validate output
-    validate(Accessor(_target), _reference, tolerance_quantized);
+    validate(Accessor(_target), _reference, tolerance_s8);
 }
 TEST_SUITE_END() // QASYMM8_SIGNED
 TEST_SUITE_END() // Quantized
-- 
cgit v1.2.1
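Editor's note (not part of the patch): the kernel hunk casts the divisor because ITensorInfo::dimension() returns an unsigned value (size_t), so dividing a signed accumulator by it switches the division to unsigned arithmetic and corrupts negative QASYMM8_SIGNED means. The standalone C++ sketch below only illustrates that pitfall; the names (sum, dim, pitfall_demo.cpp) are illustrative and do not come from the library.

// pitfall_demo.cpp -- standalone illustration, not Compute Library code.
#include <cstdint>
#include <cstdio>

int main()
{
    int32_t     sum = -20; // e.g. a sum of negative QASYMM8_SIGNED values along the X axis
    std::size_t dim = 4;   // stand-in for what an unsigned dimension() accessor returns

    // Usual arithmetic conversions turn the negative sum into a huge unsigned
    // value before dividing, so the "mean" is garbage.
    auto wrong = sum / dim;

    // Casting the divisor keeps the division in signed arithmetic: -20 / 4 == -5.
    auto right = sum / static_cast<int32_t>(dim);

    std::printf("unsigned division: %llu\n", static_cast<unsigned long long>(wrong));
    std::printf("signed division:   %d\n", right);
    return 0;
}

On a typical 64-bit target this prints 4611686018427387899 for the unsigned division and -5 for the signed one, which is the kind of mismatch the Android CTS MEAN test flags.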