diff options
author | Manuel Bottini <manuel.bottini@arm.com> | 2020-05-04 18:42:32 +0100 |
---|---|---|
committer | Manuel Bottini <manuel.bottini@arm.com> | 2020-05-05 17:21:50 +0000 |
commit | 77b8859688c333bca35ebc8ca4d0b2652f480c4a (patch) | |
tree | 2a9d997bc148f33d6a68b5ed28e88250e40a977f /tests | |
parent | 939586e8fd05923cab63b7b79bf5040cca5b8a7b (diff) | |
download | ComputeLibrary-77b8859688c333bca35ebc8ca4d0b2652f480c4a.tar.gz |
COMPMID-3443: Android R CTS 1.3 QASYMM8_SIGNED failure with MEAN on CpuAcc
- Properly perform the division of the sum of the elements by the size of the dimension of interest of the tensor
Change-Id: I0a30be4e5e6cfc1bd24a17eb4307dd2acf98db3e
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3145
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'tests')
-rw-r--r-- | tests/validation/NEON/ReduceMean.cpp | 17 |
1 file changed, 9 insertions, 8 deletions
diff --git a/tests/validation/NEON/ReduceMean.cpp b/tests/validation/NEON/ReduceMean.cpp index fade3613da..821171a603 100644 --- a/tests/validation/NEON/ReduceMean.cpp +++ b/tests/validation/NEON/ReduceMean.cpp @@ -44,9 +44,10 @@ namespace { constexpr AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for 32-bit floating-point type */ #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -constexpr AbsoluteTolerance<float> tolerance_f16(0.03f); /**< Tolerance value for comparing reference's output against implementation's output for 16-bit floating-point type */ -#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -constexpr AbsoluteTolerance<uint8_t> tolerance_quantized(1); /**< Tolerance value for comparing reference's output against implementation's output for 8-bit asymmetric quantized type */ +constexpr AbsoluteTolerance<float> tolerance_f16(0.03f); /**< Tolerance value for comparing reference's output against implementation's output for 16-bit floating-point type */ +#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +constexpr AbsoluteTolerance<uint8_t> tolerance_u8(1); /**< Tolerance value for comparing reference's output against implementation's output for unsigned 8-bit asymmetric quantized type */ +constexpr AbsoluteTolerance<int8_t> tolerance_s8(1); /**< Tolerance value for comparing reference's output against implementation's output for signed 8-bit asymmetric quantized type */ const auto axis_keep = combine(framework::dataset::make("Axis", { Coordinates(0), Coordinates(1, 0), Coordinates(1, 2), Coordinates(0, 2), Coordinates(1, 3), Coordinates(0, 1, 2, 3) }), framework::dataset::make("KeepDims", { true })); @@ -162,7 +163,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 5) }))) { // 
Validate output - validate(Accessor(_target), _reference, tolerance_quantized); + validate(Accessor(_target), _reference, tolerance_u8); } FIXTURE_DATA_TEST_CASE(RunLarge, @@ -171,7 +172,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 5) }))) { // Validate output - validate(Accessor(_target), _reference, tolerance_quantized); + validate(Accessor(_target), _reference, tolerance_u8); } TEST_SUITE_END() // QASYMM8 @@ -179,10 +180,10 @@ TEST_SUITE(QASYMM8_SIGNED) FIXTURE_DATA_TEST_CASE(RunSmall, NEReduceMeanQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT, - combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 127, 0) }))) + combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 127, -10), QuantizationInfo(1.f / 250, -20) }))) { // Validate output - validate(Accessor(_target), _reference, tolerance_quantized); + validate(Accessor(_target), _reference, tolerance_s8); } FIXTURE_DATA_TEST_CASE(RunLarge, NEReduceMeanQuantizedFixture<int8_t>, @@ -190,7 +191,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 127, 0) }))) { // Validate output - validate(Accessor(_target), _reference, tolerance_quantized); + validate(Accessor(_target), _reference, tolerance_s8); } TEST_SUITE_END() // QASYMM8_SIGNED TEST_SUITE_END() // Quantized |