From c6173fe9b06637415a20b5d8d19ff286ea198c6e Mon Sep 17 00:00:00 2001
From: Luca Foschiani
Date: Thu, 20 Feb 2020 12:19:12 +0000
Subject: COMPMID-3212 Investigate CLReduceMean failure with QASYMM8

In the reference implementation, compute MEAN_SUM for quantized types
directly on the raw quantized values: for the mean, scale and offset do
not need to be taken into account, so the dequantize/requantize round-trip
is only kept for the other reduction operations. Use a signed 32-bit
accumulator and a plain static_cast of the result instead of a saturating
cast. Exercise the QASYMM8 tests with a non-zero quantization offset and
tighten the quantized tolerance from 2 to 1.

Signed-off-by: Luca Foschiani
Change-Id: I96d8cbef2e167e09484b0e9b5d8a12ffee0f6e67
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2755
Comments-Addressed: Arm Jenkins
Reviewed-by: Michalis Spyrou
Tested-by: Arm Jenkins
---
 tests/validation/CL/ReduceMean.cpp                |  6 ++---
 tests/validation/NEON/ReduceMean.cpp              |  6 ++---
 tests/validation/reference/ReductionOperation.cpp | 32 +++++++++++++++++------
 3 files changed, 30 insertions(+), 14 deletions(-)

diff --git a/tests/validation/CL/ReduceMean.cpp b/tests/validation/CL/ReduceMean.cpp
index 5069711296..55594eda4a 100644
--- a/tests/validation/CL/ReduceMean.cpp
+++ b/tests/validation/CL/ReduceMean.cpp
@@ -44,7 +44,7 @@ namespace
 {
 constexpr AbsoluteTolerance<float>   tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for 32-bit floating-point type */
 constexpr AbsoluteTolerance<float>   tolerance_f16(0.03f);  /**< Tolerance value for comparing reference's output against implementation's output for 16-bit floating-point type */
-constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(2);  /**< Tolerance value for comparing reference's output against implementation's output for 8-bit asymmetric quantized type */
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1);  /**< Tolerance value for comparing reference's output against implementation's output for 8-bit asymmetric quantized type */
 
 const auto axis_keep = combine(framework::dataset::make("Axis", { Coordinates(0), Coordinates(1, 0), Coordinates(1, 2), Coordinates(0, 2), Coordinates(1, 3), Coordinates(0, 1, 2, 3) }),
                                framework::dataset::make("KeepDims", { true }));
@@ -133,7 +133,7 @@ TEST_SUITE(QASYMM8)
 FIXTURE_DATA_TEST_CASE(RunSmall,
                        CLReduceMeanQuantizedFixture<uint8_t>,
                        framework::DatasetMode::PRECOMMIT,
-                       combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 0) })))
+                       combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 5) })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_qasymm8);
@@ -142,7 +142,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall,
 FIXTURE_DATA_TEST_CASE(RunLarge,
                        CLReduceMeanQuantizedFixture<uint8_t>,
                        framework::DatasetMode::NIGHTLY,
-                       combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 0) })))
+                       combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 5) })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_qasymm8);
diff --git a/tests/validation/NEON/ReduceMean.cpp b/tests/validation/NEON/ReduceMean.cpp
index 210c3670d4..782b97220e 100644
--- a/tests/validation/NEON/ReduceMean.cpp
+++ b/tests/validation/NEON/ReduceMean.cpp
@@ -46,7 +46,7 @@ constexpr AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value f
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 constexpr AbsoluteTolerance<float>   tolerance_f16(0.03f);  /**< Tolerance value for comparing reference's output against implementation's output for 16-bit floating-point type */
 #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(2);  /**< Tolerance value for comparing reference's output against implementation's output for 8-bit asymmetric quantized type */
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1);  /**< Tolerance value for comparing reference's output against implementation's output for 8-bit asymmetric quantized type */
 
 const auto axis_keep = combine(framework::dataset::make("Axis", { Coordinates(0), Coordinates(1, 0), Coordinates(1, 2), Coordinates(0, 2), Coordinates(1, 3), Coordinates(0, 1, 2, 3) }),
                                framework::dataset::make("KeepDims", { true }));
@@ -159,7 +159,7 @@ TEST_SUITE(QASYMM8)
 FIXTURE_DATA_TEST_CASE(RunSmall,
                        NEReduceMeanQuantizedFixture<uint8_t>,
                        framework::DatasetMode::PRECOMMIT,
-                       combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 0) })))
+                       combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 5) })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qasymm8);
@@ -168,7 +168,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall,
 FIXTURE_DATA_TEST_CASE(RunLarge,
                        NEReduceMeanQuantizedFixture<uint8_t>,
                        framework::DatasetMode::NIGHTLY,
-                       combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 0) })))
+                       combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 5) })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qasymm8);
diff --git a/tests/validation/reference/ReductionOperation.cpp b/tests/validation/reference/ReductionOperation.cpp
index 9b35cdf6f5..a0b6a8eaed 100644
--- a/tests/validation/reference/ReductionOperation.cpp
+++ b/tests/validation/reference/ReductionOperation.cpp
@@ -64,7 +64,7 @@ OT reduce_operation(const T *ptr, int reduce_elements, ReductionOperation op, in
 
     if(std::is_integral<T>::value)
     {
-        auto int_res = static_cast<uint32_t>(res);
+        auto int_res = static_cast<int32_t>(res);
         for(int i = 0; i < reduce_elements; ++i)
         {
             auto elem = *(ptr + stride * i);
@@ -101,7 +101,7 @@ OT reduce_operation(const T *ptr, int reduce_elements, ReductionOperation op, in
         {
             int_res /= reduce_elements;
         }
-        res = saturate_cast<OT>(int_res);
+        res = static_cast<OT>(int_res);
     }
     else
     {
@@ -279,9 +279,17 @@ SimpleTensor<uint8_t> reduction_operation(const SimpleTensor<uint8_t> &src, cons
 {
     if(src.data_type() == DataType::QASYMM8)
     {
-        SimpleTensor<float> src_f = convert_from_asymmetric(src);
-        SimpleTensor<float> dst_f = reference::reduction_operation<float, float>(src_f, dst_shape, axis, op);
-        return convert_to_asymmetric<uint8_t>(dst_f, src.quantization_info());
+        // If the operation is MEAN_SUM, we can directly use the uint8 implementation without taking into account scale and offset
+        if(op == ReductionOperation::MEAN_SUM)
+        {
+            return compute_reduction_operation<uint8_t, uint8_t>(src, dst_shape, axis, op);
+        }
+        else
+        {
+            SimpleTensor<float> src_f = convert_from_asymmetric(src);
+            SimpleTensor<float> dst_f = reference::reduction_operation<float, float>(src_f, dst_shape, axis, op);
+            return convert_to_asymmetric<uint8_t>(dst_f, src.quantization_info());
+        }
     }
     else
     {
@@ -294,9 +302,17 @@ SimpleTensor<int8_t> reduction_operation(const SimpleTensor<int8_t> &src, const
 {
     if(src.data_type() == DataType::QASYMM8_SIGNED)
     {
-        SimpleTensor<float> src_f = convert_from_asymmetric(src);
-        SimpleTensor<float> dst_f = reference::reduction_operation<float, float>(src_f, dst_shape, axis, op);
-        return convert_to_asymmetric<int8_t>(dst_f, src.quantization_info());
+        // If the operation is MEAN_SUM, we can directly use the int8 implementation without taking into account scale and offset
+        if(op == ReductionOperation::MEAN_SUM)
+        {
+            return compute_reduction_operation<int8_t, int8_t>(src, dst_shape, axis, op);
+        }
+        else
+        {
+            SimpleTensor<float> src_f = convert_from_asymmetric(src);
+            SimpleTensor<float> dst_f = reference::reduction_operation<float, float>(src_f, dst_shape, axis, op);
+            return convert_to_asymmetric<int8_t>(dst_f, src.quantization_info());
+        }
     }
     else
    {
-- 
cgit v1.2.1
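
Reviewer note, not part of the patch: the in-diff comment "we can directly use the uint8
implementation without taking into account scale and offset" holds because asymmetric
dequantization is affine and the mean is an affine combination, so
mean(scale * (q_i - offset)) == scale * (mean(q_i) - offset). The standalone C++ sketch
below illustrates this for the test's QuantizationInfo(1.f / 255, 5). The quantize() and
dequantize() helpers are local stand-ins and not Compute Library APIs; the two paths can
differ by one integer step due to rounding, which is one reason the tests keep a
quantized tolerance of 1.

    // Hypothetical, self-contained sketch; helper names are local, not library APIs.
    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <cstdlib>
    #include <iostream>
    #include <vector>

    // Mirrors the test dataset's QuantizationInfo(1.f / 255, 5).
    constexpr float   scale  = 1.0f / 255.0f;
    constexpr int32_t offset = 5;

    float dequantize(uint8_t q)
    {
        return scale * (static_cast<int32_t>(q) - offset);
    }

    uint8_t quantize(float x)
    {
        const int32_t q = static_cast<int32_t>(std::lround(x / scale)) + offset;
        return static_cast<uint8_t>(std::min<int32_t>(255, std::max<int32_t>(0, q)));
    }

    int main()
    {
        const std::vector<uint8_t> values{ 3, 10, 25, 200 };

        // Path A: mean taken directly on the raw quantized values with a signed
        // 32-bit accumulator (the idea behind the new MEAN_SUM reference path).
        int32_t acc = 0;
        for(uint8_t v : values)
        {
            acc += v;
        }
        const uint8_t direct = static_cast<uint8_t>(acc / static_cast<int32_t>(values.size()));

        // Path B: dequantize, average in float, requantize (the old reference path).
        float sum = 0.f;
        for(uint8_t v : values)
        {
            sum += dequantize(v);
        }
        const uint8_t via_float = quantize(sum / values.size());

        // The two results agree up to one integer step of rounding.
        std::cout << "direct=" << static_cast<int>(direct)
                  << " via_float=" << static_cast<int>(via_float) << "\n";
        return std::abs(static_cast<int>(direct) - static_cast<int>(via_float)) <= 1 ? 0 : 1;
    }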