aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLuca Foschiani <luca.foschiani@arm.com>2020-02-20 12:19:12 +0000
committerGiuseppe Rossini <giuseppe.rossini@arm.com>2020-02-20 17:23:24 +0000
commitc6173fe9b06637415a20b5d8d19ff286ea198c6e (patch)
tree421aa042797174e46f067de530fbe5969e56df1f
parent2209921c38e3509668a80313c04440a164910f89 (diff)
downloadComputeLibrary-c6173fe9b06637415a20b5d8d19ff286ea198c6e.tar.gz
COMPMID-3212 Investigate CLReduceMean failure with QASYMM8 (tag: v20.02)
Signed-off-by: Luca Foschiani <luca.foschiani@arm.com>
Change-Id: I96d8cbef2e167e09484b0e9b5d8a12ffee0f6e67
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2755
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--tests/validation/CL/ReduceMean.cpp6
-rw-r--r--tests/validation/NEON/ReduceMean.cpp6
-rw-r--r--tests/validation/reference/ReductionOperation.cpp32
3 files changed, 30 insertions, 14 deletions
diff --git a/tests/validation/CL/ReduceMean.cpp b/tests/validation/CL/ReduceMean.cpp
index 5069711296..55594eda4a 100644
--- a/tests/validation/CL/ReduceMean.cpp
+++ b/tests/validation/CL/ReduceMean.cpp
@@ -44,7 +44,7 @@ namespace
{
constexpr AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for 32-bit floating-point type */
constexpr AbsoluteTolerance<float> tolerance_f16(0.03f); /**< Tolerance value for comparing reference's output against implementation's output for 16-bit floating-point type */
-constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(2); /**< Tolerance value for comparing reference's output against implementation's output for 8-bit asymmetric quantized type */
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1); /**< Tolerance value for comparing reference's output against implementation's output for 8-bit asymmetric quantized type */
const auto axis_keep = combine(framework::dataset::make("Axis", { Coordinates(0), Coordinates(1, 0), Coordinates(1, 2), Coordinates(0, 2), Coordinates(1, 3), Coordinates(0, 1, 2, 3) }),
framework::dataset::make("KeepDims", { true }));
@@ -133,7 +133,7 @@ TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunSmall,
CLReduceMeanQuantizedFixture<uint8_t>,
framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 0) })))
+ combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 5) })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
@@ -142,7 +142,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall,
FIXTURE_DATA_TEST_CASE(RunLarge,
CLReduceMeanQuantizedFixture<uint8_t>,
framework::DatasetMode::NIGHTLY,
- combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 0) })))
+ combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 5) })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
diff --git a/tests/validation/NEON/ReduceMean.cpp b/tests/validation/NEON/ReduceMean.cpp
index 210c3670d4..782b97220e 100644
--- a/tests/validation/NEON/ReduceMean.cpp
+++ b/tests/validation/NEON/ReduceMean.cpp
@@ -46,7 +46,7 @@ constexpr AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value f
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
constexpr AbsoluteTolerance<float> tolerance_f16(0.03f); /**< Tolerance value for comparing reference's output against implementation's output for 16-bit floating-point type */
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(2); /**< Tolerance value for comparing reference's output against implementation's output for 8-bit asymmetric quantized type */
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1); /**< Tolerance value for comparing reference's output against implementation's output for 8-bit asymmetric quantized type */
const auto axis_keep = combine(framework::dataset::make("Axis", { Coordinates(0), Coordinates(1, 0), Coordinates(1, 2), Coordinates(0, 2), Coordinates(1, 3), Coordinates(0, 1, 2, 3) }),
framework::dataset::make("KeepDims", { true }));
@@ -159,7 +159,7 @@ TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunSmall,
NEReduceMeanQuantizedFixture<uint8_t>,
framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 0) })))
+ combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 5) })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8);
@@ -168,7 +168,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall,
FIXTURE_DATA_TEST_CASE(RunLarge,
NEReduceMeanQuantizedFixture<uint8_t>,
framework::DatasetMode::NIGHTLY,
- combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 0) })))
+ combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 5) })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8);
diff --git a/tests/validation/reference/ReductionOperation.cpp b/tests/validation/reference/ReductionOperation.cpp
index 9b35cdf6f5..a0b6a8eaed 100644
--- a/tests/validation/reference/ReductionOperation.cpp
+++ b/tests/validation/reference/ReductionOperation.cpp
@@ -64,7 +64,7 @@ OT reduce_operation(const T *ptr, int reduce_elements, ReductionOperation op, in
if(std::is_integral<type>::value)
{
- auto int_res = static_cast<uint32_t>(res);
+ auto int_res = static_cast<int32_t>(res);
for(int i = 0; i < reduce_elements; ++i)
{
auto elem = *(ptr + stride * i);
@@ -101,7 +101,7 @@ OT reduce_operation(const T *ptr, int reduce_elements, ReductionOperation op, in
{
int_res /= reduce_elements;
}
- res = saturate_cast<type>(int_res);
+ res = static_cast<type>(int_res);
}
else
{
@@ -279,9 +279,17 @@ SimpleTensor<uint8_t> reduction_operation(const SimpleTensor<uint8_t> &src, cons
{
if(src.data_type() == DataType::QASYMM8)
{
- SimpleTensor<float> src_f = convert_from_asymmetric(src);
- SimpleTensor<float> dst_f = reference::reduction_operation<float, float>(src_f, dst_shape, axis, op);
- return convert_to_asymmetric<uint8_t>(dst_f, src.quantization_info());
+ // If the operation is MEAN_SUM, we can directly use the uint8 implementation without taking into account scale and offset
+ if(op == ReductionOperation::MEAN_SUM)
+ {
+ return compute_reduction_operation<uint8_t, uint8_t>(src, dst_shape, axis, op);
+ }
+ else
+ {
+ SimpleTensor<float> src_f = convert_from_asymmetric(src);
+ SimpleTensor<float> dst_f = reference::reduction_operation<float, float>(src_f, dst_shape, axis, op);
+ return convert_to_asymmetric<uint8_t>(dst_f, src.quantization_info());
+ }
}
else
{
@@ -294,9 +302,17 @@ SimpleTensor<int8_t> reduction_operation(const SimpleTensor<int8_t> &src, const
{
if(src.data_type() == DataType::QASYMM8_SIGNED)
{
- SimpleTensor<float> src_f = convert_from_asymmetric(src);
- SimpleTensor<float> dst_f = reference::reduction_operation<float, float>(src_f, dst_shape, axis, op);
- return convert_to_asymmetric<int8_t>(dst_f, src.quantization_info());
+ // If the operation is MEAN_SUM, we can directly use the int8 implementation without taking into account scale and offset
+ if(op == ReductionOperation::MEAN_SUM)
+ {
+ return compute_reduction_operation<int8_t, int8_t>(src, dst_shape, axis, op);
+ }
+ else
+ {
+ SimpleTensor<float> src_f = convert_from_asymmetric(src);
+ SimpleTensor<float> dst_f = reference::reduction_operation<float, float>(src_f, dst_shape, axis, op);
+ return convert_to_asymmetric<int8_t>(dst_f, src.quantization_info());
+ }
}
else
{