From 7e5a86535cd8702e8cb06be5277c289be37ead9c Mon Sep 17 00:00:00 2001
From: Michele Di Giorgio
Date: Tue, 12 Jan 2021 13:49:07 +0000
Subject: Add tolerance for quantized activations computed in float

Some of the activation functions need complex mathematical operations
and are implemented by dequantizing to float, performing the activation
in the float domain and requantizing back. In such cases, the results
may differ slightly between reference and optimized code. In fact, when
running validation through Valgrind we see a difference of 1 in the
results, and therefore an absolute tolerance of 1 is added to the tests.

Resolves: COMPMID-4067

Change-Id: Ic2eca5616371b0a324a246d40b515ddc9f576e61
Signed-off-by: Michele Di Giorgio
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4841
Reviewed-by: Giorgio Arena
Tested-by: Arm Jenkins
Comments-Addressed: Arm Jenkins
---
 tests/validation/NEON/ActivationLayer.cpp | 31 +++++++++++++++++++++++--------
 1 file changed, 23 insertions(+), 8 deletions(-)

(limited to 'tests')

diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp
index 45e09fb94f..456b96d0ea 100644
--- a/tests/validation/NEON/ActivationLayer.cpp
+++ b/tests/validation/NEON/ActivationLayer.cpp
@@ -136,12 +136,27 @@ AbsoluteTolerance<float> absolute_tolerance(DataType data_type, ActivationLayerI
     }
 }
 
-/** Tolerance for quantized asymmetric operations */
-#if(!defined(__aarch64__) || defined(__ARM_FEATURE_SVE2))
-constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1);
-#else // !(!defined(__aarch64__) || defined(__ARM_FEATURE_SVE2))
-constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(0);
-#endif // defined(__aarch64__)
+/** Define absolute tolerance of the activation layer for qasymm8.
+ *
+ * @param[in] activation The activation function used.
+ *
+ * @return Absolute tolerance depending on the activation function.
+ */
+AbsoluteTolerance<uint8_t> tolerance_qasymm8(ActivationLayerInfo::ActivationFunction activation)
+{
+    switch(activation)
+    {
+        case ActivationLayerInfo::ActivationFunction::LOGISTIC:
+        case ActivationLayerInfo::ActivationFunction::SQRT:
+        case ActivationLayerInfo::ActivationFunction::TANH:
+        case ActivationLayerInfo::ActivationFunction::HARD_SWISH:
+        case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
+        case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
+            return AbsoluteTolerance<uint8_t>(1);
+        default:
+            return AbsoluteTolerance<uint8_t>(0);
+    }
+}
 
 constexpr AbsoluteTolerance<int16_t> tolerance_qsymm16(1);
 
@@ -285,7 +300,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerQuantizedFixture<uint8_t>, fra
                                                     framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
 {
     // Validate output
-    validate(Accessor(_target), _reference, tolerance_qasymm8);
+    validate(Accessor(_target), _reference, tolerance_qasymm8(_function));
 }
 TEST_SUITE_END() // QASYMM8
 
@@ -296,7 +311,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerQuantizedFixture<int8_t>, fram
                                                     framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10.0f) })))
 {
     // Validate output
-    validate(Accessor(_target), _reference, tolerance_qasymm8);
+    validate(Accessor(_target), _reference, tolerance_qasymm8(_function));
 }
 TEST_SUITE_END() // QASYMM8_SIGNED
 
--
cgit v1.2.1
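
To illustrate why a one-LSB tolerance is plausible for quantized activations computed via float, the standalone C++ sketch below round-trips a QASYMM8 value through dequantize -> logistic in float -> requantize and compares it with the same computation carried out in double precision, standing in for a reference path. This sketch is not ComputeLibrary code: the helper names (dequantize, quantize, logistic_via_float, logistic_reference) and the use of double precision as the reference are illustrative assumptions; the scale and offset merely mirror the QuantizationInfo(0.1f, 128.0f) used by the QASYMM8 test above.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Hypothetical QASYMM8 quantization parameters, chosen to mirror the
// QuantizationInfo(0.1f, 128.0f) used in the test dataset.
constexpr float scale  = 0.1f;
constexpr int   offset = 128;

float dequantize(uint8_t q)
{
    return (static_cast<int>(q) - offset) * scale;
}

uint8_t quantize(float v)
{
    const int q = static_cast<int>(std::lround(v / scale)) + offset;
    return static_cast<uint8_t>(std::min(std::max(q, 0), 255));
}

// "Optimized" path: dequantize, run the activation in single-precision
// float, requantize back to QASYMM8.
uint8_t logistic_via_float(uint8_t q)
{
    const float x = dequantize(q);
    const float y = 1.0f / (1.0f + std::exp(-x));
    return quantize(y);
}

// Stand-in "reference" path: the same round trip in double precision.
uint8_t logistic_reference(uint8_t q)
{
    const double x   = (static_cast<int>(q) - offset) * static_cast<double>(scale);
    const double y   = 1.0 / (1.0 + std::exp(-x));
    const int    out = static_cast<int>(std::lround(y / scale)) + offset;
    return static_cast<uint8_t>(std::min(std::max(out, 0), 255));
}

int main()
{
    int max_diff = 0;
    for (int q = 0; q < 256; ++q)
    {
        const int diff = std::abs(static_cast<int>(logistic_via_float(static_cast<uint8_t>(q))) -
                                  static_cast<int>(logistic_reference(static_cast<uint8_t>(q))));
        max_diff = std::max(max_diff, diff);
    }
    // Each path rounds its intermediate result independently, so the two
    // outputs may land on adjacent quantized values.
    std::printf("max |optimized - reference| = %d\n", max_diff);
    return 0;
}

Because both paths round an intermediate floating-point value back to an 8-bit grid, small differences in how that value is computed can push the result onto an adjacent quantized level; an absolute tolerance of 1 in the tests absorbs exactly that kind of off-by-one discrepancy.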