author     | Michele Di Giorgio <michele.digiorgio@arm.com> | 2021-01-12 13:49:07 +0000
committer  | Georgios Pinitas <georgios.pinitas@arm.com>    | 2021-01-13 16:18:37 +0000
commit     | 7e5a86535cd8702e8cb06be5277c289be37ead9c (patch)
tree       | 2f9e40c8caf0fa8027f31967e7e0528ea63164e2 /tests/validation
parent     | d30405ac6eb38205676dfaa6e875b264caef431d (diff)
download   | ComputeLibrary-7e5a86535cd8702e8cb06be5277c289be37ead9c.tar.gz
Add tolerance for quantized activations computed in float
Some activation functions require complex mathematical operations and are
implemented by dequantizing to float, performing the activation in the
float domain and requantizing back. In such cases, the results may differ
slightly between the reference and the optimized code. In particular,
running validation under valgrind produces a difference of 1 in the
results, so an absolute tolerance of 1 is added to the tests (illustrated
in the sketch below the trailers).
Resolves: COMPMID-4067
Change-Id: Ic2eca5616371b0a324a246d40b515ddc9f576e61
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4841
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
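
To make the rationale concrete, here is a minimal standalone C++ sketch (an editorial illustration, not the library's implementation) of a QASYMM8 activation computed by round-tripping through float. The quantize, dequantize and logistic_via_float helpers and the small perturbation are hypothetical; the scale 0.1 and zero point 128 mirror the QuantizationInfo used in the QASYMM8 test below. Two float evaluations that differ only by rounding can land on opposite sides of a requantization boundary and therefore differ by exactly 1 in the quantized domain, which is what the new absolute tolerance of 1 absorbs.

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Hypothetical affine QASYMM8 parameters, mirroring the test's
// QuantizationInfo(0.1f, 128.0f).
constexpr float   scale      = 0.1f;
constexpr int32_t zero_point = 128;

uint8_t quantize(float x)
{
    const int32_t q = static_cast<int32_t>(std::lround(x / scale)) + zero_point;
    return static_cast<uint8_t>(std::clamp(q, 0, 255));
}

float dequantize(uint8_t q)
{
    return (static_cast<int32_t>(q) - zero_point) * scale;
}

// Activation computed by round-tripping through float:
// dequantize -> logistic in float -> requantize.
uint8_t logistic_via_float(uint8_t q)
{
    const float x = dequantize(q);
    const float y = 1.0f / (1.0f + std::exp(-x));
    return quantize(y);
}

int main()
{
    for (int q = 0; q < 256; ++q)
    {
        const uint8_t a = logistic_via_float(static_cast<uint8_t>(q));
        // Perturb the float result by a tiny amount to emulate a slightly
        // different evaluation order in an optimized kernel; the requantized
        // value may then differ from 'a' by exactly 1.
        const float   y = 1.0f / (1.0f + std::exp(-dequantize(static_cast<uint8_t>(q)))) + 1e-6f;
        const uint8_t b = quantize(y);
        if (std::abs(static_cast<int>(a) - static_cast<int>(b)) > 1)
        {
            std::printf("difference larger than tolerance at q=%d\n", q);
            return 1;
        }
    }
    std::printf("all outputs within an absolute tolerance of 1\n");
    return 0;
}
```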
Diffstat (limited to 'tests/validation')
-rw-r--r-- | tests/validation/NEON/ActivationLayer.cpp | 31
1 file changed, 23 insertions, 8 deletions
diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp
index 45e09fb94f..456b96d0ea 100644
--- a/tests/validation/NEON/ActivationLayer.cpp
+++ b/tests/validation/NEON/ActivationLayer.cpp
@@ -136,12 +136,27 @@ AbsoluteTolerance<float> absolute_tolerance(DataType data_type, ActivationLayerI
     }
 }
 
-/** Tolerance for quantized asymmetric operations */
-#if(!defined(__aarch64__) || defined(__ARM_FEATURE_SVE2))
-constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1);
-#else // !(!defined(__aarch64__) || defined(__ARM_FEATURE_SVE2))
-constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(0);
-#endif // defined(__aarch64__)
+/** Define absolute tolerance of the activation layer for qasymm8.
+ *
+ * @param[in] activation The activation function used.
+ *
+ * @return Absolute tolerance depending on the activation function.
+ */
+AbsoluteTolerance<uint8_t> tolerance_qasymm8(ActivationLayerInfo::ActivationFunction activation)
+{
+    switch(activation)
+    {
+        case ActivationLayerInfo::ActivationFunction::LOGISTIC:
+        case ActivationLayerInfo::ActivationFunction::SQRT:
+        case ActivationLayerInfo::ActivationFunction::TANH:
+        case ActivationLayerInfo::ActivationFunction::HARD_SWISH:
+        case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
+        case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
+            return AbsoluteTolerance<uint8_t>(1);
+        default:
+            return AbsoluteTolerance<uint8_t>(0);
+    }
+}
 
 constexpr AbsoluteTolerance<int16_t> tolerance_qsymm16(1);
 
@@ -285,7 +300,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerQuantizedFixture<uint8_t>, fra
                        framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
 {
     // Validate output
-    validate(Accessor(_target), _reference, tolerance_qasymm8);
+    validate(Accessor(_target), _reference, tolerance_qasymm8(_function));
 }
 TEST_SUITE_END() // QASYMM8
 
@@ -296,7 +311,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerQuantizedFixture<int8_t>, fram
                        framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10.0f) })))
 {
     // Validate output
-    validate(Accessor(_target), _reference, tolerance_qasymm8);
+    validate(Accessor(_target), _reference, tolerance_qasymm8(_function));
 }
 TEST_SUITE_END() // QASYMM8_SIGNED