From 151fa87bd5536fe224e67680078d84e058e50b4d Mon Sep 17 00:00:00 2001 From: Georgios Pinitas Date: Thu, 6 Jun 2019 14:44:44 +0100 Subject: COMPMID-2393: Fix QASYMM8 activation failures. Allowing a tolerance of 1 for armv7 architectures as rounding mode defaults to zero and cannot be easily controlled. Change-Id: I9408e69c705b549c76c185aa6843cfb9ec52f3ae Signed-off-by: Georgios Pinitas Reviewed-on: https://review.mlplatform.org/c/1306 Reviewed-by: Michalis Spyrou Comments-Addressed: Arm Jenkins Tested-by: Arm Jenkins --- tests/validation/NEON/ActivationLayer.cpp | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'tests/validation/NEON') diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp index 1d8fffa903..a5030b94b7 100644 --- a/tests/validation/NEON/ActivationLayer.cpp +++ b/tests/validation/NEON/ActivationLayer.cpp @@ -97,6 +97,13 @@ AbsoluteTolerance absolute_tolerance(DataType data_type, ActivationLayerI } } +/** Tolerance for quantized asymmetric operations */ +#if defined(__aarch64__) +constexpr AbsoluteTolerance tolerance_qasymm8(0); +#else // defined(__aarch64__) +constexpr AbsoluteTolerance tolerance_qasymm8(1); +#endif // defined(__aarch64__) + /** CNN data types */ const auto CNNDataTypes = framework::dataset::make("DataType", { @@ -227,7 +234,7 @@ const auto QuantizedActivationFunctionsDataset = framework::dataset::make("Activ ActivationLayerInfo::ActivationFunction::TANH }); -const auto QuantizedActivationDataset = combine(combine(framework::dataset::make("InPlace", { false, true }), QuantizedActivationFunctionsDataset), +const auto QuantizedActivationDataset = combine(combine(framework::dataset::make("InPlace", { false }), QuantizedActivationFunctionsDataset), framework::dataset::make("AlphaBeta", { 0.5f, 1.f })); TEST_SUITE(Quantized) @@ -238,7 +245,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerQuantizedFixture, fra framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) }))) { // Validate output - validate(Accessor(_target), _reference, relative_tolerance(_data_type, _function), 0.f, absolute_tolerance(_data_type, _function)); + validate(Accessor(_target), _reference, tolerance_qasymm8); } FIXTURE_DATA_TEST_CASE(RunLarge, NEActivationLayerQuantizedFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), QuantizedActivationDataset), framework::dataset::make("DataType", @@ -246,7 +253,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEActivationLayerQuantizedFixture, fra framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) }))) { // Validate output - validate(Accessor(_target), _reference, relative_tolerance(_data_type, _function), 0.f, absolute_tolerance(_data_type, _function)); + validate(Accessor(_target), _reference, tolerance_qasymm8); } TEST_SUITE_END() // QASYMM8 TEST_SUITE_END() // Quantized -- cgit v1.2.1