From 8b9ce8cb390f5c96be8b83b3aaa851a49915c173 Mon Sep 17 00:00:00 2001 From: Freddie Liardet Date: Thu, 4 Nov 2021 14:38:03 +0000 Subject: Increase FP16 tolerance for BatchNormalizationLayer Signed-off-by: Freddie Liardet Change-Id: I453fd099f9f5e26f99819121cd93f2178952f236 Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6581 Reviewed-by: Gunes Bayir Comments-Addressed: Arm Jenkins Tested-by: Arm Jenkins --- tests/validation/NEON/BatchNormalizationLayer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/validation/NEON/BatchNormalizationLayer.cpp b/tests/validation/NEON/BatchNormalizationLayer.cpp index a1ae6971f4..50eaf0c667 100644 --- a/tests/validation/NEON/BatchNormalizationLayer.cpp +++ b/tests/validation/NEON/BatchNormalizationLayer.cpp @@ -51,7 +51,7 @@ namespace RelativeTolerance<float> rel_tolerance_f32(0.05f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */ constexpr AbsoluteTolerance<float> abs_tolerance_f32(0.0001f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */ #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -constexpr AbsoluteTolerance<float> abs_tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */ +constexpr AbsoluteTolerance<float> abs_tolerance_f16(0.015f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */ #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC const auto act_infos = framework::dataset::make("ActivationInfo", -- cgit v1.2.1