From ada5a197d58a4d20a390ea2c5e984825aa133396 Mon Sep 17 00:00:00 2001
From: SiCong Li
Date: Mon, 15 May 2023 14:35:16 +0100
Subject: Raise tolerance number for NEDeconvolutionLayer fp16 tests

Tolerance number was too strict for fp16

Resolves COMPMID-6254

Signed-off-by: SiCong Li
Change-Id: I42a5df21c2545c38ea7234497effd232b43aabf8
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9635
Reviewed-by: Jakub Sujak
Reviewed-by: Omar Al Khatib
Comments-Addressed: Arm Jenkins
Benchmark: Arm Jenkins
Tested-by: Arm Jenkins
---
 tests/validation/NEON/DeconvolutionLayer.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/validation/NEON/DeconvolutionLayer.cpp b/tests/validation/NEON/DeconvolutionLayer.cpp
index a42042bcd1..af25543193 100644
--- a/tests/validation/NEON/DeconvolutionLayer.cpp
+++ b/tests/validation/NEON/DeconvolutionLayer.cpp
@@ -47,7 +47,7 @@ constexpr AbsoluteTolerance<float> tolerance_fp32(0.001f); /**< Tolerance for
 constexpr AbsoluteTolerance<float> tolerance_quantized(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 const RelativeTolerance<half_float::half> tolerance_fp16(half_float::half(0.2f)); /**< Relative tolerance value for comparing reference's output against implementation's output for DataType::F16 */
-constexpr float tolerance_num_fp16 = 0.01f; /**< Tolerance number for FP16 tests -- follows a slightly stricter approach compared to ConvolutionLayer tests */
+constexpr float tolerance_num_fp16 = 0.02f; /**< Tolerance number for FP16 tests -- follows a slightly stricter approach compared to ConvolutionLayer tests */
 #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC*/
 constexpr float tolerance_num_quant = 0.07f; /**< Tolerance number for quantized types */
--
cgit v1.2.1
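
Note (not part of the upstream patch): in Compute Library's validation tests, the "tolerance number" passed alongside a per-element tolerance is, as I understand it, the fraction of output elements allowed to exceed that tolerance before the test fails, so this change relaxes the FP16 bound from 1% to 2% of mismatching elements. The standalone sketch below illustrates that assumed semantics; `validate_with_tolerance_number` is a hypothetical helper written for illustration, not the library's actual `validate()` implementation.

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Hypothetical helper (not Compute Library code): returns true when at most
    // `tolerance_num` (a fraction in [0, 1]) of the elements miss the
    // per-element relative tolerance `rel_tol`.
    bool validate_with_tolerance_number(const std::vector<float> &target,
                                        const std::vector<float> &reference,
                                        float rel_tol,
                                        float tolerance_num)
    {
        std::size_t mismatches = 0;
        for(std::size_t i = 0; i < target.size(); ++i)
        {
            // Guard against division by zero for zero reference values.
            const float denom = std::max(std::fabs(reference[i]), 1e-6f);
            if(std::fabs(target[i] - reference[i]) / denom > rel_tol)
            {
                ++mismatches;
            }
        }
        const float mismatch_ratio = static_cast<float>(mismatches) / static_cast<float>(target.size());
        return mismatch_ratio <= tolerance_num;
    }

    int main()
    {
        const std::vector<float> reference{1.0f, 2.0f, 3.0f, 4.0f};
        const std::vector<float> target{1.0f, 2.0f, 3.0f, 5.0f}; // last element off by 25%

        // 1 of 4 elements (25%) exceeds the 20% relative tolerance, which is
        // more than the 2% of elements allowed by tolerance_num = 0.02f,
        // so this prints "false".
        std::cout << std::boolalpha
                  << validate_with_tolerance_number(target, reference, 0.2f, 0.02f) << '\n';
    }

Under this reading, the per-element tolerance (tolerance_fp16, a 20% relative bound) is unchanged; only the share of elements permitted to miss it grows from 0.01 to 0.02, which accounts for FP16's reduced precision accumulating error in a small minority of deconvolution outputs.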