diff options
author | SiCong Li <sicong.li@arm.com> | 2023-05-15 14:35:16 +0100 |
---|---|---|
committer | Saginsky <Saginsky@review.mlplatform.org> | 2023-05-15 14:56:17 +0000 |
commit | bae01a523dc9fa2a50e9fe350d8e23509489ef91 (patch) | |
tree | f7b0fee6eb4ced5caafe59a24253cd184ae2ca53 /tests/validation | |
parent | 34a95d11a81277e90f1b141384e4beb4dda84854 (diff) | |
download | ComputeLibrary-bae01a523dc9fa2a50e9fe350d8e23509489ef91.tar.gz |
Raise tolerance number for NEDeconvolutionLayer fp16 tests
Tolerance number was too strict for fp16
Resolves COMPMID-6254
Signed-off-by: SiCong Li <sicong.li@arm.com>
Change-Id: I42a5df21c2545c38ea7234497effd232b43aabf8
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9635
Reviewed-by: Jakub Sujak <jakub.sujak@arm.com>
Reviewed-by: Omar Al Khatib <omar.alkhatib@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests/validation')
-rw-r--r-- | tests/validation/NEON/DeconvolutionLayer.cpp | 2 |
1 file changed, 1 insertion, 1 deletion
diff --git a/tests/validation/NEON/DeconvolutionLayer.cpp b/tests/validation/NEON/DeconvolutionLayer.cpp
index a42042bcd1..af25543193 100644
--- a/tests/validation/NEON/DeconvolutionLayer.cpp
+++ b/tests/validation/NEON/DeconvolutionLayer.cpp
@@ -47,7 +47,7 @@ constexpr AbsoluteTolerance<float> tolerance_fp32(0.001f); /**< Tolerance for
 constexpr AbsoluteTolerance<float> tolerance_quantized(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 const RelativeTolerance<half_float::half> tolerance_fp16(half_float::half(0.2f)); /**< Relative tolerance value for comparing reference's output against implementation's output for DataType::F16 */
-constexpr float tolerance_num_fp16 = 0.01f; /**< Tolerance number for FP16 tests -- follows a slightly stricter approach compared to ConvolutionLayer tests */
+constexpr float tolerance_num_fp16 = 0.02f; /**< Tolerance number for FP16 tests -- follows a slightly stricter approach compared to ConvolutionLayer tests */
 #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC*/
 constexpr float tolerance_num_quant = 0.07f; /**< Tolerance number for quantized types */