From 84cb75db5da157977a9c5b2dc5221d45dcdb926e Mon Sep 17 00:00:00 2001 From: Gian Marco Iodice Date: Tue, 21 Aug 2018 11:57:17 +0100 Subject: COMPMID-1524 - Release Branch: CL GEMMDilatedConvolutionLayer FP16 mismatches The mismatches are due by the FP16 reference implementation which does not match exactly the fp16 acceleration we have on Mali. The only workaround for this is to introduce a tolerance number. So if the number of mismatches is greater than 7%, the test fails Change-Id: Iab7bbee6d77eb5b5fda410c350025060c1cc7724 Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/144964 Reviewed-by: Michele DiGiorgio Tested-by: Jenkins --- tests/validation/CL/DilatedConvolutionLayer.cpp | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'tests/validation/CL') diff --git a/tests/validation/CL/DilatedConvolutionLayer.cpp b/tests/validation/CL/DilatedConvolutionLayer.cpp index d7642eb172..9ebde38bcf 100644 --- a/tests/validation/CL/DilatedConvolutionLayer.cpp +++ b/tests/validation/CL/DilatedConvolutionLayer.cpp @@ -43,11 +43,12 @@ namespace validation { namespace { -RelativeTolerance rel_tolerance_f32(0.05f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */ -RelativeTolerance rel_tolerance_f16(half_float::half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */ -constexpr AbsoluteTolerance abs_tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */ -constexpr float abs_tolerance_f32 = 0.001f; /**< Tolerance number */ -constexpr float abs_tolerance_f16 = 0.07f; /**< Tolerance number */ +RelativeTolerance rel_tolerance_f32(0.05f); /**< Relative tolerance value for comparing reference's output against implementation's output for DataType::F32 */ +RelativeTolerance rel_tolerance_f16(half_float::half(0.2)); /**< Relative tolerance value for comparing reference's output against implementation's output for DataType::F16 */ +constexpr AbsoluteTolerance abs_tolerance_qasymm8(0.0); /**< Relative tolerance value for comparing reference's output against implementation's output for quantized data types */ +constexpr float abs_tolerance_f32 = 0.001f; /**< Absolute tolerance value for comparing reference's output against implementation's output for DataType::F32 */ +constexpr float abs_tolerance_f16 = 0.3f; /**< Absolute tolerance value for comparing reference's output against implementation's output for DataType::F16 */ +constexpr float tolerance_num_f16 = 0.07f; /**< Tolerance number for FP16 */ /** CNN data types */ const auto CNNDataTypes = framework::dataset::make("DataType", @@ -177,7 +178,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMDilatedConvolutionLayerFixture, fra framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo()))) { // Validate output - validate(CLAccessor(_target), _reference, rel_tolerance_f16, 0.0f, abs_tolerance_f16); + validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16); } TEST_SUITE_END() -- cgit v1.2.1