From 35aea3776449557c438e264bae7af5b1fe0e5ff6 Mon Sep 17 00:00:00 2001 From: Gian Marco Iodice Date: Fri, 24 Aug 2018 14:30:36 +0100 Subject: COMPMID-1534 - Fixed GEMMDilated and FullyConnected tests for FP16 Accuracy issue Change-Id: Ibbce625251524ee5aa62b15ac78ba0f70efcc7bc Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/145585 Reviewed-by: Pablo Tello Tested-by: Jenkins --- tests/validation/NEON/DilatedConvolutionLayer.cpp | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'tests/validation/NEON/DilatedConvolutionLayer.cpp') diff --git a/tests/validation/NEON/DilatedConvolutionLayer.cpp b/tests/validation/NEON/DilatedConvolutionLayer.cpp index 25b357ebed..56b2dc0ed8 100644 --- a/tests/validation/NEON/DilatedConvolutionLayer.cpp +++ b/tests/validation/NEON/DilatedConvolutionLayer.cpp @@ -45,9 +45,11 @@ namespace { const AbsoluteTolerance tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */ #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -const AbsoluteTolerance tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */ -#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ -constexpr AbsoluteTolerance tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */ +const AbsoluteTolerance abs_tolerance_f16(0.3f); /**< Absolute tolerance value for comparing reference's output against implementation's output for DataType::F16 */ +const RelativeTolerance rel_tolerance_f16(half_float::half(0.2f)); /**< Relative tolerance value for comparing reference's output against implementation's output for DataType::F16 */ +constexpr float tolerance_num_f16 = 0.07f; /**< Tolerance number for FP16 */ +#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ +constexpr AbsoluteTolerance tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */ /** CNN data types */ const auto CNNDataTypes = framework::dataset::make("DataType", @@ -158,7 +160,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerFixture, fra framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo()))) { // Validate output - validate(Accessor(_target), _reference, tolerance_f16); + validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16, abs_tolerance_f16); } FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMDilatedConvolutionLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeDilatedConvolutionLayerDataset(), framework::dataset::make("ReshapeWeights", { true })), @@ -167,7 +169,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMDilatedConvolutionLayerFixture, fra framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo()))) { // Validate output - validate(Accessor(_target), _reference, tolerance_f16); + validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16, abs_tolerance_f16); } TEST_SUITE_END() #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ -- cgit v1.2.1