aboutsummaryrefslogtreecommitdiff
path: root/tests/validation/CL/DilatedConvolutionLayer.cpp
diff options
context:
space:
mode:
authorGian Marco Iodice <gianmarco.iodice@arm.com>2018-08-21 11:57:17 +0100
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:54:54 +0000
commit84cb75db5da157977a9c5b2dc5221d45dcdb926e (patch)
tree114601701a57364bf27745f8776ce93959218fdc /tests/validation/CL/DilatedConvolutionLayer.cpp
parenta7cbc740dc22ab7289c9a006daf39fb35a78248d (diff)
downloadComputeLibrary-84cb75db5da157977a9c5b2dc5221d45dcdb926e.tar.gz
COMPMID-1524 - Release Branch: CL GEMMDilatedConvolutionLayer FP16 mismatches
The mismatches are due to the FP16 reference implementation, which does not exactly match the FP16 acceleration we have on Mali. The only workaround for this is to introduce a tolerance number: if the number of mismatches is greater than 7%, the test fails. Change-Id: Iab7bbee6d77eb5b5fda410c350025060c1cc7724 Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/144964 Reviewed-by: Michele DiGiorgio <michele.digiorgio@arm.com> Tested-by: Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests/validation/CL/DilatedConvolutionLayer.cpp')
-rw-r--r--tests/validation/CL/DilatedConvolutionLayer.cpp13
1 file changed, 7 insertions, 6 deletions
diff --git a/tests/validation/CL/DilatedConvolutionLayer.cpp b/tests/validation/CL/DilatedConvolutionLayer.cpp
index d7642eb172..9ebde38bcf 100644
--- a/tests/validation/CL/DilatedConvolutionLayer.cpp
+++ b/tests/validation/CL/DilatedConvolutionLayer.cpp
@@ -43,11 +43,12 @@ namespace validation
{
namespace
{
-RelativeTolerance<float> rel_tolerance_f32(0.05f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
-RelativeTolerance<half_float::half> rel_tolerance_f16(half_float::half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
-constexpr AbsoluteTolerance<float> abs_tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
-constexpr float abs_tolerance_f32 = 0.001f; /**< Tolerance number */
-constexpr float abs_tolerance_f16 = 0.07f; /**< Tolerance number */
+RelativeTolerance<float> rel_tolerance_f32(0.05f); /**< Relative tolerance value for comparing reference's output against implementation's output for DataType::F32 */
+RelativeTolerance<half_float::half> rel_tolerance_f16(half_float::half(0.2)); /**< Relative tolerance value for comparing reference's output against implementation's output for DataType::F16 */
+constexpr AbsoluteTolerance<float> abs_tolerance_qasymm8(0.0); /**< Absolute tolerance value for comparing reference's output against implementation's output for quantized data types */
+constexpr float abs_tolerance_f32 = 0.001f; /**< Absolute tolerance value for comparing reference's output against implementation's output for DataType::F32 */
+constexpr float abs_tolerance_f16 = 0.3f; /**< Absolute tolerance value for comparing reference's output against implementation's output for DataType::F16 */
+constexpr float tolerance_num_f16 = 0.07f; /**< Tolerance number for FP16 */
/** CNN data types */
const auto CNNDataTypes = framework::dataset::make("DataType",
@@ -177,7 +178,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMDilatedConvolutionLayerFixture<half>, fra
framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
- validate(CLAccessor(_target), _reference, rel_tolerance_f16, 0.0f, abs_tolerance_f16);
+ validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16);
}
TEST_SUITE_END()