author     Gian Marco Iodice <gianmarco.iodice@arm.com>   2018-08-23 10:25:06 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>      2018-11-02 16:54:54 +0000
commit     41acb76af9c8512ac39121103b21ce2aafbcbfe8 (patch)
tree       788106d83c95c88954698a3f7d25d02db1cfe024 /tests/validation
parent     02baf01d75dc639440cf6a3196162f02413661dc (diff)
download   ComputeLibrary-41acb76af9c8512ac39121103b21ce2aafbcbfe8.tar.gz
COMPMID-1534 - Fixing FP16 tests on NEON
- Fixed the GEMMConvolutionLayer test. The issue was the tolerance used to validate the FP16 output (a sketch of the new tolerance policy follows the diffstat below).
- Fixed the DirectConvolutionLayer test. The issue was in the convolver_3x3.
Change-Id: I9d5b906d7e5e32a0a34300d529d6edb804ac1c4e
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/145377
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'tests/validation')
-rw-r--r--  tests/validation/NEON/ConvolutionLayer.cpp        12
-rw-r--r--  tests/validation/NEON/DirectConvolutionLayer.cpp  32
2 files changed, 31 insertions(+), 13 deletions(-)
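
The patch replaces a single absolute FP16 tolerance with three constants: a relative tolerance, an absolute tolerance, and a mismatch ratio (tolerance_num). What follows is a rough sketch of the policy these constants suggest, not ComputeLibrary's actual validate() implementation: an element is assumed to pass when it meets either tolerance, and the tensor passes when at most a tolerance_num fraction of elements fail. All helper names below are hypothetical.

    // Hypothetical sketch of the tolerance policy implied by the new constants;
    // this is NOT ComputeLibrary's validate() implementation.
    #include <cmath>
    #include <cstddef>
    #include <vector>

    // An element passes if it is within the absolute OR the relative tolerance.
    bool element_within_tolerance(float target, float reference, float rel_tol, float abs_tol)
    {
        const float diff = std::fabs(target - reference);
        return diff <= abs_tol || diff <= rel_tol * std::fabs(reference);
    }

    // The tensor passes if the fraction of failing elements stays at or below
    // the allowed mismatch ratio (tolerance_num).
    bool tensor_within_tolerance(const std::vector<float> &target, const std::vector<float> &reference,
                                 float rel_tol, float abs_tol, float tolerance_num)
    {
        std::size_t mismatches = 0;
        for(std::size_t i = 0; i < target.size(); ++i)
        {
            if(!element_within_tolerance(target[i], reference[i], rel_tol, abs_tol))
            {
                ++mismatches;
            }
        }
        return static_cast<float>(mismatches) <= tolerance_num * static_cast<float>(target.size());
    }

Under this reading, rel_tolerance_f16 = 0.2, abs_tolerance_f16 = 0.2 and tolerance_num = 0.07 would let up to 7% of the FP16 outputs fall outside both tolerances before the test fails.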
diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp
index 58f3f0df37..18072e0532 100644
--- a/tests/validation/NEON/ConvolutionLayer.cpp
+++ b/tests/validation/NEON/ConvolutionLayer.cpp
@@ -50,9 +50,11 @@ namespace
 RelativeTolerance<float> rel_tolerance_f32(0.01f);         /**< Relative tolerance for FP32 types */
 const AbsoluteTolerance<float> abs_tolerance_f32(0.002f);  /**< Absolute tolerance for FP32 types */
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-const AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-constexpr AbsoluteTolerance<float> tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
+const RelativeTolerance<half_float::half> rel_tolerance_f16(half_float::half(0.2f)); /**< Relative tolerance value for FP16 types */
+const AbsoluteTolerance<float> abs_tolerance_f16(0.2f);                              /**< Absolute tolerance for FP16 types */
+constexpr float tolerance_num = 0.07f;                                               /**< Tolerance number for the FP16 implementation */
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+constexpr AbsoluteTolerance<float> tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
 
 /** CNN data types */
 const auto CNNDataTypes = framework::dataset::make("DataType",
@@ -206,7 +208,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixture<half>, framework:
                        ActivationFunctionsDataset))
 {
     // Validate output
-    validate(Accessor(_target), _reference, tolerance_f16);
+    validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num, abs_tolerance_f16);
 }
 FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
                        framework::dataset::make("ReshapeWeights", { true })),
@@ -215,7 +217,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMConvolutionLayerFixture<half>, framework:
                        ActivationFunctionsDataset))
 {
     // Validate output
-    validate(Accessor(_target), _reference, tolerance_f16);
+    validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num, abs_tolerance_f16);
 }
 TEST_SUITE_END()
 #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
diff --git a/tests/validation/NEON/DirectConvolutionLayer.cpp b/tests/validation/NEON/DirectConvolutionLayer.cpp
index acd0e5d64b..cd186e05cd 100644
--- a/tests/validation/NEON/DirectConvolutionLayer.cpp
+++ b/tests/validation/NEON/DirectConvolutionLayer.cpp
@@ -43,11 +43,13 @@ namespace validation
 namespace
 {
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-constexpr AbsoluteTolerance<float> tolerance_fp16(0.01f);  /**< Tolerance for half precision floating point tests */
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-constexpr AbsoluteTolerance<float> tolerance_fp32(0.001f); /**< Tolerance for floating point tests */
+const RelativeTolerance<half_float::half> rel_tolerance_f16(half_float::half(0.2f)); /**< Relative tolerance value for FP16 types */
+const AbsoluteTolerance<float> abs_tolerance_f16(0.2f);                              /**< Absolute tolerance for FP16 types */
+constexpr float tolerance_num = 0.07f;                                               /**< Tolerance number for the FP16 implementation */
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+constexpr AbsoluteTolerance<float> tolerance_fp32(0.001f); /**< Tolerance for floating point tests */
 
-/** Direct convolution data set. */
+/** Direct convolution data set for FP32 */
 const auto data_pad_f32 = concat(concat(combine(framework::dataset::make("PadX", { 0, 1 }),
                                                 combine(framework::dataset::make("PadY", { 0, 1 }),
                                                         framework::dataset::make("KernelSize", 3))),
@@ -58,12 +60,26 @@ const auto data_pad_f32 = concat(concat(combine(framework::dataset::make("PadX",
                                         combine(framework::dataset::make("PadY", { 0, 3 }),
                                                 framework::dataset::make("KernelSize", 5))));
 
+/** Direct convolution data set for FP16 */
+const auto data_pad_f16 = concat(combine(framework::dataset::make("PadX", { 0, 1 }),
+                                         combine(framework::dataset::make("PadY", { 0, 1 }),
+                                                 framework::dataset::make("KernelSize", 3))),
+                                 combine(framework::dataset::make("PadX", { 0 }),
+                                         combine(framework::dataset::make("PadY", { 0 }),
+                                                 framework::dataset::make("KernelSize", 1))));
+
 const auto data_f32 = combine(datasets::SmallDirectConvolutionShapes(),
-                              combine(framework::dataset::make("StrideX", { 1, 3 }),
-                                      combine(framework::dataset::make("StrideY", { 1, 3 }),
+                              combine(framework::dataset::make("StrideX", { 1, 2, 3 }),
+                                      combine(framework::dataset::make("StrideY", { 1, 2, 3 }),
                                               combine(data_pad_f32,
                                                       framework::dataset::make("NumKernels", { 1, 4, 8, 16 })))));
 
+const auto data_f16 = combine(datasets::SmallDirectConvolutionShapes(),
+                              combine(framework::dataset::make("StrideX", { 1, 2, 3 }),
+                                      combine(framework::dataset::make("StrideY", { 1, 2, 3 }),
+                                              combine(data_pad_f16,
+                                                      framework::dataset::make("NumKernels", { 1, 4, 8, 16 })))));
+
 /** Activation function Dataset*/
 const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
 {
@@ -152,12 +168,12 @@ using NEDirectConvolutionLayerFixture = DirectConvolutionValidationFixture<Tenso
 TEST_SUITE(Float)
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(Run, NEDirectConvolutionLayerFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(data_f32, framework::dataset::make("DataType", DataType::F16)),
+FIXTURE_DATA_TEST_CASE(Run, NEDirectConvolutionLayerFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(data_f16, framework::dataset::make("DataType", DataType::F16)),
                        ActivationFunctionsDataset),
                        framework::dataset::make("DataLayout", DataLayout::NCHW)))
 {
     // Validate output
-    validate(Accessor(_target), _reference, tolerance_fp16);
+    validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num, abs_tolerance_f16);
 }
 TEST_SUITE_END()
 #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
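
The looser FP16 bounds make sense given how half precision behaves under long accumulations: with an ~11-bit significand, an FP16 accumulator starts dropping addends entirely once its magnitude outgrows them. Below is a standalone illustration of that drift against an FP32 reference; it assumes a compiler with the __fp16 extension (e.g. GCC/Clang on AArch64) and is not part of this patch.

    // Accumulate the same value in FP16 and FP32 and compare the results.
    // Requires the __fp16 extension (available in GCC/Clang on AArch64).
    #include <cstdio>

    int main()
    {
        const int   K = 4096;  // accumulation depth, e.g. a GEMM dot product
        const float v = 0.1f;  // addend fed to both accumulators
        __fp16 acc_f16 = 0;
        float  acc_f32 = 0.0f;
        for(int i = 0; i < K; ++i)
        {
            // Round the running FP16 sum after every step, as an FP16 kernel would.
            acc_f16 = (__fp16)((float)acc_f16 + v);
            acc_f32 += v;
        }
        // The FP16 sum stalls once the spacing between representable values
        // exceeds 2*v (around 256 here), while the FP32 sum reaches ~409.6.
        std::printf("f16 = %f, f32 = %f, rel. error = %f\n",
                    (float)acc_f16, acc_f32,
                    (acc_f32 - (float)acc_f16) / acc_f32);
        return 0;
    }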