-rw-r--r--   tests/validation/NEON/DilatedConvolutionLayer.cpp        12
-rw-r--r--   tests/validation/NEON/FullyConnectedLayer.cpp            10
-rw-r--r--   tests/validation/fixtures/FullyConnectedLayerFixture.h    2
3 files changed, 14 insertions(+), 10 deletions(-)
diff --git a/tests/validation/NEON/DilatedConvolutionLayer.cpp b/tests/validation/NEON/DilatedConvolutionLayer.cpp
index 25b357ebed..56b2dc0ed8 100644
--- a/tests/validation/NEON/DilatedConvolutionLayer.cpp
+++ b/tests/validation/NEON/DilatedConvolutionLayer.cpp
@@ -45,9 +45,11 @@ namespace
{
const AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-const AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-constexpr AbsoluteTolerance<float> tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
+const AbsoluteTolerance<float> abs_tolerance_f16(0.3f); /**< Absolute tolerance value for comparing reference's output against implementation's output for DataType::F16 */
+const RelativeTolerance<half_float::half> rel_tolerance_f16(half_float::half(0.2f)); /**< Relative tolerance value for comparing reference's output against implementation's output for DataType::F16 */
+constexpr float tolerance_num_f16 = 0.07f; /**< Tolerance number for FP16 */
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+constexpr AbsoluteTolerance<float> tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */

/** CNN data types */
const auto CNNDataTypes = framework::dataset::make("DataType",
@@ -158,7 +160,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerFixture<half>, fra
framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
- validate(Accessor(_target), _reference, tolerance_f16);
+ validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16, abs_tolerance_f16);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMDilatedConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeDilatedConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true })),
@@ -167,7 +169,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMDilatedConvolutionLayerFixture<half>, fra
framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
- validate(Accessor(_target), _reference, tolerance_f16);
+ validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16, abs_tolerance_f16);
}
TEST_SUITE_END()
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
diff --git a/tests/validation/NEON/FullyConnectedLayer.cpp b/tests/validation/NEON/FullyConnectedLayer.cpp
index 3aeba7a969..8d64345254 100644
--- a/tests/validation/NEON/FullyConnectedLayer.cpp
+++ b/tests/validation/NEON/FullyConnectedLayer.cpp
@@ -45,8 +45,10 @@ namespace
/** Tolerance for float operations */
constexpr RelativeTolerance<float> tolerance_f32(0.01f);
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-constexpr RelativeTolerance<float> tolerance_f16(0.01f);
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC*/
+const AbsoluteTolerance<float> abs_tolerance_f16(0.3f); /**< Absolute tolerance value for comparing reference's output against implementation's output for DataType::F16 */
+const RelativeTolerance<half_float::half> rel_tolerance_f16(half_float::half(0.2f)); /**< Relative tolerance value for comparing reference's output against implementation's output for DataType::F16 */
+constexpr float tolerance_num_f16 = 0.07f; /**< Tolerance number for FP16 */
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC*/

/** Tolerance for quantized asymmetric operations */
constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1);
@@ -174,14 +176,14 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerFixture<half>, framework::
framework::dataset::make("DataType", DataType::F16)))
{
// Validate output
- validate(Accessor(_target), _reference, tolerance_f16);
+ validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16, abs_tolerance_f16);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEFullyConnectedLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeFullyConnectedLayerDataset(),
FullyConnectedParameters),
framework::dataset::make("DataType", DataType::F16)))
{
// Validate output
- validate(Accessor(_target), _reference, tolerance_f16);
+ validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16, abs_tolerance_f16);
}
TEST_SUITE_END()
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
diff --git a/tests/validation/fixtures/FullyConnectedLayerFixture.h b/tests/validation/fixtures/FullyConnectedLayerFixture.h
index dd67f9aada..1e4a74445f 100644
--- a/tests/validation/fixtures/FullyConnectedLayerFixture.h
+++ b/tests/validation/fixtures/FullyConnectedLayerFixture.h
@@ -83,7 +83,7 @@ protected:
}
else if(is_data_type_float(_data_type))
{
- std::uniform_real_distribution<> distribution(0.5f, 1.f);
+ std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
library->fill(tensor, distribution, i);
}
else
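
Note on the new FP16 validation calls: instead of a single relative tolerance, validate() is now given a relative tolerance, a tolerance number, and an absolute tolerance. As a rough sketch of the intent (assumed semantics, not the framework's actual implementation): an element is acceptable if it meets either the relative or the absolute tolerance, and the test still passes as long as the fraction of out-of-tolerance elements does not exceed the tolerance number. The helper below is hypothetical and only illustrates that acceptance logic.

// Illustrative sketch only: this is NOT the Compute Library validate() API,
// just an approximation of a combined relative/absolute tolerance check
// with an allowed mismatch ratio.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

bool within_tolerances(const std::vector<float> &target,
                       const std::vector<float> &reference,
                       float rel_tolerance,  // e.g. 0.2f  (rel_tolerance_f16)
                       float abs_tolerance,  // e.g. 0.3f  (abs_tolerance_f16)
                       float tolerance_num)  // e.g. 0.07f (allowed mismatch ratio)
{
    std::size_t mismatches = 0;
    for(std::size_t i = 0; i < reference.size(); ++i)
    {
        const float abs_err = std::fabs(target[i] - reference[i]);
        const float rel_err = abs_err / std::max(std::fabs(reference[i]), 1e-6f);
        // An element counts as a mismatch only if it fails both checks.
        if(abs_err > abs_tolerance && rel_err > rel_tolerance)
        {
            ++mismatches;
        }
    }
    // The overall check passes if the mismatch ratio stays within tolerance_num.
    return static_cast<float>(mismatches) <= tolerance_num * static_cast<float>(reference.size());
}

With FullyConnectedLayerFixture.h now drawing inputs from [-1.0, 1.0] instead of [0.5, 1.0], reference values can fall close to zero, where a purely relative check is unstable; the added absolute tolerance covers that case.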