diff options
author | Moritz Pflanzer <moritz.pflanzer@arm.com> | 2017-08-02 09:42:27 +0100 |
---|---|---|
committer | Anthony Barbier <anthony.barbier@arm.com> | 2018-09-17 14:16:42 +0100 |
commit | 6106a4de410e7cc59515dd889e159bee7aa45d35 (patch) | |
tree | 80dd491ae3865dd00a8e272bbf66b72274a0efe2 /tests/validation_new/CL | |
parent | d8e765ba79772ecc3deda89ae3adab903c744296 (diff) | |
download | ComputeLibrary-6106a4de410e7cc59515dd889e159bee7aa45d35.tar.gz |
COMPMID-415: Use absolute and relative tolerance
Change-Id: Ib779fa307e05fa67172ddaf521239b4c746debc8
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/82229
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Diffstat (limited to 'tests/validation_new/CL')
-rw-r--r-- | tests/validation_new/CL/ActivationLayer.cpp | 24 | ||||
-rw-r--r-- | tests/validation_new/CL/ConvolutionLayer.cpp | 6 | ||||
-rw-r--r-- | tests/validation_new/CL/DirectConvolutionLayer.cpp | 4 | ||||
-rw-r--r-- | tests/validation_new/CL/NormalizationLayer.cpp | 8 | ||||
-rw-r--r-- | tests/validation_new/CL/SoftmaxLayer.cpp | 6 |
5 files changed, 25 insertions, 23 deletions
diff --git a/tests/validation_new/CL/ActivationLayer.cpp b/tests/validation_new/CL/ActivationLayer.cpp
index e1cc4e54e2..7f9bcccd11 100644
--- a/tests/validation_new/CL/ActivationLayer.cpp
+++ b/tests/validation_new/CL/ActivationLayer.cpp
@@ -51,46 +51,48 @@ namespace
 *
 * @return Tolerance depending on the activation function.
 */
-float tolerance(ActivationLayerInfo::ActivationFunction activation, DataType data_type)
+AbsoluteTolerance<float> tolerance(ActivationLayerInfo::ActivationFunction activation, DataType data_type)
 {
+    constexpr float epsilon = std::numeric_limits<float>::epsilon();
+
     switch(activation)
     {
         case ActivationLayerInfo::ActivationFunction::LINEAR:
-            return data_type == DataType::F16 ? 0.2f : 0.f;
+            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.2f : epsilon);
         case ActivationLayerInfo::ActivationFunction::SQUARE:
-            return data_type == DataType::F16 ? 0.1f : 0.f;
+            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.1f : epsilon);
         case ActivationLayerInfo::ActivationFunction::LOGISTIC:
             if(is_data_type_fixed_point(data_type))
             {
-                return 5.f;
+                return AbsoluteTolerance<float>(5.f);
             }
             else
             {
-                return data_type == DataType::F16 ? 0.001f : 0.f;
+                return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.001f : epsilon);
             }
         case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
-            return data_type == DataType::F16 ? 0.00001f : 0.f;
+            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.00001f : epsilon);
         case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
         case ActivationLayerInfo::ActivationFunction::SQRT:
             if(is_data_type_fixed_point(data_type))
             {
-                return 5.f;
+                return AbsoluteTolerance<float>(5.f);
             }
             else
             {
-                return data_type == DataType::F16 ? 0.01f : 0.00001f;
+                return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.01f : 0.00001f);
             }
         case ActivationLayerInfo::ActivationFunction::TANH:
             if(is_data_type_fixed_point(data_type))
             {
-                return 5.f;
+                return AbsoluteTolerance<float>(5.f);
             }
             else
             {
-                return data_type == DataType::F16 ? 0.001f : 0.00001f;
+                return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.001f : 0.00001f);
             }
         default:
-            return 0.f;
+            return AbsoluteTolerance<float>(epsilon);
     }
 }
diff --git a/tests/validation_new/CL/ConvolutionLayer.cpp b/tests/validation_new/CL/ConvolutionLayer.cpp
index 398feb7966..9703e0bcba 100644
--- a/tests/validation_new/CL/ConvolutionLayer.cpp
+++ b/tests/validation_new/CL/ConvolutionLayer.cpp
@@ -44,9 +44,9 @@ namespace validation
 {
 namespace
 {
-constexpr float tolerance_f32 = 1e-03f; /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
-constexpr float tolerance_f16 = 0.1f; /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
-constexpr float tolerance_q = 1.0f; /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
+constexpr AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
+constexpr AbsoluteTolerance<float> tolerance_f16(0.1f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
+constexpr AbsoluteTolerance<float> tolerance_q(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
 
 /** CNN data types */
 const auto CNNDataTypes = framework::dataset::make("DataType",
diff --git a/tests/validation_new/CL/DirectConvolutionLayer.cpp b/tests/validation_new/CL/DirectConvolutionLayer.cpp
index 9cffabae42..1a7cd6b3fb 100644
--- a/tests/validation_new/CL/DirectConvolutionLayer.cpp
+++ b/tests/validation_new/CL/DirectConvolutionLayer.cpp
@@ -43,8 +43,8 @@ namespace validation
 {
 namespace
 {
-constexpr float tolerance_fp16 = 0.1f; /**< Tolerance for floating point tests */
-constexpr float tolerance_fp32 = 0.001f; /**< Tolerance for floating point tests */
+constexpr AbsoluteTolerance<float> tolerance_fp16(0.1f); /**< Tolerance for floating point tests */
+constexpr AbsoluteTolerance<float> tolerance_fp32(0.001f); /**< Tolerance for floating point tests */
 
 /** Direct convolution data set. */
 const auto data = combine(datasets::SmallDirectConvolutionShapes(),
diff --git a/tests/validation_new/CL/NormalizationLayer.cpp b/tests/validation_new/CL/NormalizationLayer.cpp
index 22ca96423a..ebef18a8bd 100644
--- a/tests/validation_new/CL/NormalizationLayer.cpp
+++ b/tests/validation_new/CL/NormalizationLayer.cpp
@@ -46,12 +46,12 @@ namespace
 {
 /** Tolerance for float operations */
 #ifdef ARM_COMPUTE_ENABLE_FP16
-constexpr float tolerance_f16 = 0.001f;
+constexpr AbsoluteTolerance<float> tolerance_f16(0.001f);
 #endif /* ARM_COMPUTE_ENABLE_FP16 */
-constexpr float tolerance_f32 = 0.00001f;
+constexpr AbsoluteTolerance<float> tolerance_f32(0.00001f);
 
 /** Tolerance for fixed point operations */
-constexpr int8_t tolerance_qs8 = 2;
-constexpr int16_t tolerance_qs16 = 2;
+constexpr AbsoluteTolerance<int8_t> tolerance_qs8(2);
+constexpr AbsoluteTolerance<int16_t> tolerance_qs16(2);
 
 /** Input data set. */
 const auto NormalizationDataset = combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("NormType", { NormType::IN_MAP_1D, NormType::CROSS_MAP })),
diff --git a/tests/validation_new/CL/SoftmaxLayer.cpp b/tests/validation_new/CL/SoftmaxLayer.cpp
index 3edc7b2d6e..d13236a2f9 100644
--- a/tests/validation_new/CL/SoftmaxLayer.cpp
+++ b/tests/validation_new/CL/SoftmaxLayer.cpp
@@ -44,10 +44,10 @@ namespace validation
 namespace
 {
 /** Tolerance for float operations */
-constexpr float tolerance_f16 = 0.002f;
-constexpr float tolerance_f32 = 0.000001f;
+constexpr AbsoluteTolerance<float> tolerance_f16(0.002f);
+constexpr AbsoluteTolerance<float> tolerance_f32(0.000001f);
 
 /** Tolerance for fixed point operations */
-constexpr int8_t tolerance_fixed_point = 2;
+constexpr AbsoluteTolerance<int8_t> tolerance_fixed_point(2);
 
 /** CNN data types */
 const auto CNNDataTypes = framework::dataset::make("DataType",