From 6106a4de410e7cc59515dd889e159bee7aa45d35 Mon Sep 17 00:00:00 2001
From: Moritz Pflanzer
Date: Wed, 2 Aug 2017 09:42:27 +0100
Subject: COMPMID-415: Use absolute and relative tolerance

Change-Id: Ib779fa307e05fa67172ddaf521239b4c746debc8
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/82229
Reviewed-by: Anthony Barbier
Tested-by: Kaizen
---
 tests/validation_new/CL/ActivationLayer.cpp        |  24 +--
 tests/validation_new/CL/ConvolutionLayer.cpp       |   6 +-
 tests/validation_new/CL/DirectConvolutionLayer.cpp |   4 +-
 tests/validation_new/CL/NormalizationLayer.cpp     |   8 +-
 tests/validation_new/CL/SoftmaxLayer.cpp           |   6 +-
 tests/validation_new/NEON/ActivationLayer.cpp      |  12 +-
 tests/validation_new/NEON/ConvolutionLayer.cpp     |   8 +-
 .../validation_new/NEON/DirectConvolutionLayer.cpp |   8 +-
 tests/validation_new/NEON/NormalizationLayer.cpp   |   8 +-
 tests/validation_new/NEON/SoftmaxLayer.cpp         |   6 +-
 tests/validation_new/Validation.cpp                |  34 ++--
 tests/validation_new/Validation.h                  | 174 ++++++++++++++++-----
 12 files changed, 201 insertions(+), 97 deletions(-)

(limited to 'tests/validation_new')

diff --git a/tests/validation_new/CL/ActivationLayer.cpp b/tests/validation_new/CL/ActivationLayer.cpp
index e1cc4e54e2..7f9bcccd11 100644
--- a/tests/validation_new/CL/ActivationLayer.cpp
+++ b/tests/validation_new/CL/ActivationLayer.cpp
@@ -51,46 +51,48 @@ namespace
  *
  * @return Tolerance depending on the activation function.
  */
-float tolerance(ActivationLayerInfo::ActivationFunction activation, DataType data_type)
+AbsoluteTolerance<float> tolerance(ActivationLayerInfo::ActivationFunction activation, DataType data_type)
 {
+    constexpr float epsilon = std::numeric_limits<float>::epsilon();
+
     switch(activation)
     {
        case ActivationLayerInfo::ActivationFunction::LINEAR:
-            return data_type == DataType::F16 ? 0.2f : 0.f;
+            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.2f : epsilon);
        case ActivationLayerInfo::ActivationFunction::SQUARE:
-            return data_type == DataType::F16 ? 0.1f : 0.f;
+            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.1f : epsilon);
        case ActivationLayerInfo::ActivationFunction::LOGISTIC:
            if(is_data_type_fixed_point(data_type))
            {
-                return 5.f;
+                return AbsoluteTolerance<float>(5.f);
            }
            else
            {
-                return data_type == DataType::F16 ? 0.001f : 0.f;
+                return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.001f : epsilon);
            }
        case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
-            return data_type == DataType::F16 ? 0.00001f : 0.f;
+            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.00001f : epsilon);
        case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
        case ActivationLayerInfo::ActivationFunction::SQRT:
            if(is_data_type_fixed_point(data_type))
            {
-                return 5.f;
+                return AbsoluteTolerance<float>(5.f);
            }
            else
            {
-                return data_type == DataType::F16 ? 0.01f : 0.00001f;
+                return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.01f : 0.00001f);
            }
        case ActivationLayerInfo::ActivationFunction::TANH:
            if(is_data_type_fixed_point(data_type))
            {
-                return 5.f;
+                return AbsoluteTolerance<float>(5.f);
            }
            else
            {
-                return data_type == DataType::F16 ? 0.001f : 0.00001f;
+                return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.001f : 0.00001f);
            }
        default:
-            return 0.f;
+            return AbsoluteTolerance<float>(epsilon);
    }
 }
diff --git a/tests/validation_new/CL/ConvolutionLayer.cpp b/tests/validation_new/CL/ConvolutionLayer.cpp
index 398feb7966..9703e0bcba 100644
--- a/tests/validation_new/CL/ConvolutionLayer.cpp
+++ b/tests/validation_new/CL/ConvolutionLayer.cpp
@@ -44,9 +44,9 @@ namespace validation
 {
 namespace
 {
-constexpr float tolerance_f32 = 1e-03f; /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
-constexpr float tolerance_f16 = 0.1f;   /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
-constexpr float tolerance_q   = 1.0f;   /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
+constexpr AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
+constexpr AbsoluteTolerance<float> tolerance_f16(0.1f);   /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
+constexpr AbsoluteTolerance<float> tolerance_q(1.0f);     /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
 
 /** CNN data types */
 const auto CNNDataTypes = framework::dataset::make("DataType",
diff --git a/tests/validation_new/CL/DirectConvolutionLayer.cpp b/tests/validation_new/CL/DirectConvolutionLayer.cpp
index 9cffabae42..1a7cd6b3fb 100644
--- a/tests/validation_new/CL/DirectConvolutionLayer.cpp
+++ b/tests/validation_new/CL/DirectConvolutionLayer.cpp
@@ -43,8 +43,8 @@ namespace validation
 {
 namespace
 {
-constexpr float tolerance_fp16 = 0.1f;   /**< Tolerance for floating point tests */
-constexpr float tolerance_fp32 = 0.001f; /**< Tolerance for floating point tests */
+constexpr AbsoluteTolerance<float> tolerance_fp16(0.1f);   /**< Tolerance for floating point tests */
+constexpr AbsoluteTolerance<float> tolerance_fp32(0.001f); /**< Tolerance for floating point tests */
 
 /** Direct convolution data set. */
 const auto data = combine(datasets::SmallDirectConvolutionShapes(),
diff --git a/tests/validation_new/CL/NormalizationLayer.cpp b/tests/validation_new/CL/NormalizationLayer.cpp
index 22ca96423a..ebef18a8bd 100644
--- a/tests/validation_new/CL/NormalizationLayer.cpp
+++ b/tests/validation_new/CL/NormalizationLayer.cpp
@@ -46,12 +46,12 @@ namespace
 {
 /** Tolerance for float operations */
 #ifdef ARM_COMPUTE_ENABLE_FP16
-constexpr float tolerance_f16 = 0.001f;
+constexpr AbsoluteTolerance<float> tolerance_f16(0.001f);
 #endif /* ARM_COMPUTE_ENABLE_FP16 */
-constexpr float tolerance_f32 = 0.00001f;
+constexpr AbsoluteTolerance<float> tolerance_f32(0.00001f);
 /** Tolerance for fixed point operations */
-constexpr int8_t tolerance_qs8 = 2;
-constexpr int16_t tolerance_qs16 = 2;
+constexpr AbsoluteTolerance<int8_t>  tolerance_qs8(2);
+constexpr AbsoluteTolerance<int16_t> tolerance_qs16(2);
 
 /** Input data set. */
 const auto NormalizationDataset = combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("NormType", { NormType::IN_MAP_1D, NormType::CROSS_MAP })),
diff --git a/tests/validation_new/CL/SoftmaxLayer.cpp b/tests/validation_new/CL/SoftmaxLayer.cpp
index 3edc7b2d6e..d13236a2f9 100644
--- a/tests/validation_new/CL/SoftmaxLayer.cpp
+++ b/tests/validation_new/CL/SoftmaxLayer.cpp
@@ -44,10 +44,10 @@ namespace validation
 namespace
 {
 /** Tolerance for float operations */
-constexpr float tolerance_f16 = 0.002f;
-constexpr float tolerance_f32 = 0.000001f;
+constexpr AbsoluteTolerance<float> tolerance_f16(0.002f);
+constexpr AbsoluteTolerance<float> tolerance_f32(0.000001f);
 /** Tolerance for fixed point operations */
-constexpr int8_t tolerance_fixed_point = 2;
+constexpr AbsoluteTolerance<int8_t> tolerance_fixed_point(2);
 
 /** CNN data types */
 const auto CNNDataTypes = framework::dataset::make("DataType",
diff --git a/tests/validation_new/NEON/ActivationLayer.cpp b/tests/validation_new/NEON/ActivationLayer.cpp
index db0faaecdf..bc2fe603fd 100644
--- a/tests/validation_new/NEON/ActivationLayer.cpp
+++ b/tests/validation_new/NEON/ActivationLayer.cpp
@@ -51,7 +51,7 @@ namespace
  *
  * @return Tolerance depending on the activation function.
  */
-float tolerance(DataType data_type, ActivationLayerInfo::ActivationFunction activation)
+AbsoluteTolerance<float> tolerance(DataType data_type, ActivationLayerInfo::ActivationFunction activation)
 {
     switch(activation)
     {
@@ -62,17 +62,17 @@ float tolerance(DataType data_type, ActivationLayerInfo::ActivationFunction acti
            switch(data_type)
            {
                case DataType::QS8:
-                    return 5.f;
+                    return AbsoluteTolerance<float>(5.f);
                case DataType::QS16:
-                    return 11.f;
+                    return AbsoluteTolerance<float>(11.f);
                case DataType::F16:
-                    return 0.01f;
+                    return AbsoluteTolerance<float>(0.01f);
                default:
-                    return 0.00001f;
+                    return AbsoluteTolerance<float>(0.00001f);
            }
            break;
        default:
-            return 0.f;
+            return AbsoluteTolerance<float>(0.f);
    }
 }
diff --git a/tests/validation_new/NEON/ConvolutionLayer.cpp b/tests/validation_new/NEON/ConvolutionLayer.cpp
index af33cc0707..1efff02428 100644
--- a/tests/validation_new/NEON/ConvolutionLayer.cpp
+++ b/tests/validation_new/NEON/ConvolutionLayer.cpp
@@ -44,11 +44,11 @@ namespace validation
 {
 namespace
 {
-const float tolerance_f32 = 1e-03f; /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
+const AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
 #ifdef ARM_COMPUTE_ENABLE_FP16
-const float tolerance_f16 = 0.01f;  /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
-#endif /* ARM_COMPUTE_ENABLE_FP16 */
-const float tolerance_q = 1.0f;     /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
+const AbsoluteTolerance<float> tolerance_f16(0.01f);  /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
+#endif /* ARM_COMPUTE_ENABLE_FP16 */
+const AbsoluteTolerance<float> tolerance_q(1.0f);     /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
 
 /** CNN data types */
 const auto CNNDataTypes = framework::dataset::make("DataType",
diff --git a/tests/validation_new/NEON/DirectConvolutionLayer.cpp b/tests/validation_new/NEON/DirectConvolutionLayer.cpp
index a46f5a5dcc..90c4abe9c2 100644
--- a/tests/validation_new/NEON/DirectConvolutionLayer.cpp
+++ b/tests/validation_new/NEON/DirectConvolutionLayer.cpp
@@ -43,11 +43,11 @@ namespace validation
 {
 namespace
 {
-constexpr float tolerance_qs = 1.f;      /**< Tolerance for fixed point tests */
+constexpr AbsoluteTolerance<float> tolerance_qs(1.f);      /**< Tolerance for fixed point tests */
 #ifdef ARM_COMPUTE_ENABLE_FP16
-constexpr float tolerance_fp16 = 0.01f;  /**< Tolerance for half precision floating point tests */
-#endif /* ARM_COMPUTE_ENABLE_FP16 */
-constexpr float tolerance_fp32 = 0.001f; /**< Tolerance for floating point tests */
+constexpr AbsoluteTolerance<float> tolerance_fp16(0.01f);  /**< Tolerance for half precision floating point tests */
+#endif /* ARM_COMPUTE_ENABLE_FP16 */
+constexpr AbsoluteTolerance<float> tolerance_fp32(0.001f); /**< Tolerance for floating point tests */
 
 /** Direct convolution data set. */
 const auto data = combine(datasets::SmallDirectConvolutionShapes(),
diff --git a/tests/validation_new/NEON/NormalizationLayer.cpp b/tests/validation_new/NEON/NormalizationLayer.cpp
index dfe793131a..1da2ed0874 100644
--- a/tests/validation_new/NEON/NormalizationLayer.cpp
+++ b/tests/validation_new/NEON/NormalizationLayer.cpp
@@ -46,12 +46,12 @@ namespace
 {
 /** Tolerance for float operations */
 #ifdef ARM_COMPUTE_ENABLE_FP16
-constexpr float tolerance_f16 = 0.001f;
+constexpr AbsoluteTolerance<float> tolerance_f16(0.001f);
 #endif /* ARM_COMPUTE_ENABLE_FP16 */
-constexpr float tolerance_f32 = 0.00001f;
+constexpr AbsoluteTolerance<float> tolerance_f32(0.00001f);
 /** Tolerance for fixed point operations */
-constexpr int8_t tolerance_qs8 = 2;
-constexpr int16_t tolerance_qs16 = 3;
+constexpr AbsoluteTolerance<int8_t>  tolerance_qs8(2);
+constexpr AbsoluteTolerance<int16_t> tolerance_qs16(3);
 
 /** Input data set. */
 const auto NormalizationDataset = combine(combine(combine(datasets::SmallShapes(), datasets::NormalizationTypes()), framework::dataset::make("NormalizationSize", 3, 9, 2)),
diff --git a/tests/validation_new/NEON/SoftmaxLayer.cpp b/tests/validation_new/NEON/SoftmaxLayer.cpp
index ce5b8b8359..337ee29986 100644
--- a/tests/validation_new/NEON/SoftmaxLayer.cpp
+++ b/tests/validation_new/NEON/SoftmaxLayer.cpp
@@ -44,12 +44,12 @@ namespace validation
 namespace
 {
 /** Tolerance for float operations */
-constexpr float tolerance_f32 = 0.000001f;
+constexpr AbsoluteTolerance<float> tolerance_f32(0.000001f);
 #ifdef ARM_COMPUTE_ENABLE_FP16
-const float tolerance_f16 = 0.0001f;
+constexpr AbsoluteTolerance<float> tolerance_f16(0.0001f);
 #endif /* ARM_COMPUTE_ENABLE_FP16*/
 /** Tolerance for fixed point operations */
-constexpr int8_t tolerance_fixed_point = 2;
+constexpr AbsoluteTolerance<int8_t> tolerance_fixed_point(2);
 
 /** CNN data types */
 const auto CNNDataTypes = framework::dataset::make("DataType",
diff --git a/tests/validation_new/Validation.cpp b/tests/validation_new/Validation.cpp
index a492bb6505..fec7c10939 100644
--- a/tests/validation_new/Validation.cpp
+++ b/tests/validation_new/Validation.cpp
@@ -128,17 +128,16 @@ void check_border_element(const IAccessor &tensor, const Coordinates &id,
     {
         const size_t channel_offset = channel * channel_size;
         const double target         = get_double_data(ptr + channel_offset, tensor.data_type());
-        const double ref            = get_double_data(static_cast<const uint8_t *>(border_value) + channel_offset, tensor.data_type());
-        const bool   equal          = is_equal(target, ref);
+        const double reference      = get_double_data(static_cast<const uint8_t *>(border_value) + channel_offset, tensor.data_type());
 
-        ARM_COMPUTE_TEST_INFO("id = " << id);
-        ARM_COMPUTE_TEST_INFO("channel = " << channel);
-        ARM_COMPUTE_TEST_INFO("target = " << std::setprecision(5) << target);
-        ARM_COMPUTE_TEST_INFO("reference = " << std::setprecision(5) << ref);
ARM_COMPUTE_TEST_INFO("reference = " << std::setprecision(5) << ref); - ARM_COMPUTE_EXPECT_EQUAL(target, ref, framework::LogLevel::DEBUG); - - if(!equal) + if(!compare, double>(target, reference)) { + ARM_COMPUTE_TEST_INFO("id = " << id); + ARM_COMPUTE_TEST_INFO("channel = " << channel); + ARM_COMPUTE_TEST_INFO("target = " << std::setprecision(5) << target); + ARM_COMPUTE_TEST_INFO("reference = " << std::setprecision(5) << reference); + ARM_COMPUTE_EXPECT_EQUAL(target, reference, framework::LogLevel::DEBUG); + ++num_mismatches; } @@ -191,17 +190,16 @@ void validate(const IAccessor &tensor, const void *reference_value) { const size_t channel_offset = channel * channel_size; const double target = get_double_data(ptr + channel_offset, tensor.data_type()); - const double ref = get_double_data(reference_value, tensor.data_type()); - const bool equal = is_equal(target, ref); + const double reference = get_double_data(reference_value, tensor.data_type()); - ARM_COMPUTE_TEST_INFO("id = " << id); - ARM_COMPUTE_TEST_INFO("channel = " << channel); - ARM_COMPUTE_TEST_INFO("target = " << std::setprecision(5) << target); - ARM_COMPUTE_TEST_INFO("reference = " << std::setprecision(5) << ref); - ARM_COMPUTE_EXPECT_EQUAL(target, ref, framework::LogLevel::DEBUG); - - if(!equal) + if(!compare, double>(target, reference)) { + ARM_COMPUTE_TEST_INFO("id = " << id); + ARM_COMPUTE_TEST_INFO("channel = " << channel); + ARM_COMPUTE_TEST_INFO("target = " << std::setprecision(5) << target); + ARM_COMPUTE_TEST_INFO("reference = " << std::setprecision(5) << reference); + ARM_COMPUTE_EXPECT_EQUAL(target, reference, framework::LogLevel::DEBUG); + ++num_mismatches; } diff --git a/tests/validation_new/Validation.h b/tests/validation_new/Validation.h index 91b17145be..b21d12932a 100644 --- a/tests/validation_new/Validation.h +++ b/tests/validation_new/Validation.h @@ -43,6 +43,88 @@ namespace test { namespace validation { +/** Class reprensenting an absolute tolerance value. */ +template +class AbsoluteTolerance +{ +public: + /** Underlying type. */ + using value_type = T; + + /* Default constructor. + * + * Initialises the tolerance to 0. + */ + AbsoluteTolerance() = default; + + /** Constructor. + * + * @param[in] value Absolute tolerance value. + */ + explicit constexpr AbsoluteTolerance(T value) + : _value{ value } + { + } + + /** Implicit conversion to the underlying type. */ + constexpr operator T() const + { + return _value; + } + +private: + T _value{ std::numeric_limits::epsilon() }; +}; + +/** Class reprensenting a relative tolerance value. */ +class RelativeTolerance +{ +public: + /** Underlying type. */ + using value_type = double; + + /* Default constructor. + * + * Initialises the tolerance to 0. + */ + RelativeTolerance() = default; + + /** Constructor. + * + * @param[in] value Relative tolerance value. + */ + explicit constexpr RelativeTolerance(value_type value) + : _value{ value } + { + } + + /** Implicit conversion to the underlying type. */ + constexpr operator value_type() const + { + return _value; + } + +private: + value_type _value{ 0 }; +}; + +/** Print AbsoluteTolerance type. */ +template +inline ::std::ostream &operator<<(::std::ostream &os, const AbsoluteTolerance &tolerance) +{ + os << static_cast::value_type>(tolerance); + + return os; +} + +/** Print RelativeTolerance type. 
+inline ::std::ostream &operator<<(::std::ostream &os, const RelativeTolerance &tolerance)
+{
+    os << static_cast<RelativeTolerance::value_type>(tolerance);
+
+    return os;
+}
+
 template <typename T>
 bool compare_dimensions(const Dimensions<T> &dimensions1, const Dimensions<T> &dimensions2)
 {
@@ -86,8 +168,8 @@ void validate(const arm_compute::PaddingSize &padding, const arm_compute::Paddin
  * reference tensor and test tensor is multiple of wrap_range), but such errors would be detected by
  * other test cases.
  */
-template <typename T, typename U>
-void validate(const IAccessor &tensor, const SimpleTensor<T> &reference, U tolerance_value = U(0), float tolerance_number = 0.f);
+template <typename T, typename U = AbsoluteTolerance<T>>
+void validate(const IAccessor &tensor, const SimpleTensor<T> &reference, U tolerance_value = U(), float tolerance_number = 0.f);
 
 /** Validate tensors with valid region.
  *
@@ -99,8 +181,8 @@
  * reference tensor and test tensor is multiple of wrap_range), but such errors would be detected by
  * other test cases.
  */
-template <typename T, typename U>
-void validate(const IAccessor &tensor, const SimpleTensor<T> &reference, const ValidRegion &valid_region, U tolerance_value = U(0), float tolerance_number = 0.f);
+template <typename T, typename U = AbsoluteTolerance<T>>
+void validate(const IAccessor &tensor, const SimpleTensor<T> &reference, const ValidRegion &valid_region, U tolerance_value = U(), float tolerance_number = 0.f);
 
 /** Validate tensors against constant value.
  *
@@ -126,42 +208,66 @@ void validate(std::vector<unsigned int> classified_labels, std::vector<unsigned
 
-template <typename T, typename U = T>
-void validate(T target, T ref, U tolerance_abs_error = std::numeric_limits<T>::epsilon(), double tolerance_relative_error = 0.0001f);
+template <typename T, typename U>
+void validate(T target, T reference, U tolerance = AbsoluteTolerance<T>());
 
-template <typename T, typename U = T>
-bool is_equal(T target, T ref, U max_absolute_error = std::numeric_limits<T>::epsilon(), double max_relative_error = 0.0001f)
+template <typename T>
+struct compare_base
 {
-    if(!std::isfinite(target) || !std::isfinite(ref))
+    compare_base(typename T::value_type target, typename T::value_type reference, T tolerance = T(0))
+        : _target{ target }, _reference{ reference }, _tolerance{ tolerance }
     {
-        return false;
     }
 
-    // No need further check if they are equal
-    if(ref == target)
-    {
-        return true;
-    }
+    typename T::value_type _target{};
+    typename T::value_type _reference{};
+    T                      _tolerance{};
+};
 
-    // Need this check for the situation when the two values close to zero but have different sign
-    if(std::abs(std::abs(ref) - std::abs(target)) <= max_absolute_error)
-    {
-        return true;
-    }
+template <typename T, typename U>
+struct compare;
 
-    double relative_error = 0;
+template <typename U>
+struct compare<AbsoluteTolerance<U>, U> : public compare_base<AbsoluteTolerance<U>>
+{
+    using compare_base<AbsoluteTolerance<U>>::compare_base;
 
-    if(std::abs(target) > std::abs(ref))
+    operator bool()
     {
-        relative_error = std::abs(static_cast<double>(target - ref) / target);
+        if(!std::isfinite(this->_target) || !std::isfinite(this->_reference))
+        {
+            return false;
+        }
+        else if(this->_target == this->_reference)
+        {
+            return true;
+        }
+
+        return static_cast<U>(std::abs(this->_target - this->_reference)) <= static_cast<U>(this->_tolerance);
     }
-    else
+};
+
+template <typename U>
+struct compare<RelativeTolerance, U> : public compare_base<RelativeTolerance>
+{
+    using compare_base<RelativeTolerance>::compare_base;
+
+    operator bool()
     {
-        relative_error = std::abs(static_cast<double>(ref - target) / ref);
-    }
+        if(!std::isfinite(_target) || !std::isfinite(_reference))
+        {
+            return false;
+        }
+        else if(_target == _reference)
+        {
+            return true;
+        }
 
-    return relative_error <= max_relative_error;
-}
+        const double relative_change = std::abs(static_cast<double>(_target - _reference)) / _reference;
+
+        return relative_change <= _tolerance;
+    }
+};
 
 template <typename T, typename U>
 void validate(const IAccessor &tensor, const SimpleTensor<T> &reference, U tolerance_value, float tolerance_number)
@@ -198,7 +304,7 @@ void validate(const IAccessor &tensor, const SimpleTensor<T> &reference, const V
             const T &target_value    = reinterpret_cast<const T *>(tensor(id))[c];
             const T &reference_value = reinterpret_cast<const T *>(reference(id))[c];
 
-            if(!is_equal(target_value, reference_value, tolerance_value))
+            if(!compare<U, T>(target_value, reference_value, tolerance_value))
             {
                 ARM_COMPUTE_TEST_INFO("id = " << id);
                 ARM_COMPUTE_TEST_INFO("channel = " << c);
@@ -227,14 +333,12 @@ void validate(const IAccessor &tensor, const SimpleTensor<T> &reference, const V
 }
 
 template <typename T, typename U>
-void validate(T target, T ref, U tolerance_abs_error, double tolerance_relative_error)
+void validate(T target, T reference, U tolerance)
 {
-    const bool equal = is_equal(target, ref, tolerance_abs_error, tolerance_relative_error);
-
-    ARM_COMPUTE_TEST_INFO("reference = " << std::setprecision(5) << ref);
+    ARM_COMPUTE_TEST_INFO("reference = " << std::setprecision(5) << reference);
     ARM_COMPUTE_TEST_INFO("target = " << std::setprecision(5) << target);
-    ARM_COMPUTE_TEST_INFO("tolerance = " << std::setprecision(5) << tolerance_abs_error);
-    ARM_COMPUTE_EXPECT(equal, framework::LogLevel::ERRORS);
+    ARM_COMPUTE_TEST_INFO("tolerance = " << std::setprecision(5) << tolerance);
+    ARM_COMPUTE_EXPECT((compare<U, T>(target, reference, tolerance)), framework::LogLevel::ERRORS);
 }
 } // namespace validation
 } // namespace test
-- 
cgit v1.2.1
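
Usage sketch (illustrative only, not part of the commit): it shows how the tolerance types introduced in Validation.h are intended to be used from a test, assuming the compare<tolerance_type, value_type> form declared above. The example() function, the numeric values and the using-directive are invented for this illustration; only AbsoluteTolerance, RelativeTolerance, compare and validate() come from the patch.

    // Illustrative usage of the new tolerance types; names and values are made up.
    #include "tests/validation_new/Validation.h"

    using namespace arm_compute::test::validation;

    void example()
    {
        // Absolute tolerance, the same pattern the updated CL/NEON test files use.
        constexpr AbsoluteTolerance<float> tolerance_f32(0.001f);

        const float target    = 1.0002f;
        const float reference = 1.0000f;

        // compare replaces the old is_equal(): it is false for non-finite values
        // and true when |target - reference| <= tolerance.
        const bool within_abs = compare<AbsoluteTolerance<float>, float>(target, reference, tolerance_f32);

        // RelativeTolerance instead bounds |target - reference| / reference.
        const RelativeTolerance tolerance_rel(0.05);
        const bool within_rel = compare<RelativeTolerance, float>(target, reference, tolerance_rel);

        // The scalar validate() overload performs the same comparison and logs the
        // target, reference and tolerance when the check fails.
        validate(target, reference, tolerance_f32);

        (void)within_abs;
        (void)within_rel;
    }

Wrapping tolerances in distinct types lets compare dispatch on whether a bound is absolute or relative, instead of passing two raw numbers to the old is_equal().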