aboutsummaryrefslogtreecommitdiff
path: root/tests
diff options
context:
space:
mode:
authorMoritz Pflanzer <moritz.pflanzer@arm.com>2017-08-02 09:42:27 +0100
committerAnthony Barbier <anthony.barbier@arm.com>2018-09-17 14:16:42 +0100
commit6106a4de410e7cc59515dd889e159bee7aa45d35 (patch)
tree80dd491ae3865dd00a8e272bbf66b72274a0efe2 /tests
parentd8e765ba79772ecc3deda89ae3adab903c744296 (diff)
downloadComputeLibrary-6106a4de410e7cc59515dd889e159bee7aa45d35.tar.gz
COMPMID-415: Use absolute and relative tolerance
Change-Id: Ib779fa307e05fa67172ddaf521239b4c746debc8 Reviewed-on: http://mpd-gerrit.cambridge.arm.com/82229 Reviewed-by: Anthony Barbier <anthony.barbier@arm.com> Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Diffstat (limited to 'tests')
-rw-r--r--tests/validation_new/CL/ActivationLayer.cpp24
-rw-r--r--tests/validation_new/CL/ConvolutionLayer.cpp6
-rw-r--r--tests/validation_new/CL/DirectConvolutionLayer.cpp4
-rw-r--r--tests/validation_new/CL/NormalizationLayer.cpp8
-rw-r--r--tests/validation_new/CL/SoftmaxLayer.cpp6
-rw-r--r--tests/validation_new/NEON/ActivationLayer.cpp12
-rw-r--r--tests/validation_new/NEON/ConvolutionLayer.cpp8
-rw-r--r--tests/validation_new/NEON/DirectConvolutionLayer.cpp8
-rw-r--r--tests/validation_new/NEON/NormalizationLayer.cpp8
-rw-r--r--tests/validation_new/NEON/SoftmaxLayer.cpp6
-rw-r--r--tests/validation_new/Validation.cpp34
-rw-r--r--tests/validation_new/Validation.h174
12 files changed, 201 insertions, 97 deletions
diff --git a/tests/validation_new/CL/ActivationLayer.cpp b/tests/validation_new/CL/ActivationLayer.cpp
index e1cc4e54e2..7f9bcccd11 100644
--- a/tests/validation_new/CL/ActivationLayer.cpp
+++ b/tests/validation_new/CL/ActivationLayer.cpp
@@ -51,46 +51,48 @@ namespace
*
* @return Tolerance depending on the activation function.
*/
-float tolerance(ActivationLayerInfo::ActivationFunction activation, DataType data_type)
+AbsoluteTolerance<float> tolerance(ActivationLayerInfo::ActivationFunction activation, DataType data_type)
{
+ constexpr float epsilon = std::numeric_limits<float>::epsilon();
+
switch(activation)
{
case ActivationLayerInfo::ActivationFunction::LINEAR:
- return data_type == DataType::F16 ? 0.2f : 0.f;
+ return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.2f : epsilon);
case ActivationLayerInfo::ActivationFunction::SQUARE:
- return data_type == DataType::F16 ? 0.1f : 0.f;
+ return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.1f : epsilon);
case ActivationLayerInfo::ActivationFunction::LOGISTIC:
if(is_data_type_fixed_point(data_type))
{
- return 5.f;
+ return AbsoluteTolerance<float>(5.f);
}
else
{
- return data_type == DataType::F16 ? 0.001f : 0.f;
+ return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.001f : epsilon);
}
case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
- return data_type == DataType::F16 ? 0.00001f : 0.f;
+ return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.00001f : epsilon);
case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
case ActivationLayerInfo::ActivationFunction::SQRT:
if(is_data_type_fixed_point(data_type))
{
- return 5.f;
+ return AbsoluteTolerance<float>(5.f);
}
else
{
- return data_type == DataType::F16 ? 0.01f : 0.00001f;
+ return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.01f : 0.00001f);
}
case ActivationLayerInfo::ActivationFunction::TANH:
if(is_data_type_fixed_point(data_type))
{
- return 5.f;
+ return AbsoluteTolerance<float>(5.f);
}
else
{
- return data_type == DataType::F16 ? 0.001f : 0.00001f;
+ return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.001f : 0.00001f);
}
default:
- return 0.f;
+ return AbsoluteTolerance<float>(epsilon);
}
}
diff --git a/tests/validation_new/CL/ConvolutionLayer.cpp b/tests/validation_new/CL/ConvolutionLayer.cpp
index 398feb7966..9703e0bcba 100644
--- a/tests/validation_new/CL/ConvolutionLayer.cpp
+++ b/tests/validation_new/CL/ConvolutionLayer.cpp
@@ -44,9 +44,9 @@ namespace validation
{
namespace
{
-constexpr float tolerance_f32 = 1e-03f; /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
-constexpr float tolerance_f16 = 0.1f; /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
-constexpr float tolerance_q = 1.0f; /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
+constexpr AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
+constexpr AbsoluteTolerance<float> tolerance_f16(0.1f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
+constexpr AbsoluteTolerance<float> tolerance_q(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
/** CNN data types */
const auto CNNDataTypes = framework::dataset::make("DataType",
diff --git a/tests/validation_new/CL/DirectConvolutionLayer.cpp b/tests/validation_new/CL/DirectConvolutionLayer.cpp
index 9cffabae42..1a7cd6b3fb 100644
--- a/tests/validation_new/CL/DirectConvolutionLayer.cpp
+++ b/tests/validation_new/CL/DirectConvolutionLayer.cpp
@@ -43,8 +43,8 @@ namespace validation
{
namespace
{
-constexpr float tolerance_fp16 = 0.1f; /**< Tolerance for floating point tests */
-constexpr float tolerance_fp32 = 0.001f; /**< Tolerance for floating point tests */
+constexpr AbsoluteTolerance<float> tolerance_fp16(0.1f); /**< Tolerance for floating point tests */
+constexpr AbsoluteTolerance<float> tolerance_fp32(0.001f); /**< Tolerance for floating point tests */
/** Direct convolution data set. */
const auto data = combine(datasets::SmallDirectConvolutionShapes(),
diff --git a/tests/validation_new/CL/NormalizationLayer.cpp b/tests/validation_new/CL/NormalizationLayer.cpp
index 22ca96423a..ebef18a8bd 100644
--- a/tests/validation_new/CL/NormalizationLayer.cpp
+++ b/tests/validation_new/CL/NormalizationLayer.cpp
@@ -46,12 +46,12 @@ namespace
{
/** Tolerance for float operations */
#ifdef ARM_COMPUTE_ENABLE_FP16
-constexpr float tolerance_f16 = 0.001f;
+constexpr AbsoluteTolerance<float> tolerance_f16(0.001f);
#endif /* ARM_COMPUTE_ENABLE_FP16 */
-constexpr float tolerance_f32 = 0.00001f;
+constexpr AbsoluteTolerance<float> tolerance_f32(0.00001f);
/** Tolerance for fixed point operations */
-constexpr int8_t tolerance_qs8 = 2;
-constexpr int16_t tolerance_qs16 = 2;
+constexpr AbsoluteTolerance<int8_t> tolerance_qs8(2);
+constexpr AbsoluteTolerance<int16_t> tolerance_qs16(2);
/** Input data set. */
const auto NormalizationDataset = combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("NormType", { NormType::IN_MAP_1D, NormType::CROSS_MAP })),
diff --git a/tests/validation_new/CL/SoftmaxLayer.cpp b/tests/validation_new/CL/SoftmaxLayer.cpp
index 3edc7b2d6e..d13236a2f9 100644
--- a/tests/validation_new/CL/SoftmaxLayer.cpp
+++ b/tests/validation_new/CL/SoftmaxLayer.cpp
@@ -44,10 +44,10 @@ namespace validation
namespace
{
/** Tolerance for float operations */
-constexpr float tolerance_f16 = 0.002f;
-constexpr float tolerance_f32 = 0.000001f;
+constexpr AbsoluteTolerance<float> tolerance_f16(0.002f);
+constexpr AbsoluteTolerance<float> tolerance_f32(0.000001f);
/** Tolerance for fixed point operations */
-constexpr int8_t tolerance_fixed_point = 2;
+constexpr AbsoluteTolerance<int8_t> tolerance_fixed_point(2);
/** CNN data types */
const auto CNNDataTypes = framework::dataset::make("DataType",
diff --git a/tests/validation_new/NEON/ActivationLayer.cpp b/tests/validation_new/NEON/ActivationLayer.cpp
index db0faaecdf..bc2fe603fd 100644
--- a/tests/validation_new/NEON/ActivationLayer.cpp
+++ b/tests/validation_new/NEON/ActivationLayer.cpp
@@ -51,7 +51,7 @@ namespace
*
* @return Tolerance depending on the activation function.
*/
-float tolerance(DataType data_type, ActivationLayerInfo::ActivationFunction activation)
+AbsoluteTolerance<float> tolerance(DataType data_type, ActivationLayerInfo::ActivationFunction activation)
{
switch(activation)
{
@@ -62,17 +62,17 @@ float tolerance(DataType data_type, ActivationLayerInfo::ActivationFunction acti
switch(data_type)
{
case DataType::QS8:
- return 5.f;
+ return AbsoluteTolerance<float>(5.f);
case DataType::QS16:
- return 11.f;
+ return AbsoluteTolerance<float>(11.f);
case DataType::F16:
- return 0.01f;
+ return AbsoluteTolerance<float>(0.01f);
default:
- return 0.00001f;
+ return AbsoluteTolerance<float>(0.00001f);
}
break;
default:
- return 0.f;
+ return AbsoluteTolerance<float>(0.f);
}
}
diff --git a/tests/validation_new/NEON/ConvolutionLayer.cpp b/tests/validation_new/NEON/ConvolutionLayer.cpp
index af33cc0707..1efff02428 100644
--- a/tests/validation_new/NEON/ConvolutionLayer.cpp
+++ b/tests/validation_new/NEON/ConvolutionLayer.cpp
@@ -44,11 +44,11 @@ namespace validation
{
namespace
{
-const float tolerance_f32 = 1e-03f; /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
+const AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
#ifdef ARM_COMPUTE_ENABLE_FP16
-const float tolerance_f16 = 0.01f; /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
-#endif /* ARM_COMPUTE_ENABLE_FP16 */
-const float tolerance_q = 1.0f; /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
+const AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
+#endif /* ARM_COMPUTE_ENABLE_FP16 */
+const AbsoluteTolerance<float> tolerance_q(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
/** CNN data types */
const auto CNNDataTypes = framework::dataset::make("DataType",
diff --git a/tests/validation_new/NEON/DirectConvolutionLayer.cpp b/tests/validation_new/NEON/DirectConvolutionLayer.cpp
index a46f5a5dcc..90c4abe9c2 100644
--- a/tests/validation_new/NEON/DirectConvolutionLayer.cpp
+++ b/tests/validation_new/NEON/DirectConvolutionLayer.cpp
@@ -43,11 +43,11 @@ namespace validation
{
namespace
{
-constexpr float tolerance_qs = 1.f; /**< Tolerance for fixed point tests */
+constexpr AbsoluteTolerance<float> tolerance_qs(1.f); /**< Tolerance for fixed point tests */
#ifdef ARM_COMPUTE_ENABLE_FP16
-constexpr float tolerance_fp16 = 0.01f; /**< Tolerance for half precision floating point tests */
-#endif /* ARM_COMPUTE_ENABLE_FP16 */
-constexpr float tolerance_fp32 = 0.001f; /**< Tolerance for floating point tests */
+constexpr AbsoluteTolerance<float> tolerance_fp16(0.01f); /**< Tolerance for half precision floating point tests */
+#endif /* ARM_COMPUTE_ENABLE_FP16 */
+constexpr AbsoluteTolerance<float> tolerance_fp32(0.001f); /**< Tolerance for floating point tests */
/** Direct convolution data set. */
const auto data = combine(datasets::SmallDirectConvolutionShapes(),
diff --git a/tests/validation_new/NEON/NormalizationLayer.cpp b/tests/validation_new/NEON/NormalizationLayer.cpp
index dfe793131a..1da2ed0874 100644
--- a/tests/validation_new/NEON/NormalizationLayer.cpp
+++ b/tests/validation_new/NEON/NormalizationLayer.cpp
@@ -46,12 +46,12 @@ namespace
{
/** Tolerance for float operations */
#ifdef ARM_COMPUTE_ENABLE_FP16
-constexpr float tolerance_f16 = 0.001f;
+constexpr AbsoluteTolerance<float> tolerance_f16(0.001f);
#endif /* ARM_COMPUTE_ENABLE_FP16 */
-constexpr float tolerance_f32 = 0.00001f;
+constexpr AbsoluteTolerance<float> tolerance_f32(0.00001f);
/** Tolerance for fixed point operations */
-constexpr int8_t tolerance_qs8 = 2;
-constexpr int16_t tolerance_qs16 = 3;
+constexpr AbsoluteTolerance<int8_t> tolerance_qs8(2);
+constexpr AbsoluteTolerance<int16_t> tolerance_qs16(3);
/** Input data set. */
const auto NormalizationDataset = combine(combine(combine(datasets::SmallShapes(), datasets::NormalizationTypes()), framework::dataset::make("NormalizationSize", 3, 9, 2)),
diff --git a/tests/validation_new/NEON/SoftmaxLayer.cpp b/tests/validation_new/NEON/SoftmaxLayer.cpp
index ce5b8b8359..337ee29986 100644
--- a/tests/validation_new/NEON/SoftmaxLayer.cpp
+++ b/tests/validation_new/NEON/SoftmaxLayer.cpp
@@ -44,12 +44,12 @@ namespace validation
namespace
{
/** Tolerance for float operations */
-constexpr float tolerance_f32 = 0.000001f;
+constexpr AbsoluteTolerance<float> tolerance_f32(0.000001f);
#ifdef ARM_COMPUTE_ENABLE_FP16
-const float tolerance_f16 = 0.0001f;
+constexpr AbsoluteTolerance<float> tolerance_f16(0.0001f);
#endif /* ARM_COMPUTE_ENABLE_FP16*/
/** Tolerance for fixed point operations */
-constexpr int8_t tolerance_fixed_point = 2;
+constexpr AbsoluteTolerance<int8_t> tolerance_fixed_point(2);
/** CNN data types */
const auto CNNDataTypes = framework::dataset::make("DataType",
diff --git a/tests/validation_new/Validation.cpp b/tests/validation_new/Validation.cpp
index a492bb6505..fec7c10939 100644
--- a/tests/validation_new/Validation.cpp
+++ b/tests/validation_new/Validation.cpp
@@ -128,17 +128,16 @@ void check_border_element(const IAccessor &tensor, const Coordinates &id,
{
const size_t channel_offset = channel * channel_size;
const double target = get_double_data(ptr + channel_offset, tensor.data_type());
- const double ref = get_double_data(static_cast<const uint8_t *>(border_value) + channel_offset, tensor.data_type());
- const bool equal = is_equal(target, ref);
+ const double reference = get_double_data(static_cast<const uint8_t *>(border_value) + channel_offset, tensor.data_type());
- ARM_COMPUTE_TEST_INFO("id = " << id);
- ARM_COMPUTE_TEST_INFO("channel = " << channel);
- ARM_COMPUTE_TEST_INFO("target = " << std::setprecision(5) << target);
- ARM_COMPUTE_TEST_INFO("reference = " << std::setprecision(5) << ref);
- ARM_COMPUTE_EXPECT_EQUAL(target, ref, framework::LogLevel::DEBUG);
-
- if(!equal)
+ if(!compare<AbsoluteTolerance<double>, double>(target, reference))
{
+ ARM_COMPUTE_TEST_INFO("id = " << id);
+ ARM_COMPUTE_TEST_INFO("channel = " << channel);
+ ARM_COMPUTE_TEST_INFO("target = " << std::setprecision(5) << target);
+ ARM_COMPUTE_TEST_INFO("reference = " << std::setprecision(5) << reference);
+ ARM_COMPUTE_EXPECT_EQUAL(target, reference, framework::LogLevel::DEBUG);
+
++num_mismatches;
}
@@ -191,17 +190,16 @@ void validate(const IAccessor &tensor, const void *reference_value)
{
const size_t channel_offset = channel * channel_size;
const double target = get_double_data(ptr + channel_offset, tensor.data_type());
- const double ref = get_double_data(reference_value, tensor.data_type());
- const bool equal = is_equal(target, ref);
+ const double reference = get_double_data(reference_value, tensor.data_type());
- ARM_COMPUTE_TEST_INFO("id = " << id);
- ARM_COMPUTE_TEST_INFO("channel = " << channel);
- ARM_COMPUTE_TEST_INFO("target = " << std::setprecision(5) << target);
- ARM_COMPUTE_TEST_INFO("reference = " << std::setprecision(5) << ref);
- ARM_COMPUTE_EXPECT_EQUAL(target, ref, framework::LogLevel::DEBUG);
-
- if(!equal)
+ if(!compare<AbsoluteTolerance<double>, double>(target, reference))
{
+ ARM_COMPUTE_TEST_INFO("id = " << id);
+ ARM_COMPUTE_TEST_INFO("channel = " << channel);
+ ARM_COMPUTE_TEST_INFO("target = " << std::setprecision(5) << target);
+ ARM_COMPUTE_TEST_INFO("reference = " << std::setprecision(5) << reference);
+ ARM_COMPUTE_EXPECT_EQUAL(target, reference, framework::LogLevel::DEBUG);
+
++num_mismatches;
}
diff --git a/tests/validation_new/Validation.h b/tests/validation_new/Validation.h
index 91b17145be..b21d12932a 100644
--- a/tests/validation_new/Validation.h
+++ b/tests/validation_new/Validation.h
@@ -43,6 +43,88 @@ namespace test
{
namespace validation
{
+/** Class representing an absolute tolerance value. */
+template <typename T>
+class AbsoluteTolerance
+{
+public:
+ /** Underlying type. */
+ using value_type = T;
+
+ /* Default constructor.
+ *
+ * Initialises the tolerance to 0.
+ */
+ AbsoluteTolerance() = default;
+
+ /** Constructor.
+ *
+ * @param[in] value Absolute tolerance value.
+ */
+ explicit constexpr AbsoluteTolerance(T value)
+ : _value{ value }
+ {
+ }
+
+ /** Implicit conversion to the underlying type. */
+ constexpr operator T() const
+ {
+ return _value;
+ }
+
+private:
+ T _value{ std::numeric_limits<T>::epsilon() };
+};
+
+/** Class representing a relative tolerance value. */
+class RelativeTolerance
+{
+public:
+ /** Underlying type. */
+ using value_type = double;
+
+ /* Default constructor.
+ *
+ * Initialises the tolerance to 0.
+ */
+ RelativeTolerance() = default;
+
+ /** Constructor.
+ *
+ * @param[in] value Relative tolerance value.
+ */
+ explicit constexpr RelativeTolerance(value_type value)
+ : _value{ value }
+ {
+ }
+
+ /** Implicit conversion to the underlying type. */
+ constexpr operator value_type() const
+ {
+ return _value;
+ }
+
+private:
+ value_type _value{ 0 };
+};
+
+/** Print AbsoluteTolerance type. */
+template <typename T>
+inline ::std::ostream &operator<<(::std::ostream &os, const AbsoluteTolerance<T> &tolerance)
+{
+ os << static_cast<typename AbsoluteTolerance<T>::value_type>(tolerance);
+
+ return os;
+}
+
+/** Print RelativeTolerance type. */
+inline ::std::ostream &operator<<(::std::ostream &os, const RelativeTolerance &tolerance)
+{
+ os << static_cast<typename RelativeTolerance::value_type>(tolerance);
+
+ return os;
+}
+
template <typename T>
bool compare_dimensions(const Dimensions<T> &dimensions1, const Dimensions<T> &dimensions2)
{
@@ -86,8 +168,8 @@ void validate(const arm_compute::PaddingSize &padding, const arm_compute::Paddin
* reference tensor and test tensor is multiple of wrap_range), but such errors would be detected by
* other test cases.
*/
-template <typename T, typename U = T>
-void validate(const IAccessor &tensor, const SimpleTensor<T> &reference, U tolerance_value = U(0), float tolerance_number = 0.f);
+template <typename T, typename U = AbsoluteTolerance<T>>
+void validate(const IAccessor &tensor, const SimpleTensor<T> &reference, U tolerance_value = U(), float tolerance_number = 0.f);
/** Validate tensors with valid region.
*
@@ -99,8 +181,8 @@ void validate(const IAccessor &tensor, const SimpleTensor<T> &reference, U toler
* reference tensor and test tensor is multiple of wrap_range), but such errors would be detected by
* other test cases.
*/
-template <typename T, typename U = T>
-void validate(const IAccessor &tensor, const SimpleTensor<T> &reference, const ValidRegion &valid_region, U tolerance_value = U(0), float tolerance_number = 0.f);
+template <typename T, typename U = AbsoluteTolerance<T>>
+void validate(const IAccessor &tensor, const SimpleTensor<T> &reference, const ValidRegion &valid_region, U tolerance_value = U(), float tolerance_number = 0.f);
/** Validate tensors against constant value.
*
@@ -126,42 +208,66 @@ void validate(std::vector<unsigned int> classified_labels, std::vector<unsigned
*
* - All values should match
*/
-template <typename T, typename U = T>
-void validate(T target, T ref, U tolerance_abs_error = std::numeric_limits<T>::epsilon(), double tolerance_relative_error = 0.0001f);
+template <typename T, typename U>
+void validate(T target, T reference, U tolerance = AbsoluteTolerance<T>());
-template <typename T, typename U = T>
-bool is_equal(T target, T ref, U max_absolute_error = std::numeric_limits<T>::epsilon(), double max_relative_error = 0.0001f)
+template <typename T>
+struct compare_base
{
- if(!std::isfinite(target) || !std::isfinite(ref))
+ compare_base(typename T::value_type target, typename T::value_type reference, T tolerance = T(0))
+ : _target{ target }, _reference{ reference }, _tolerance{ tolerance }
{
- return false;
}
- // No need further check if they are equal
- if(ref == target)
- {
- return true;
- }
+ typename T::value_type _target{};
+ typename T::value_type _reference{};
+ T _tolerance{};
+};
- // Need this check for the situation when the two values close to zero but have different sign
- if(std::abs(std::abs(ref) - std::abs(target)) <= max_absolute_error)
- {
- return true;
- }
+template <typename T, typename U>
+struct compare;
- double relative_error = 0;
+template <typename U>
+struct compare<AbsoluteTolerance<U>, U> : public compare_base<AbsoluteTolerance<U>>
+{
+ using compare_base<AbsoluteTolerance<U>>::compare_base;
- if(std::abs(target) > std::abs(ref))
+ operator bool()
{
- relative_error = std::abs(static_cast<double>(target - ref) / target);
+ if(!std::isfinite(this->_target) || !std::isfinite(this->_reference))
+ {
+ return false;
+ }
+ else if(this->_target == this->_reference)
+ {
+ return true;
+ }
+
+ return static_cast<U>(std::abs(this->_target - this->_reference)) <= static_cast<U>(this->_tolerance);
}
- else
+};
+
+template <typename U>
+struct compare<RelativeTolerance, U> : public compare_base<RelativeTolerance>
+{
+ using compare_base<RelativeTolerance>::compare_base;
+
+ operator bool()
{
- relative_error = std::abs(static_cast<double>(ref - target) / ref);
- }
+ if(!std::isfinite(_target) || !std::isfinite(_reference))
+ {
+ return false;
+ }
+ else if(_target == _reference)
+ {
+ return true;
+ }
- return relative_error <= max_relative_error;
-}
+ const double relative_change = std::abs(static_cast<double>(_target - _reference)) / _reference;
+
+ return relative_change <= _tolerance;
+ }
+};
template <typename T, typename U>
void validate(const IAccessor &tensor, const SimpleTensor<T> &reference, U tolerance_value, float tolerance_number)
@@ -198,7 +304,7 @@ void validate(const IAccessor &tensor, const SimpleTensor<T> &reference, const V
const T &target_value = reinterpret_cast<const T *>(tensor(id))[c];
const T &reference_value = reinterpret_cast<const T *>(reference(id))[c];
- if(!is_equal(target_value, reference_value, tolerance_value))
+ if(!compare<U, typename U::value_type>(target_value, reference_value, tolerance_value))
{
ARM_COMPUTE_TEST_INFO("id = " << id);
ARM_COMPUTE_TEST_INFO("channel = " << c);
@@ -227,14 +333,12 @@ void validate(const IAccessor &tensor, const SimpleTensor<T> &reference, const V
}
template <typename T, typename U>
-void validate(T target, T ref, U tolerance_abs_error, double tolerance_relative_error)
+void validate(T target, T reference, U tolerance)
{
- const bool equal = is_equal(target, ref, tolerance_abs_error, tolerance_relative_error);
-
- ARM_COMPUTE_TEST_INFO("reference = " << std::setprecision(5) << ref);
+ ARM_COMPUTE_TEST_INFO("reference = " << std::setprecision(5) << reference);
ARM_COMPUTE_TEST_INFO("target = " << std::setprecision(5) << target);
- ARM_COMPUTE_TEST_INFO("tolerance = " << std::setprecision(5) << tolerance_abs_error);
- ARM_COMPUTE_EXPECT(equal, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_TEST_INFO("tolerance = " << std::setprecision(5) << tolerance);
+ ARM_COMPUTE_EXPECT((compare<U, typename U::value_type>(target, reference, tolerance)), framework::LogLevel::ERRORS);
}
} // namespace validation
} // namespace test