Diffstat (limited to 'tests/validation_new/CL')
 tests/validation_new/CL/ActivationLayer.cpp        | 24
 tests/validation_new/CL/ConvolutionLayer.cpp       |  6
 tests/validation_new/CL/DirectConvolutionLayer.cpp |  4
 tests/validation_new/CL/NormalizationLayer.cpp     |  8
 tests/validation_new/CL/SoftmaxLayer.cpp           |  6
 5 files changed, 25 insertions(+), 23 deletions(-)
diff --git a/tests/validation_new/CL/ActivationLayer.cpp b/tests/validation_new/CL/ActivationLayer.cpp
index e1cc4e54e2..7f9bcccd11 100644
--- a/tests/validation_new/CL/ActivationLayer.cpp
+++ b/tests/validation_new/CL/ActivationLayer.cpp
@@ -51,46 +51,48 @@ namespace
  *
  * @return Tolerance depending on the activation function.
  */
-float tolerance(ActivationLayerInfo::ActivationFunction activation, DataType data_type)
+AbsoluteTolerance<float> tolerance(ActivationLayerInfo::ActivationFunction activation, DataType data_type)
 {
+    constexpr float epsilon = std::numeric_limits<float>::epsilon();
+
     switch(activation)
     {
         case ActivationLayerInfo::ActivationFunction::LINEAR:
-            return data_type == DataType::F16 ? 0.2f : 0.f;
+            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.2f : epsilon);
         case ActivationLayerInfo::ActivationFunction::SQUARE:
-            return data_type == DataType::F16 ? 0.1f : 0.f;
+            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.1f : epsilon);
         case ActivationLayerInfo::ActivationFunction::LOGISTIC:
             if(is_data_type_fixed_point(data_type))
             {
-                return 5.f;
+                return AbsoluteTolerance<float>(5.f);
             }
             else
             {
-                return data_type == DataType::F16 ? 0.001f : 0.f;
+                return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.001f : epsilon);
             }
         case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
-            return data_type == DataType::F16 ? 0.00001f : 0.f;
+            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.00001f : epsilon);
         case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
         case ActivationLayerInfo::ActivationFunction::SQRT:
             if(is_data_type_fixed_point(data_type))
             {
-                return 5.f;
+                return AbsoluteTolerance<float>(5.f);
             }
             else
             {
-                return data_type == DataType::F16 ? 0.01f : 0.00001f;
+                return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.01f : 0.00001f);
             }
         case ActivationLayerInfo::ActivationFunction::TANH:
             if(is_data_type_fixed_point(data_type))
             {
-                return 5.f;
+                return AbsoluteTolerance<float>(5.f);
             }
             else
             {
-                return data_type == DataType::F16 ? 0.001f : 0.00001f;
+                return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.001f : 0.00001f);
             }
         default:
-            return 0.f;
+            return AbsoluteTolerance<float>(epsilon);
     }
 }
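
The AbsoluteTolerance<T> type used by the new return value is defined by the test framework headers and is not part of this diff. As a minimal sketch of the idea (layout assumed, not copied from the commit), a constexpr-constructible wrapper like the one below is what makes both the returns above and the namespace-scope constants in the files below legal:

#include <cstdint>
#include <limits>

// Sketch of a typed absolute-tolerance wrapper; illustrative only, the real
// class is provided by the validation framework.
template <typename T>
class AbsoluteTolerance
{
public:
    // Default tolerance: epsilon for floating-point T, 0 for integer T.
    AbsoluteTolerance() = default;

    // constexpr constructor, so declarations such as
    // "constexpr AbsoluteTolerance<float> tolerance_f32(0.001f);" compile.
    explicit constexpr AbsoluteTolerance(T tolerance)
        : _tolerance(tolerance)
    {
    }

    // Conversion back to the raw value for the actual comparison.
    constexpr operator T() const
    {
        return _tolerance;
    }

private:
    T _tolerance = std::numeric_limits<T>::epsilon();
};

// Mirrors the constants introduced elsewhere in this commit:
constexpr AbsoluteTolerance<float>  tolerance_f32(0.001f);
constexpr AbsoluteTolerance<int8_t> tolerance_qs8(2);

Wrapping the raw float also motivates the epsilon change above: a tolerance of 0.f demanded bit-exact results, while std::numeric_limits<float>::epsilon() still requires near-exact agreement but absorbs the last-ulp differences that are legitimate between the CL kernels and the reference implementation.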
diff --git a/tests/validation_new/CL/ConvolutionLayer.cpp b/tests/validation_new/CL/ConvolutionLayer.cpp
index 398feb7966..9703e0bcba 100644
--- a/tests/validation_new/CL/ConvolutionLayer.cpp
+++ b/tests/validation_new/CL/ConvolutionLayer.cpp
@@ -44,9 +44,9 @@ namespace validation
 {
 namespace
 {
-constexpr float tolerance_f32 = 1e-03f; /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
-constexpr float tolerance_f16 = 0.1f;   /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
-constexpr float tolerance_q   = 1.0f;   /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
+constexpr AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
+constexpr AbsoluteTolerance<float> tolerance_f16(0.1f);   /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
+constexpr AbsoluteTolerance<float> tolerance_q(1.0f);     /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
 /** CNN data types */
 const auto CNNDataTypes = framework::dataset::make("DataType",
diff --git a/tests/validation_new/CL/DirectConvolutionLayer.cpp b/tests/validation_new/CL/DirectConvolutionLayer.cpp
index 9cffabae42..1a7cd6b3fb 100644
--- a/tests/validation_new/CL/DirectConvolutionLayer.cpp
+++ b/tests/validation_new/CL/DirectConvolutionLayer.cpp
@@ -43,8 +43,8 @@ namespace validation
 {
 namespace
 {
-constexpr float tolerance_fp16 = 0.1f;   /**< Tolerance for floating point tests */
-constexpr float tolerance_fp32 = 0.001f; /**< Tolerance for floating point tests */
+constexpr AbsoluteTolerance<float> tolerance_fp16(0.1f);   /**< Tolerance for floating point tests */
+constexpr AbsoluteTolerance<float> tolerance_fp32(0.001f); /**< Tolerance for floating point tests */
 /** Direct convolution data set. */
 const auto data = combine(datasets::SmallDirectConvolutionShapes(),
diff --git a/tests/validation_new/CL/NormalizationLayer.cpp b/tests/validation_new/CL/NormalizationLayer.cpp
index 22ca96423a..ebef18a8bd 100644
--- a/tests/validation_new/CL/NormalizationLayer.cpp
+++ b/tests/validation_new/CL/NormalizationLayer.cpp
@@ -46,12 +46,12 @@ namespace
 {
 /** Tolerance for float operations */
 #ifdef ARM_COMPUTE_ENABLE_FP16
-constexpr float tolerance_f16 = 0.001f;
+constexpr AbsoluteTolerance<float> tolerance_f16(0.001f);
 #endif /* ARM_COMPUTE_ENABLE_FP16 */
-constexpr float tolerance_f32 = 0.00001f;
+constexpr AbsoluteTolerance<float> tolerance_f32(0.00001f);
 /** Tolerance for fixed point operations */
-constexpr int8_t  tolerance_qs8  = 2;
-constexpr int16_t tolerance_qs16 = 2;
+constexpr AbsoluteTolerance<int8_t>  tolerance_qs8(2);
+constexpr AbsoluteTolerance<int16_t> tolerance_qs16(2);
 /** Input data set. */
 const auto NormalizationDataset = combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("NormType", { NormType::IN_MAP_1D, NormType::CROSS_MAP })),
diff --git a/tests/validation_new/CL/SoftmaxLayer.cpp b/tests/validation_new/CL/SoftmaxLayer.cpp
index 3edc7b2d6e..d13236a2f9 100644
--- a/tests/validation_new/CL/SoftmaxLayer.cpp
+++ b/tests/validation_new/CL/SoftmaxLayer.cpp
@@ -44,10 +44,10 @@ namespace validation
 namespace
 {
 /** Tolerance for float operations */
-constexpr float tolerance_f16 = 0.002f;
-constexpr float tolerance_f32 = 0.000001f;
+constexpr AbsoluteTolerance<float> tolerance_f16(0.002f);
+constexpr AbsoluteTolerance<float> tolerance_f32(0.000001f);
 /** Tolerance for fixed point operations */
-constexpr int8_t tolerance_fixed_point = 2;
+constexpr AbsoluteTolerance<int8_t> tolerance_fixed_point(2);
 /** CNN data types */
 const auto CNNDataTypes = framework::dataset::make("DataType",
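
Downstream of this diff, the constants are consumed by the framework's validate() helper inside the test fixtures, which this change does not touch. A sketch of the typical call site, assuming the fixture typedefs used elsewhere under tests/validation_new (the exact test bodies are not part of this commit):

FIXTURE_DATA_TEST_CASE(RunSmall, CLSoftmaxLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F32)))
{
    // Compare the CL output against the reference; the AbsoluteTolerance<float>
    // constant converts to its raw value wherever the comparison needs it.
    validate(CLAccessor(_target), _reference, tolerance_f32);
}

Because the tolerance now carries its kind in the type, validate() overloads can distinguish absolute from relative tolerances at the call site instead of interpreting a bare float.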