author    Moritz Pflanzer <moritz.pflanzer@arm.com>    2017-08-02 09:42:27 +0100
committer Anthony Barbier <anthony.barbier@arm.com>    2018-09-17 14:16:42 +0100
commit    6106a4de410e7cc59515dd889e159bee7aa45d35 (patch)
tree      80dd491ae3865dd00a8e272bbf66b72274a0efe2 /tests/validation_new/NEON
parent    d8e765ba79772ecc3deda89ae3adab903c744296 (diff)
COMPMID-415: Use absolute and relative tolerance
Change-Id: Ib779fa307e05fa67172ddaf521239b4c746debc8
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/82229
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
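For context, the distinction the commit title draws can be illustrated with a small standalone sketch (hypothetical helper names, not library code): an absolute tolerance bounds the raw difference between target and reference, while a relative tolerance scales that bound by the magnitude of the values being compared. The diff below introduces the AbsoluteTolerance<T> form; a relative counterpart is implied by the commit title but does not appear in this excerpt.

#include <algorithm>
#include <cassert>
#include <cmath>

// Hypothetical helpers illustrating the two comparison modes.
bool within_absolute(float target, float reference, float tolerance)
{
    // Bound on the raw difference, independent of magnitude.
    return std::fabs(target - reference) <= tolerance;
}

bool within_relative(float target, float reference, float tolerance)
{
    // Bound scaled by the magnitude of the values being compared.
    return std::fabs(target - reference) <= tolerance * std::max(std::fabs(target), std::fabs(reference));
}

int main()
{
    assert(within_absolute(1.00001f, 1.f, 0.0001f));
    assert(!within_absolute(1000.1f, 1000.f, 0.0001f)); // same relative error, but fails the absolute bound
    assert(within_relative(1000.1f, 1000.f, 0.001f));
    return 0;
}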
Diffstat (limited to 'tests/validation_new/NEON')
-rw-r--r--  tests/validation_new/NEON/ActivationLayer.cpp        | 12
-rw-r--r--  tests/validation_new/NEON/ConvolutionLayer.cpp       |  8
-rw-r--r--  tests/validation_new/NEON/DirectConvolutionLayer.cpp |  8
-rw-r--r--  tests/validation_new/NEON/NormalizationLayer.cpp     |  8
-rw-r--r--  tests/validation_new/NEON/SoftmaxLayer.cpp           |  6
5 files changed, 21 insertions(+), 21 deletions(-)
diff --git a/tests/validation_new/NEON/ActivationLayer.cpp b/tests/validation_new/NEON/ActivationLayer.cpp
index db0faaecdf..bc2fe603fd 100644
--- a/tests/validation_new/NEON/ActivationLayer.cpp
+++ b/tests/validation_new/NEON/ActivationLayer.cpp
@@ -51,7 +51,7 @@ namespace
*
* @return Tolerance depending on the activation function.
*/
-float tolerance(DataType data_type, ActivationLayerInfo::ActivationFunction activation)
+AbsoluteTolerance<float> tolerance(DataType data_type, ActivationLayerInfo::ActivationFunction activation)
{
switch(activation)
{
@@ -62,17 +62,17 @@ float tolerance(DataType data_type, ActivationLayerInfo::ActivationFunction acti
switch(data_type)
{
case DataType::QS8:
- return 5.f;
+ return AbsoluteTolerance<float>(5.f);
case DataType::QS16:
- return 11.f;
+ return AbsoluteTolerance<float>(11.f);
case DataType::F16:
- return 0.01f;
+ return AbsoluteTolerance<float>(0.01f);
default:
- return 0.00001f;
+ return AbsoluteTolerance<float>(0.00001f);
}
break;
default:
- return 0.f;
+ return AbsoluteTolerance<float>(0.f);
}
}
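The hunk above changes tolerance() to return AbsoluteTolerance<float> instead of a bare float, and later hunks declare constexpr constants of the same type, so the wrapper presumably has an explicit constexpr constructor and a value accessor. A minimal sketch under those assumptions (the real definition lives in the validation framework headers and may differ) shows why the strong type helps: the comparison semantics become explicit at the call site and a raw float can no longer be passed by accident.

#include <iostream>

// Minimal sketch of a strongly-typed tolerance wrapper (assumption, not the framework's actual definition).
template <typename T>
class AbsoluteTolerance
{
public:
    constexpr AbsoluteTolerance() = default;
    // explicit + constexpr: prevents accidental implicit conversion from T,
    // while still allowing namespace-scope constexpr constants.
    explicit constexpr AbsoluteTolerance(T value) : _value(value) {}
    constexpr T value() const { return _value; }

private:
    T _value{};
};

int main()
{
    constexpr AbsoluteTolerance<float> tolerance_fp32(0.001f);
    std::cout << "fp32 tolerance: " << tolerance_fp32.value() << '\n';
    return 0;
}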
diff --git a/tests/validation_new/NEON/ConvolutionLayer.cpp b/tests/validation_new/NEON/ConvolutionLayer.cpp
index af33cc0707..1efff02428 100644
--- a/tests/validation_new/NEON/ConvolutionLayer.cpp
+++ b/tests/validation_new/NEON/ConvolutionLayer.cpp
@@ -44,11 +44,11 @@ namespace validation
{
namespace
{
-const float tolerance_f32 = 1e-03f; /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
+const AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
#ifdef ARM_COMPUTE_ENABLE_FP16
-const float tolerance_f16 = 0.01f; /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
-#endif /* ARM_COMPUTE_ENABLE_FP16 */
-const float tolerance_q = 1.0f; /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
+const AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
+#endif /* ARM_COMPUTE_ENABLE_FP16 */
+const AbsoluteTolerance<float> tolerance_q(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
/** CNN data types */
const auto CNNDataTypes = framework::dataset::make("DataType",
diff --git a/tests/validation_new/NEON/DirectConvolutionLayer.cpp b/tests/validation_new/NEON/DirectConvolutionLayer.cpp
index a46f5a5dcc..90c4abe9c2 100644
--- a/tests/validation_new/NEON/DirectConvolutionLayer.cpp
+++ b/tests/validation_new/NEON/DirectConvolutionLayer.cpp
@@ -43,11 +43,11 @@ namespace validation
{
namespace
{
-constexpr float tolerance_qs = 1.f; /**< Tolerance for fixed point tests */
+constexpr AbsoluteTolerance<float> tolerance_qs(1.f); /**< Tolerance for fixed point tests */
#ifdef ARM_COMPUTE_ENABLE_FP16
-constexpr float tolerance_fp16 = 0.01f; /**< Tolerance for half precision floating point tests */
-#endif /* ARM_COMPUTE_ENABLE_FP16 */
-constexpr float tolerance_fp32 = 0.001f; /**< Tolerance for floating point tests */
+constexpr AbsoluteTolerance<float> tolerance_fp16(0.01f); /**< Tolerance for half precision floating point tests */
+#endif /* ARM_COMPUTE_ENABLE_FP16 */
+constexpr AbsoluteTolerance<float> tolerance_fp32(0.001f); /**< Tolerance for floating point tests */
/** Direct convolution data set. */
const auto data = combine(datasets::SmallDirectConvolutionShapes(),
diff --git a/tests/validation_new/NEON/NormalizationLayer.cpp b/tests/validation_new/NEON/NormalizationLayer.cpp
index dfe793131a..1da2ed0874 100644
--- a/tests/validation_new/NEON/NormalizationLayer.cpp
+++ b/tests/validation_new/NEON/NormalizationLayer.cpp
@@ -46,12 +46,12 @@ namespace
{
/** Tolerance for float operations */
#ifdef ARM_COMPUTE_ENABLE_FP16
-constexpr float tolerance_f16 = 0.001f;
+constexpr AbsoluteTolerance<float> tolerance_f16(0.001f);
#endif /* ARM_COMPUTE_ENABLE_FP16 */
-constexpr float tolerance_f32 = 0.00001f;
+constexpr AbsoluteTolerance<float> tolerance_f32(0.00001f);
/** Tolerance for fixed point operations */
-constexpr int8_t tolerance_qs8 = 2;
-constexpr int16_t tolerance_qs16 = 3;
+constexpr AbsoluteTolerance<int8_t> tolerance_qs8(2);
+constexpr AbsoluteTolerance<int16_t> tolerance_qs16(3);
/** Input data set. */
const auto NormalizationDataset = combine(combine(combine(datasets::SmallShapes(), datasets::NormalizationTypes()), framework::dataset::make("NormalizationSize", 3, 9, 2)),
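This hunk swaps the integer tolerances (2 for QS8, 3 for QS16) for AbsoluteTolerance wrappers of the same integer types. Conceptually, validation then accepts any element whose raw difference from the reference does not exceed that bound; the following self-contained sketch illustrates such an elementwise check (hypothetical helper, not the framework's validate()).

#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <vector>

// Illustrative elementwise comparison with an integer absolute tolerance.
std::size_t count_mismatches(const std::vector<int8_t> &target,
                             const std::vector<int8_t> &reference,
                             int8_t tolerance)
{
    std::size_t mismatches = 0;
    for(std::size_t i = 0; i < target.size(); ++i)
    {
        if(std::abs(target[i] - reference[i]) > tolerance)
        {
            ++mismatches;
        }
    }
    return mismatches;
}

int main()
{
    const std::vector<int8_t> target{10, 21, 34};
    const std::vector<int8_t> reference{11, 19, 30};
    // With the QS8 tolerance of 2 from the diff above, only the last element fails (|34 - 30| = 4).
    std::cout << count_mismatches(target, reference, 2) << " mismatch(es)\n";
    return 0;
}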
diff --git a/tests/validation_new/NEON/SoftmaxLayer.cpp b/tests/validation_new/NEON/SoftmaxLayer.cpp
index ce5b8b8359..337ee29986 100644
--- a/tests/validation_new/NEON/SoftmaxLayer.cpp
+++ b/tests/validation_new/NEON/SoftmaxLayer.cpp
@@ -44,12 +44,12 @@ namespace validation
namespace
{
/** Tolerance for float operations */
-constexpr float tolerance_f32 = 0.000001f;
+constexpr AbsoluteTolerance<float> tolerance_f32(0.000001f);
#ifdef ARM_COMPUTE_ENABLE_FP16
-const float tolerance_f16 = 0.0001f;
+constexpr AbsoluteTolerance<float> tolerance_f16(0.0001f);
#endif /* ARM_COMPUTE_ENABLE_FP16*/
/** Tolerance for fixed point operations */
-constexpr int8_t tolerance_fixed_point = 2;
+constexpr AbsoluteTolerance<int8_t> tolerance_fixed_point(2);
/** CNN data types */
const auto CNNDataTypes = framework::dataset::make("DataType",