author     Pablo Tello <pablo.tello@arm.com>          2017-06-23 10:40:05 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>  2018-09-17 14:14:20 +0100
commit     383deec6b38f8b00f901d475000d46f8d3e5fb97 (patch)
tree       dc2e72587ea624d1b0eb06d8559af0e7783d90d0 /tests
parent     fabb038a54ca217497c17e31ba7ae098690f2f69 (diff)
download   ComputeLibrary-383deec6b38f8b00f901d475000d46f8d3e5fb97.tar.gz
COMPMID-345: Added support for armv8.2+FP16 in the validation framework.
Change-Id: Ifef2133d4a0da5456bec147330405b6d58cf6a71
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/78676
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'tests')
-rw-r--r--  tests/SConscript                             4
-rw-r--r--  tests/TensorLibrary.h                       13
-rw-r--r--  tests/Utils.h                               12
-rw-r--r--  tests/validation/NEON/ConvolutionLayer.cpp   2
-rw-r--r--  tests/validation/TensorFactory.h            10
-rw-r--r--  tests/validation/TensorOperations.h         27
-rw-r--r--  tests/validation/Validation.cpp              8
7 files changed, 52 insertions, 24 deletions
diff --git a/tests/SConscript b/tests/SConscript
index ef39595feb..6be4ddb35e 100644
--- a/tests/SConscript
+++ b/tests/SConscript
@@ -56,10 +56,6 @@ else:
common_env.Append(LIBS = ["arm_compute"])
arm_compute_lib = arm_compute_so
-if env['arch'] == 'arm64-v8.2-a' and ( common_env['validation_tests'] or common_env['benchmark_tests']):
- print("validation_tests=1 and benchmark_tests=1 are not currently supported for arch=arm64-v8.2-a")
- Exit(1)
-
#FIXME Delete before release
if common_env['internal_only']:
common_env.Append(CPPDEFINES=['INTERNAL_ONLY'])
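
With the guard removed, FP16-enabled test builds are accepted; for example (sketch of an invocation, using only the build options named in the deleted check):

    scons arch=arm64-v8.2-a validation_tests=1 benchmark_tests=1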
diff --git a/tests/TensorLibrary.h b/tests/TensorLibrary.h
index bdf91c6eda..b05302a9b0 100644
--- a/tests/TensorLibrary.h
+++ b/tests/TensorLibrary.h
@@ -43,6 +43,10 @@
#include <string>
#include <type_traits>
+#if ARM_COMPUTE_ENABLE_FP16
+#include <arm_fp16.h> // needed for float16_t
+#endif
+
namespace arm_compute
{
namespace test
@@ -494,10 +498,10 @@ void TensorLibrary::fill_tensor_uniform(T &&tensor, std::random_device::result_t
fill(tensor, distribution_s64, seed_offset);
break;
}
-#ifdef ENABLE_FP16
+#if ARM_COMPUTE_ENABLE_FP16
case DataType::F16:
{
- std::uniform_real_distribution<float16_t> distribution_f16(std::numeric_limits<float16_t>::lowest(), std::numeric_limits<float16_t>::max());
+ std::uniform_real_distribution<float> distribution_f16(std::numeric_limits<float16_t>::lowest(), std::numeric_limits<float16_t>::max());
fill(tensor, distribution_f16, seed_offset);
break;
}
@@ -589,11 +593,10 @@ void TensorLibrary::fill_tensor_uniform(T &&tensor, std::random_device::result_t
fill(tensor, distribution_s64, seed_offset);
break;
}
-#if ENABLE_FP16
+#if ARM_COMPUTE_ENABLE_FP16
case DataType::F16:
{
- ARM_COMPUTE_ERROR_ON(!(std::is_same<float16_t, D>::value));
- std::uniform_real_distribution<float16_t> distribution_f16(low, high);
+ std::uniform_real_distribution<float_t> distribution_f16(low, high);
fill(tensor, distribution_f16, seed_offset);
break;
}
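
A note on the distribution change above: std::uniform_real_distribution is only specified for float, double and long double, so instantiating it with float16_t is not portable; the patch therefore draws float values within float16_t's representable range and lets fill() narrow them. A minimal sketch of the same pattern (hypothetical helper, not part of this patch):

    #include <limits>
    #include <random>
    #if ARM_COMPUTE_ENABLE_FP16
    #include <arm_fp16.h> // float16_t

    // Draw in float across the representable float16_t range, then narrow.
    inline float16_t random_f16(std::mt19937 &gen)
    {
        std::uniform_real_distribution<float> dist(std::numeric_limits<float16_t>::lowest(),
                                                   std::numeric_limits<float16_t>::max());
        return static_cast<float16_t>(dist(gen));
    }
    #endif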
diff --git a/tests/Utils.h b/tests/Utils.h
index f3622cafaa..b2d4bf4f90 100644
--- a/tests/Utils.h
+++ b/tests/Utils.h
@@ -38,6 +38,10 @@
#include <string>
#include <type_traits>
+#if ARM_COMPUTE_ENABLE_FP16
+#include <arm_fp16.h> // needed for float16_t
+#endif
+
namespace arm_compute
{
namespace test
@@ -362,6 +366,10 @@ template <> struct promote<int16_t> { using type = int32_t; };
template <> struct promote<uint32_t> { using type = uint64_t; };
template <> struct promote<int32_t> { using type = int64_t; };
template <> struct promote<float> { using type = float; };
+#ifdef ARM_COMPUTE_ENABLE_FP16
+template <> struct promote<float16_t> { using type = float16_t; };
+#endif
+
template <typename T>
using promote_t = typename promote<T>::type;
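
The new promote<float16_t> specialization maps the half-precision type onto itself. For context, promote_t selects a wider accumulator for narrow integer types; a small sketch of typical use (hypothetical function, not from this patch):

    // promote_t<int16_t> is int32_t per the table above, so the accumulator
    // cannot overflow on moderately sized inputs; float16_t stays float16_t.
    template <typename T>
    promote_t<T> accumulate(const T *data, size_t n)
    {
        promote_t<T> acc{};
        for(size_t i = 0; i < n; ++i)
        {
            acc += data[i];
        }
        return acc;
    }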
@@ -513,11 +521,11 @@ void store_value_with_data_type(void *ptr, T value, DataType data_type)
case DataType::S64:
*reinterpret_cast<int64_t *>(ptr) = value;
break;
-#ifdef ENABLE_FP16
+#if ARM_COMPUTE_ENABLE_FP16
case DataType::F16:
*reinterpret_cast<float16_t *>(ptr) = value;
break;
-#endif /* ENABLE_FP16 */
+#endif /* ARM_COMPUTE_ENABLE_FP16 */
case DataType::F32:
*reinterpret_cast<float *>(ptr) = value;
break;
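
For completeness, the F16 branch above can be exercised like this once ARM_COMPUTE_ENABLE_FP16 is defined (tiny sketch, hypothetical usage):

    float16_t value{};
    store_value_with_data_type(&value, 1.0f, DataType::F16); // routed through the float16_t branch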
diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp
index a1dbe38bbf..ee2b24db96 100644
--- a/tests/validation/NEON/ConvolutionLayer.cpp
+++ b/tests/validation/NEON/ConvolutionLayer.cpp
@@ -197,4 +197,4 @@ BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE_END()
-#endif
\ No newline at end of file
+#endif
diff --git a/tests/validation/TensorFactory.h b/tests/validation/TensorFactory.h
index 48f9d6702f..610425bbfb 100644
--- a/tests/validation/TensorFactory.h
+++ b/tests/validation/TensorFactory.h
@@ -30,6 +30,10 @@
#include "boost_wrapper.h"
+#if ARM_COMPUTE_ENABLE_FP16
+#include <arm_fp16.h> // needed for float16_t
+#endif
+
namespace arm_compute
{
namespace test
@@ -39,7 +43,7 @@ namespace validation
using TensorVariant = boost::variant < Tensor<uint8_t>, Tensor<int8_t>,
Tensor<uint16_t>, Tensor<int16_t>,
Tensor<uint32_t>, Tensor<int32_t>,
-#ifdef ENABLE_FP16
+#ifdef ARM_COMPUTE_ENABLE_FP16
Tensor<float16_t>,
#endif
Tensor<float >>;
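
With ARM_COMPUTE_ENABLE_FP16 defined, TensorVariant gains a Tensor<float16_t> alternative. A hedged sketch of how such a boost::variant is typically consumed with a generic visitor (illustrative only, not from this patch):

    struct num_elements_visitor : public boost::static_visitor<int>
    {
        template <typename T>
        int operator()(const Tensor<T> &tensor) const
        {
            return tensor.num_elements(); // works for every alternative, float16_t included
        }
    };

    // int n = boost::apply_visitor(num_elements_visitor{}, some_tensor_variant);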
@@ -90,10 +94,10 @@ public:
using value_type_s32 = typename match_const<R, int32_t>::type;
v = Tensor<int32_t>(shape, dt, fixed_point_position, reinterpret_cast<value_type_s32 *>(data));
break;
-#ifdef ENABLE_FP16
+#ifdef ARM_COMPUTE_ENABLE_FP16
case DataType::F16:
using value_type_f16 = typename match_const<R, float16_t>::type;
- v = Tensor<float16_t>(raw.shape(), dt, reinterpret_cast<value_type_f16 *>(raw.data()));
+ v = Tensor<float16_t>(shape, dt, fixed_point_position, reinterpret_cast<value_type_f16 *>(data));
break;
#endif
case DataType::F32:
diff --git a/tests/validation/TensorOperations.h b/tests/validation/TensorOperations.h
index 7337924b47..56cc657daa 100644
--- a/tests/validation/TensorOperations.h
+++ b/tests/validation/TensorOperations.h
@@ -49,13 +49,24 @@ namespace tensor_operations
{
namespace
{
+template <class T>
+struct is_floating_point
+ : std::integral_constant < bool,
+ std::is_same<float, typename std::remove_cv<T>::type>::value ||
+#if ARM_COMPUTE_ENABLE_FP16
+ std::is_same<float16_t, typename std::remove_cv<T>::type>::value ||
+#endif
+ std::is_same<double, typename std::remove_cv<T>::type>::value || std::is_same<long double, typename std::remove_cv<T>::type>::value >
+{
+};
+
bool is_valid_pixel(int i, int min, int max)
{
return (i >= min && i < max);
}
// 3D convolution for floating point type
-template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
void convolution3d(const T *in, const T *weights, const T *bias, T *out, int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights, int8_t fixed_point_position)
{
const int half_width_weights = width_weights / 2;
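
The local is_floating_point trait above exists because std::is_floating_point only recognizes the standard floating-point types (float, double, long double) and so reports false for the float16_t extension type; the trait re-adds it behind the FP16 macro. A quick illustration, usable in the same translation unit (assumes an FP16-enabled build):

    #if ARM_COMPUTE_ENABLE_FP16
    // std::is_floating_point<float16_t>::value is false on the toolchains this
    // patch targets, which is why the overloads below would otherwise never
    // match Tensor<float16_t>.
    static_assert(is_floating_point<float16_t>::value, "covered by the local trait");
    static_assert(is_floating_point<const float>::value, "remove_cv strips qualifiers first");
    #endif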
@@ -525,7 +536,7 @@ void depth_convert<int16_t, int32_t>(const Tensor<int16_t> &in, Tensor<int32_t>
}
// Matrix multiplication for floating point type
-template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
void gemm(const Tensor<T> &in1, const Tensor<T> &in2, const Tensor<T> &in3, Tensor<T> &out, float alpha, float beta)
{
const int M = out.shape().y();
@@ -609,7 +620,7 @@ void pixel_wise_multiplication(const Tensor<T1> &in1, const Tensor<T2> &in2, Ten
for(int i = 0; i < in1.num_elements(); ++i)
{
double val = static_cast<intermediate_type>(in1[i]) * static_cast<intermediate_type>(in2[i]) * static_cast<double>(scale);
- if(std::is_floating_point<T3>::value)
+ if(is_floating_point<T3>::value)
{
out[i] = val;
}
@@ -705,7 +716,7 @@ void threshold(const Tensor<T> &in, Tensor<T> &out, uint8_t threshold, uint8_t f
}
// Activation Layer for floating point type
-template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
void activation_layer(const Tensor<T> &in, Tensor<T> &out, ActivationLayerInfo act_info)
{
const T a = static_cast<T>(act_info.a());
@@ -838,7 +849,7 @@ void batch_normalization_layer(const Tensor<T> &in, Tensor<T> &out, const Tensor
}
// Batch Normalization Layer for floating point type
-template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
void batch_normalization_layer(const Tensor<T> &in, Tensor<T> &out, const Tensor<T> &mean, const Tensor<T> &var, const Tensor<T> &beta, const Tensor<T> &gamma, float epsilon, int fixed_point_position)
{
const int cols = static_cast<int>(in.shape()[0]);
@@ -940,7 +951,7 @@ void fully_connected_layer(const Tensor<T> &in, const Tensor<T> &weights, const
}
// Normalization Layer for floating point type
-template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
void normalization_layer(const Tensor<T> &in, Tensor<T> &out, NormalizationLayerInfo norm_info)
{
const uint32_t norm_size = norm_info.norm_size();
@@ -1235,7 +1246,7 @@ void pooling_layer(const Tensor<T> &in, Tensor<T> &out, PoolingLayerInfo pool_in
hstart = std::max(hstart, 0);
wend = std::min(wend, w_in);
hend = std::min(hend, h_in);
- if(std::is_floating_point<T>::value)
+ if(is_floating_point<T>::value)
{
for(int y = hstart; y < hend; ++y)
{
@@ -1267,7 +1278,7 @@ void pooling_layer(const Tensor<T> &in, Tensor<T> &out, PoolingLayerInfo pool_in
}
// Softmax Layer
-template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
void softmax_layer(const Tensor<T> &in, Tensor<T> &out)
{
const int cols = static_cast<int>(in.shape()[0]);
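
All of the enable_if swaps in this file follow one dispatch pattern: the floating-point overload is selected via the local trait, while a sibling overload handles the integer-backed fixed-point types. A condensed sketch of that pattern (hypothetical function name):

    // Chosen for float, double and, with FP16 enabled, float16_t.
    template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
    void process(const Tensor<T> &in, Tensor<T> &out);

    // Chosen for the integer-backed fixed-point types instead.
    template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
    void process(const Tensor<T> &in, Tensor<T> &out);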
diff --git a/tests/validation/Validation.cpp b/tests/validation/Validation.cpp
index 17dc6952ca..8aada0cb0e 100644
--- a/tests/validation/Validation.cpp
+++ b/tests/validation/Validation.cpp
@@ -40,6 +40,10 @@
#include <cstdint>
#include <iomanip>
+#if ARM_COMPUTE_ENABLE_FP16
+#include <arm_fp16.h> // needed for float16_t
+#endif
+
namespace arm_compute
{
namespace test
@@ -82,7 +86,7 @@ double get_double_data(const void *ptr, DataType data_type)
return *reinterpret_cast<const uint64_t *>(ptr);
case DataType::S64:
return *reinterpret_cast<const int64_t *>(ptr);
-#if ENABLE_FP16
+#if ARM_COMPUTE_ENABLE_FP16
case DataType::F16:
return *reinterpret_cast<const float16_t *>(ptr);
#endif
@@ -384,6 +388,8 @@ void validate(const IAccessor &tensor, BorderSize border_size, const BorderMode
void validate(std::vector<unsigned int> classified_labels, std::vector<unsigned int> expected_labels)
{
+ ARM_COMPUTE_UNUSED(classified_labels);
+ ARM_COMPUTE_UNUSED(expected_labels);
BOOST_TEST(expected_labels.size() != 0);
BOOST_TEST(classified_labels.size() == expected_labels.size());
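
The two ARM_COMPUTE_UNUSED lines silence unused-parameter warnings in configurations where the BOOST_TEST assertions compile away. The macro's actual definition lives elsewhere in the repository; a conventional implementation of such a macro looks like this (assumption, shown for illustration only):

    #define ARM_COMPUTE_UNUSED(x) (void)(x) // cast to void marks the variable as used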