From 383deec6b38f8b00f901d475000d46f8d3e5fb97 Mon Sep 17 00:00:00 2001
From: Pablo Tello
Date: Fri, 23 Jun 2017 10:40:05 +0100
Subject: COMPMID-345: Added support for arm8.2+FP16 in the validation
 framework.

Change-Id: Ifef2133d4a0da5456bec147330405b6d58cf6a71
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/78676
Tested-by: Kaizen
Reviewed-by: Anthony Barbier
---
 tests/validation/TensorOperations.h | 27 +++++++++++++++++++--------
 1 file changed, 19 insertions(+), 8 deletions(-)

diff --git a/tests/validation/TensorOperations.h b/tests/validation/TensorOperations.h
index 7337924b47..56cc657daa 100644
--- a/tests/validation/TensorOperations.h
+++ b/tests/validation/TensorOperations.h
@@ -49,13 +49,24 @@ namespace tensor_operations
 {
 namespace
 {
+template <typename T>
+struct is_floating_point
+    : std::integral_constant < bool,
+      std::is_same<float, typename std::remove_cv<T>::type>::value ||
+#if ARM_COMPUTE_ENABLE_FP16
+      std::is_same<float16_t, typename std::remove_cv<T>::type>::value ||
+#endif
+      std::is_same<double, typename std::remove_cv<T>::type>::value || std::is_same<long double, typename std::remove_cv<T>::type>::value >
+{
+};
+
 bool is_valid_pixel(int i, int min, int max)
 {
     return (i >= min && i < max);
 }
 
 // 3D convolution for floating point type
-template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
 void convolution3d(const T *in, const T *weights, const T *bias, T *out, int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights, int8_t fixed_point_position)
 {
     const int half_width_weights = width_weights / 2;
@@ -525,7 +536,7 @@ void depth_convert(const Tensor<T1> &in, Tensor<T2>
 }
 
 // Matrix multiplication for floating point type
-template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
 void gemm(const Tensor<T> &in1, const Tensor<T> &in2, const Tensor<T> &in3, Tensor<T> &out, float alpha, float beta)
 {
     const int M = out.shape().y();
@@ -609,7 +620,7 @@ void pixel_wise_multiplication(const Tensor<T1> &in1, const Tensor<T2> &in2, Ten
     for(int i = 0; i < in1.num_elements(); ++i)
     {
         double val = static_cast<intermediate_type>(in1[i]) * static_cast<intermediate_type>(in2[i]) * static_cast<double>(scale);
-        if(std::is_floating_point<T3>::value)
+        if(is_floating_point<T3>::value)
         {
             out[i] = val;
         }
@@ -705,7 +716,7 @@ void threshold(const Tensor<T> &in, Tensor<T> &out, uint8_t threshold, uint8_t f
 }
 
 // Activation Layer for floating point type
-template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
 void activation_layer(const Tensor<T> &in, Tensor<T> &out, ActivationLayerInfo act_info)
 {
     const T a = static_cast<T>(act_info.a());
@@ -838,7 +849,7 @@ void batch_normalization_layer(const Tensor<T> &in, Tensor<T> &out, const Tensor
 }
 
 // Batch Normalization Layer for floating point type
-template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
 void batch_normalization_layer(const Tensor<T> &in, Tensor<T> &out, const Tensor<T> &mean, const Tensor<T> &var, const Tensor<T> &beta, const Tensor<T> &gamma, float epsilon, int fixed_point_position)
 {
     const int cols = static_cast<int>(in.shape()[0]);
@@ -940,7 +951,7 @@ void fully_connected_layer(const Tensor<T> &in, const Tensor<T> &weights, const
 }
 
 // Normalization Layer for floating point type
-template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
 void normalization_layer(const Tensor<T> &in, Tensor<T> &out, NormalizationLayerInfo norm_info)
 {
     const uint32_t norm_size = norm_info.norm_size();
@@ -1235,7 +1246,7 @@ void pooling_layer(const Tensor<T> &in, Tensor<T> &out, PoolingLayerInfo pool_in
                 hstart = std::max(hstart, 0);
                 wend   = std::min(wend, w_in);
                 hend   = std::min(hend, h_in);
-                if(std::is_floating_point<T>::value)
+                if(is_floating_point<T>::value)
                 {
                     for(int y = hstart; y < hend; ++y)
                     {
@@ -1267,7 +1278,7 @@ void pooling_layer(const Tensor<T> &in, Tensor<T> &out, PoolingLayerInfo pool_in
 }
 
 // Softmax Layer
-template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
 void softmax_layer(const Tensor<T> &in, Tensor<T> &out)
 {
     const int cols = static_cast<int>(in.shape()[0]);
--
cgit v1.2.1
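
Note (not part of the patch): the trait introduced above is a standard C++11 idiom. std::is_floating_point only reports true for the built-in float, double and long double, so it misses the non-standard float16_t that ARMv8.2-A toolchains provide; the patch therefore enumerates the accepted types itself with std::is_same on the cv-stripped type and swaps the custom trait into every std::enable_if gate. Below is a minimal, self-contained sketch of the same mechanism under assumed names: half_t is a hypothetical stand-in for float16_t so the sketch compiles on any toolchain, and dispatch() stands in for the patched functions.

#include <cstdint>
#include <iostream>
#include <type_traits>

// Hypothetical stand-in for ARM's float16_t; in the library the real type is
// only available when ARM_COMPUTE_ENABLE_FP16 is defined for an ARMv8.2-A target.
struct half_t
{
    float v;
};

// Same shape as the patch's trait: true for float, double, long double and the
// half-precision type, after stripping const/volatile qualifiers.
template <typename T>
struct is_floating_point
    : std::integral_constant<bool,
                             std::is_same<float, typename std::remove_cv<T>::type>::value ||
                             std::is_same<half_t, typename std::remove_cv<T>::type>::value ||
                             std::is_same<double, typename std::remove_cv<T>::type>::value ||
                             std::is_same<long double, typename std::remove_cv<T>::type>::value>
{
};

// SFINAE gating in the style of the patched functions: each overload is only
// instantiable when its enable_if condition holds.
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
const char *dispatch()
{
    return "floating-point reference path";
}

template <typename T, typename std::enable_if<!is_floating_point<T>::value, int>::type * = nullptr>
const char *dispatch()
{
    return "fixed-point reference path";
}

int main()
{
    std::cout << dispatch<float>() << '\n';        // floating-point reference path
    std::cout << dispatch<half_t>() << '\n';       // floating-point reference path
    std::cout << dispatch<std::int8_t>() << '\n';  // fixed-point reference path
}

Exactly one overload is viable for any given T, so overload resolution picks the floating-point reference path for float and half_t and the fixed-point path for everything else, which mirrors how the patched convolution3d, gemm, activation_layer, batch_normalization_layer, normalization_layer and softmax_layer overloads are selected once float16_t satisfies the trait.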