From 014333d73883c3872e458cedda5ccef586a7ccd4 Mon Sep 17 00:00:00 2001
From: Vidhya Sudhan Loganathan
Date: Mon, 2 Jul 2018 09:13:49 +0100
Subject: COMPMID-970 : Remove QS8 / QS16 support

Removed Fixed point position arguments from test sources

Change-Id: I8343724723b71611fd501ed34de0866d3fb60e7e
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/136382
Tested-by: Jenkins
Reviewed-by: Michele DiGiorgio
Reviewed-by: Anthony Barbier
---
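Note: at a reference-test call site the change looks roughly like the sketch below. This is an illustrative example only, not part of the patch; the tensor and variable names (src, mean, var, beta, gamma, epsilon, act_info) are placeholders, and the signatures are taken from the BatchNormalizationLayer hunks in this patch.

    // Before this patch: callers had to thread a fixed point position through,
    // even for float/half tensors where it was unused.
    SimpleTensor<float> out = reference::batch_normalization_layer(
        src, mean, var, beta, gamma, epsilon, act_info, /* fixed_point_position */ 0);

    // After this patch: the argument is removed from declarations and call sites.
    SimpleTensor<float> out = reference::batch_normalization_layer(
        src, mean, var, beta, gamma, epsilon, act_info);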
 tests/validation/reference/AbsoluteDifference.cpp    |   3 +-
 tests/validation/reference/Accumulate.cpp            |   3 +-
 tests/validation/reference/ActivationLayer.cpp       |  69 +----------
 tests/validation/reference/ArithmeticAddition.cpp    |   1 -
 tests/validation/reference/ArithmeticDivision.cpp    |   1 -
 .../reference/BatchNormalizationLayer.cpp            |  55 +--------
 .../validation/reference/BatchNormalizationLayer.h   |   6 +-
 tests/validation/reference/ChannelShuffle.cpp        |   2 +-
 tests/validation/reference/ConvolutionLayer.cpp      |   6 +-
 tests/validation/reference/DeconvolutionLayer.cpp    |   2 +-
 .../validation/reference/DepthConcatenateLayer.cpp   |   4 +-
 tests/validation/reference/DepthConvertLayer.cpp     |  58 +---------
 .../reference/DepthwiseConvolutionLayer.cpp          |   4 +-
 tests/validation/reference/FlattenLayer.cpp          |   4 +-
 tests/validation/reference/FullyConnectedLayer.cpp   |  64 ++---------
 tests/validation/reference/GEMM.cpp                  |   6 +-
 tests/validation/reference/LocallyConnected.cpp      |   2 +-
 tests/validation/reference/NormalizationLayer.cpp    |   8 +-
 tests/validation/reference/Permute.cpp               |   2 +-
 tests/validation/reference/PoolingLayer.cpp          | 126 +--------------------
 tests/validation/reference/SoftmaxLayer.cpp          |   8 +-
 .../validation/reference/WidthConcatenateLayer.cpp   |   2 -
 22 files changed, 37 insertions(+), 399 deletions(-)

diff --git a/tests/validation/reference/AbsoluteDifference.cpp b/tests/validation/reference/AbsoluteDifference.cpp
index f518e67324..f9fce5b42a 100644
--- a/tests/validation/reference/AbsoluteDifference.cpp
+++ b/tests/validation/reference/AbsoluteDifference.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,7 +24,6 @@
 #include "AbsoluteDifference.h"
 
 #include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
 #include "tests/validation/Helpers.h"
 
 namespace arm_compute
diff --git a/tests/validation/reference/Accumulate.cpp b/tests/validation/reference/Accumulate.cpp
index 29a2007bbd..7f34be9663 100644
--- a/tests/validation/reference/Accumulate.cpp
+++ b/tests/validation/reference/Accumulate.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,7 +24,6 @@
 #include "Accumulate.h"
 
 #include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
 #include "tests/validation/Helpers.h"
 
 namespace arm_compute
diff --git a/tests/validation/reference/ActivationLayer.cpp b/tests/validation/reference/ActivationLayer.cpp
index df7f6534bc..9455effd72 100644
--- a/tests/validation/reference/ActivationLayer.cpp
+++ b/tests/validation/reference/ActivationLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,7 +24,6 @@
 #include "ActivationLayer.h"
 
 #include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
 #include "tests/validation/Helpers.h"
 
 namespace arm_compute
@@ -39,7 +38,7 @@ template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
 SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo info)
 {
     // Create reference
-    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };
+    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
 
     // Compute reference
     const T a(info.a());
@@ -92,68 +91,6 @@ SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo
     return dst;
 }
 
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type>
-SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo info)
-{
-    using namespace fixed_point_arithmetic;
-
-    // Create reference
-    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };
-
-    // Compute reference
-    const int fixed_point_position = src.fixed_point_position();
-    const fixed_point<T> a(info.a(), fixed_point_position);
-    const fixed_point<T> b(info.b(), fixed_point_position);
-    const fixed_point<T> const_0(0, fixed_point_position);
-    const fixed_point<T> const_1(1, fixed_point_position);
-
-    for(int i = 0; i < src.num_elements(); ++i)
-    {
-        fixed_point<T> x(src[i], fixed_point_position, true);
-
-        switch(info.activation())
-        {
-            case ActivationLayerInfo::ActivationFunction::ABS:
-                dst[i] = abs(x).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::LINEAR:
-                dst[i] = add(b, mul(a, x)).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::LOGISTIC:
-                dst[i] = (const_1 / (const_1 + exp(-x))).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::RELU:
-                dst[i] = max(const_0, x).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
-                dst[i] = min(a, max(const_0, x)).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
-                dst[i] = min(a, max(b, x)).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
-                dst[i] = (x > const_0) ? x.raw() : mul(a, x).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
-                dst[i] = log(const_1 + exp(x)).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::SQRT:
-                dst[i] = (const_1 / inv_sqrt(x)).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::SQUARE:
-                dst[i] = mul(x, x).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::TANH:
-                dst[i] = mul(a, tanh(mul(b, x))).raw();
-                break;
-            default:
-                ARM_COMPUTE_ERROR("Unsupported activation function");
-        }
-    }
-
-    return dst;
-}
-
 template <>
 SimpleTensor<uint8_t> activation_layer<uint8_t>(const SimpleTensor<uint8_t> &src, ActivationLayerInfo info)
 {
@@ -165,8 +102,6 @@ SimpleTensor<uint8_t> activation_layer<uint8_t>(const SimpleTensor<uint8_t> &src
 
 template SimpleTensor<float> activation_layer(const SimpleTensor<float> &src, ActivationLayerInfo info);
 template SimpleTensor<half> activation_layer(const SimpleTensor<half> &src, ActivationLayerInfo info);
-template SimpleTensor<qint8_t> activation_layer(const SimpleTensor<qint8_t> &src, ActivationLayerInfo info);
-template SimpleTensor<qint16_t> activation_layer(const SimpleTensor<qint16_t> &src, ActivationLayerInfo info);
 } // namespace reference
 } // namespace validation
 } // namespace test
diff --git a/tests/validation/reference/ArithmeticAddition.cpp b/tests/validation/reference/ArithmeticAddition.cpp
index f26838dcb8..4569277103 100644
--- a/tests/validation/reference/ArithmeticAddition.cpp
+++ b/tests/validation/reference/ArithmeticAddition.cpp
@@ -24,7 +24,6 @@
 #include "ArithmeticAddition.h"
 
 #include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
 #include "tests/validation/Helpers.h"
 
 namespace arm_compute
diff --git a/tests/validation/reference/ArithmeticDivision.cpp b/tests/validation/reference/ArithmeticDivision.cpp
index 934e89052f..0102231993 100644
--- a/tests/validation/reference/ArithmeticDivision.cpp
+++ b/tests/validation/reference/ArithmeticDivision.cpp
@@ -24,7 +24,6 @@
 #include "ArithmeticDivision.h"
 
 #include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
 #include "tests/validation/Helpers.h"
 
 namespace arm_compute
diff --git a/tests/validation/reference/BatchNormalizationLayer.cpp b/tests/validation/reference/BatchNormalizationLayer.cpp
index c8badacc79..3d1a6ed7d7 100644
--- a/tests/validation/reference/BatchNormalizationLayer.cpp
+++ b/tests/validation/reference/BatchNormalizationLayer.cpp
@@ -36,56 +36,11 @@
 namespace validation
 {
 namespace reference
 {
-// Batch Normalization Layer for fixed point type
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type *>
-SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
-                                          ActivationLayerInfo act_info, int fixed_point_position)
-{
-    ARM_COMPUTE_UNUSED(act_info);
-    SimpleTensor<T> result(src.shape(), src.data_type());
-
-    const auto cols = static_cast<int>(src.shape()[0]);
-    const auto rows = static_cast<int>(src.shape()[1]);
-    const auto depth = static_cast<int>(src.shape()[2]);
-    const int upper_dims = src.shape().total_size() / (cols * rows * depth);
-
-    for(int r = 0; r < upper_dims; ++r)
-    {
-        for(int i = 0; i < depth; ++i)
-        {
-            for(int k = 0; k < rows; ++k)
-            {
-                for(int l = 0; l < cols; ++l)
-                {
-                    const int pos = l + k * cols + i * rows * cols + r * cols * rows * depth;
-
-                    fixed_point_arithmetic::fixed_point<T> src_qs(src[pos], fixed_point_position, true);
-                    fixed_point_arithmetic::fixed_point<T> var_qs(var[i], fixed_point_position, true);
-                    fixed_point_arithmetic::fixed_point<T> mean_qs(mean[i], fixed_point_position, true);
-                    fixed_point_arithmetic::fixed_point<T> beta_qs(beta[i], fixed_point_position, true);
-                    fixed_point_arithmetic::fixed_point<T> gamma_qs(gamma[i], fixed_point_position, true);
-                    fixed_point_arithmetic::fixed_point<T> epsilon_qs(epsilon, fixed_point_position);
-
-                    auto denominator = fixed_point_arithmetic::inv_sqrt(var_qs + epsilon_qs);
-                    auto numerator = src_qs - mean_qs;
-                    auto x_bar = numerator * denominator;
-                    x_bar = beta_qs + x_bar * gamma_qs;
-                    result[pos] = x_bar.raw();
-                }
-            }
-        }
-    }
-
-    return result;
-}
-
 // Batch Normalization Layer for floating point type
 template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type *>
 SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
-                                          ActivationLayerInfo act_info, int fixed_point_position)
+                                          ActivationLayerInfo act_info)
 {
-    ARM_COMPUTE_UNUSED(fixed_point_position);
-
     SimpleTensor<T> result(src.shape(), src.data_type());
 
     const auto cols = static_cast<int>(src.shape()[0]);
@@ -119,14 +74,10 @@ SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const Simp
     return result;
 }
 template SimpleTensor<float> batch_normalization_layer(const SimpleTensor<float> &src, const SimpleTensor<float> &mean, const SimpleTensor<float> &var, const SimpleTensor<float> &beta,
-                                                       const SimpleTensor<float> &gamma, float epsilon, ActivationLayerInfo act_info, int fixed_point_position);
-template SimpleTensor<qint8_t> batch_normalization_layer(const SimpleTensor<qint8_t> &src, const SimpleTensor<qint8_t> &mean, const SimpleTensor<qint8_t> &var, const SimpleTensor<qint8_t> &beta,
-                                                         const SimpleTensor<qint8_t> &gamma, float epsilon, ActivationLayerInfo act_info, int fixed_point_position);
-template SimpleTensor<qint16_t> batch_normalization_layer(const SimpleTensor<qint16_t> &src, const SimpleTensor<qint16_t> &mean, const SimpleTensor<qint16_t> &var, const SimpleTensor<qint16_t> &beta,
-                                                          const SimpleTensor<qint16_t> &gamma, float epsilon, ActivationLayerInfo act_info, int fixed_point_position);
+                                                       const SimpleTensor<float> &gamma, float epsilon, ActivationLayerInfo act_info);
 template SimpleTensor<half> batch_normalization_layer(const SimpleTensor<half> &src, const SimpleTensor<half> &mean, const SimpleTensor<half> &var, const SimpleTensor<half> &beta,
-                                                      const SimpleTensor<half> &gamma, float epsilon, ActivationLayerInfo act_info, int fixed_point_position);
+                                                      const SimpleTensor<half> &gamma, float epsilon, ActivationLayerInfo act_info);
 
 } // namespace reference
 } // namespace validation
diff --git a/tests/validation/reference/BatchNormalizationLayer.h b/tests/validation/reference/BatchNormalizationLayer.h
index 329909dab4..b45d820412 100644
--- a/tests/validation/reference/BatchNormalizationLayer.h
+++ b/tests/validation/reference/BatchNormalizationLayer.h
@@ -37,13 +37,11 @@ namespace reference
 {
 template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
 SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
-                                          ActivationLayerInfo act_info,
-                                          int fixed_point_position);
+                                          ActivationLayerInfo act_info);
 
 template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
 SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
-                                          ActivationLayerInfo act_info,
-                                          int fixed_point_position);
+                                          ActivationLayerInfo act_info);
 } // namespace reference
 } // namespace validation
 } // namespace test
diff --git a/tests/validation/reference/ChannelShuffle.cpp b/tests/validation/reference/ChannelShuffle.cpp
index c4d8d50e3d..b8aa9203ab 100644
---
 a/tests/validation/reference/ChannelShuffle.cpp
+++ b/tests/validation/reference/ChannelShuffle.cpp
@@ -39,7 +39,7 @@ template <typename T>
 SimpleTensor<T> channel_shuffle(const SimpleTensor<T> &src, int num_groups)
 {
     // Create reference
-    SimpleTensor<T> dst{ src.shape(), src.data_type(), src.num_channels(), src.fixed_point_position(), src.quantization_info() };
+    SimpleTensor<T> dst{ src.shape(), src.data_type(), src.num_channels(), src.quantization_info() };
 
     const int M = src.shape()[0];
     const int N = src.shape()[1];
diff --git a/tests/validation/reference/ConvolutionLayer.cpp b/tests/validation/reference/ConvolutionLayer.cpp
index fe558ba4af..00c839d2df 100644
--- a/tests/validation/reference/ConvolutionLayer.cpp
+++ b/tests/validation/reference/ConvolutionLayer.cpp
@@ -108,7 +108,7 @@ SimpleTensor<T> convolution_layer(const SimpleTensor<T> &src, const SimpleTensor
                                   const Size2D &dilation)
 {
     // Create reference
-    SimpleTensor<T> dst{ output_shape, src.data_type(), 1, src.fixed_point_position(), src.quantization_info() };
+    SimpleTensor<T> dst{ output_shape, src.data_type(), 1, src.quantization_info() };
 
     if(src.data_layout() == DataLayout::NHWC)
     {
@@ -128,10 +128,6 @@ template SimpleTensor<float> convolution_layer(const SimpleTensor<float> &src, c
                                                const PadStrideInfo &info, const Size2D &dilation);
 template SimpleTensor<half> convolution_layer(const SimpleTensor<half> &src, const SimpleTensor<half> &weights, const SimpleTensor<half> &bias, const TensorShape &output_shape,
                                               const PadStrideInfo &info, const Size2D &dilation);
-template SimpleTensor<qint8_t> convolution_layer(const SimpleTensor<qint8_t> &src, const SimpleTensor<qint8_t> &weights, const SimpleTensor<qint8_t> &bias, const TensorShape &output_shape,
-                                                 const PadStrideInfo &info, const Size2D &dilation);
-template SimpleTensor<qint16_t> convolution_layer(const SimpleTensor<qint16_t> &src, const SimpleTensor<qint16_t> &weights, const SimpleTensor<qint16_t> &bias, const TensorShape &output_shape,
-                                                  const PadStrideInfo &info, const Size2D &dilation);
 template SimpleTensor<uint8_t> convolution_layer(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &bias, const TensorShape &output_shape,
                                                  const PadStrideInfo &info, const Size2D &dilation);
 } // namespace reference
diff --git a/tests/validation/reference/DeconvolutionLayer.cpp b/tests/validation/reference/DeconvolutionLayer.cpp
index 35437084b8..d073bbf7a1 100644
--- a/tests/validation/reference/DeconvolutionLayer.cpp
+++ b/tests/validation/reference/DeconvolutionLayer.cpp
@@ -46,7 +46,7 @@ SimpleTensor<T> deconvolution_layer(const SimpleTensor<T> &src, const SimpleTens
     int out_y = src.shape().y() + (src.shape().y() - 1) * (stride_y - 1) + a.second + 2 * info.pad().second;
     scaled_shape.set(0, out_x);
     scaled_shape.set(1, out_y);
-    SimpleTensor<T> scaled{ scaled_shape, src.data_type(), 1, src.fixed_point_position() };
+    SimpleTensor<T> scaled{ scaled_shape, src.data_type(), 1 };
 
     const int width_in = src.shape().x();
     const int height_in = src.shape().y();
diff --git a/tests/validation/reference/DepthConcatenateLayer.cpp b/tests/validation/reference/DepthConcatenateLayer.cpp
index 9a7248493d..c9a23520c7 100644
--- a/tests/validation/reference/DepthConcatenateLayer.cpp
+++ b/tests/validation/reference/DepthConcatenateLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -95,8 +95,6 @@ SimpleTensor<T> depthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs)
 
 template SimpleTensor<float> depthconcatenate_layer(const std::vector<SimpleTensor<float>> &srcs);
 template SimpleTensor<half> depthconcatenate_layer(const std::vector<SimpleTensor<half>> &srcs);
-template SimpleTensor<qint8_t> depthconcatenate_layer(const std::vector<SimpleTensor<qint8_t>> &srcs);
-template SimpleTensor<qint16_t> depthconcatenate_layer(const std::vector<SimpleTensor<qint16_t>> &srcs);
 } // namespace reference
 } // namespace validation
 } // namespace test
diff --git a/tests/validation/reference/DepthConvertLayer.cpp b/tests/validation/reference/DepthConvertLayer.cpp
index dd095b8912..022007720a 100644
--- a/tests/validation/reference/DepthConvertLayer.cpp
+++ b/tests/validation/reference/DepthConvertLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -36,44 +36,6 @@
 namespace validation
 {
 namespace reference
 {
-template < typename T1, typename T2, typename std::enable_if < std::is_integral<T1>::value &&std::is_floating_point<T2>::value, int >::type >
-SimpleTensor<T2> depth_convert(const SimpleTensor<T1> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift)
-{
-    ARM_COMPUTE_UNUSED(policy);
-    ARM_COMPUTE_UNUSED(shift);
-
-    using namespace fixed_point_arithmetic;
-    SimpleTensor<T2> result(src.shape(), dt_out);
-
-    const int fixed_point_position = src.fixed_point_position();
-
-    for(int i = 0; i < src.num_elements(); ++i)
-    {
-        result[i] = static_cast<T2>(fixed_point<T1>(src[i], fixed_point_position, true));
-    }
-
-    return result;
-}
-
-template < typename T1, typename T2, typename std::enable_if < std::is_floating_point<T1>::value &&std::is_integral<T2>::value, int >::type >
-SimpleTensor<T2> depth_convert(const SimpleTensor<T1> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift)
-{
-    ARM_COMPUTE_UNUSED(policy);
-    ARM_COMPUTE_UNUSED(shift);
-
-    using namespace fixed_point_arithmetic;
-    SimpleTensor<T2> result(src.shape(), dt_out, 1, src.fixed_point_position());
-
-    const int fixed_point_position = result.fixed_point_position();
-
-    for(int i = 0; i < src.num_elements(); ++i)
-    {
-        result[i] = fixed_point<T2>(src[i], fixed_point_position).raw();
-    }
-
-    return result;
-}
-
 template < typename T1, typename T2, typename std::enable_if < std::is_integral<T1>::value &&std::is_integral<T2>::value &&!std::is_same<T1, T2>::value, int >::type >
 SimpleTensor<T2> depth_convert(const SimpleTensor<T1> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift)
 {
@@ -126,20 +88,6 @@ SimpleTensor<T2> depth_convert(const SimpleTensor<T1> &src, DataType dt_out, Con
     return result;
 }
 
-template < typename T1, typename T2, typename std::enable_if < std::is_floating_point<T1>::value &&is_floating_point<T2>::value, int >::type >
-SimpleTensor<T2> depth_convert(const SimpleTensor<T1> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift)
-{
-    ARM_COMPUTE_UNUSED(policy);
-    ARM_COMPUTE_UNUSED(shift);
-
-    SimpleTensor<T2> result(src.shape(), dt_out);
-
-    for(int i = 0; i < src.num_elements(); ++i)
-    {
-        result[i] = static_cast<T2>(src[i]);
-    }
-}
-
 template SimpleTensor<uint16_t> depth_convert(const SimpleTensor<uint8_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
 template SimpleTensor<int16_t> depth_convert(const SimpleTensor<uint8_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
 template SimpleTensor<int32_t> depth_convert(const SimpleTensor<uint8_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
@@ -147,10 +95,6 @@ template SimpleTensor<int32_t> depth_convert(const SimpleTensor<uint8_t> &src,
 template SimpleTensor<uint8_t> depth_convert(const SimpleTensor<uint16_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
 template SimpleTensor<uint8_t> depth_convert(const SimpleTensor<int16_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
 template SimpleTensor<int32_t> depth_convert(const SimpleTensor<int16_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
-template SimpleTensor<float> depth_convert(const SimpleTensor<qint8_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
-template SimpleTensor<qint8_t> depth_convert(const SimpleTensor<float> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
-template SimpleTensor<float> depth_convert(const SimpleTensor<qint16_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
-template SimpleTensor<qint16_t> depth_convert(const SimpleTensor<float> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
 } // namespace reference
 } // namespace validation
 } // namespace test
diff --git a/tests/validation/reference/DepthwiseConvolutionLayer.cpp b/tests/validation/reference/DepthwiseConvolutionLayer.cpp
index 10c617e953..d8f3cbae49 100644
--- a/tests/validation/reference/DepthwiseConvolutionLayer.cpp
+++ b/tests/validation/reference/DepthwiseConvolutionLayer.cpp
@@ -53,7 +53,7 @@ template <typename T, typename TB>
 SimpleTensor<T> depthwise_convolution(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &biases, const TensorShape &dst_shape, const PadStrideInfo &conv_info,
                                       unsigned int depth_multiplier)
 {
-    SimpleTensor<T> dst{ dst_shape, src.data_type(), 1, src.fixed_point_position() };
+    SimpleTensor<T> dst{ dst_shape, src.data_type(), 1 };
 
     // Compute reference
    const int filter_width = weights.shape().x();
@@ -122,7 +122,7 @@ template <>
 SimpleTensor<uint8_t> depthwise_convolution(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &biases, const TensorShape &dst_shape,
                                             const PadStrideInfo &conv_info, unsigned int depth_multiplier)
 {
-    SimpleTensor<uint8_t> dst{ dst_shape, src.data_type(), 1, src.fixed_point_position(), src.quantization_info() };
+    SimpleTensor<uint8_t> dst{ dst_shape, src.data_type(), 1, src.quantization_info() };
 
     // Create reference
     const int input_offset = -src.quantization_info().offset;
diff --git a/tests/validation/reference/FlattenLayer.cpp b/tests/validation/reference/FlattenLayer.cpp
index 44f4d93178..e140d752a0 100644
--- a/tests/validation/reference/FlattenLayer.cpp
+++ b/tests/validation/reference/FlattenLayer.cpp
@@ -36,7 +36,7 @@ namespace reference
 template <typename T>
 SimpleTensor<T> flatten_layer(const SimpleTensor<T> &src, const TensorShape &shape_flatten)
 {
-    SimpleTensor<T> dst(shape_flatten, src.data_type(), 1, src.fixed_point_position());
+    SimpleTensor<T> dst(shape_flatten, src.data_type(), 1);
 
     // Note: Since the reference implementation does not use padding bytes, we can copy directly the content of the source tensor
     std::copy(src.data(), src.data() + src.num_elements(), dst.data());
@@ -46,8 +46,6 @@ SimpleTensor<T> flatten_layer(const SimpleTensor<T> &src, const TensorShape &sha
 
 template SimpleTensor<float> flatten_layer(const SimpleTensor<float> &src, const TensorShape &shape_flatten);
 template SimpleTensor<half> flatten_layer(const SimpleTensor<half> &src, const TensorShape &shape_flatten);
-template SimpleTensor<qint8_t> flatten_layer(const SimpleTensor<qint8_t> &src, const TensorShape &shape_flatten);
-template SimpleTensor<qint16_t> flatten_layer(const SimpleTensor<qint16_t> &src, const TensorShape &shape_flatten);
 } // namespace reference
 } // namespace validation
 } // namespace test
diff --git a/tests/validation/reference/FullyConnectedLayer.cpp b/tests/validation/reference/FullyConnectedLayer.cpp
index 5384715ace..3ef10eacea 100644
--- a/tests/validation/reference/FullyConnectedLayer.cpp
+++ b/tests/validation/reference/FullyConnectedLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -44,10 +44,8 @@ namespace
 // Vector matrix multiply for floating point
 template < typename T, typename TB, typename std::enable_if < is_floating_point<T>::value &&is_floating_point<TB>::value, int >::type = 0 >
 void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &dst, int offset_src, int offset_dst, int cols_weights,
-                            int rows_weights, uint8_t fixed_point_position)
+                            int rows_weights)
 {
-    ARM_COMPUTE_UNUSED(fixed_point_position);
-
     const T *src_ptr = src.data() + offset_src;
     const T *weights_ptr = weights.data();
     const TB *bias_ptr = bias.data();
@@ -60,57 +58,16 @@ void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &w
     }
 }
 
-// Vector matrix multiply for fixed point type
-template < typename T, typename TB, typename std::enable_if < std::is_integral<T>::value &&std::is_integral<TB>::value, int >::type = 0 >
-void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &dst, int offset_src, int offset_dst, int cols_weights,
-                            int rows_weights, uint8_t fixed_point_position)
+// Vector matrix multiply for quantized type
+template < typename T, typename TB, typename std::enable_if < std::is_same<T, uint8_t>::value &&std::is_same<TB, int32_t>::value, int >::type = 0 >
+void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &dst, int offset_src, int offset_dst,
+                            int cols_weights, int rows_weights)
 {
     const T *src_ptr = src.data() + offset_src;
     const T *weights_ptr = weights.data();
     const TB *bias_ptr = bias.data();
     T *dst_ptr = dst.data() + offset_dst;
 
-    using namespace fixed_point_arithmetic;
-    using promoted_type = fixed_point_arithmetic::traits::promote_t<T>;
-
-    for(int y = 0; y < rows_weights; ++y)
-    {
-        // Reset accumulator
-        fixed_point<promoted_type> acc(0, fixed_point_position);
-
-        for(int x = 0; x < cols_weights; ++x)
-        {
-            const fixed_point<promoted_type> i_value(src_ptr[x], fixed_point_position, true);
-            const fixed_point<promoted_type> w_value(weights_ptr[x], fixed_point_position, true);
-            acc = acc + i_value * w_value;
-        }
-
-        // Get the bias
-        const fixed_point<T> b(bias_ptr[y], fixed_point_position, true);
-
-        // Convert back and accumulate the bias
-        fixed_point<T> res(acc);
-        res = res + b;
-
-        // Store the result
-        dst_ptr[y] = res.raw();
-
-        weights_ptr += cols_weights;
-    }
-}
-
-// Vector matrix multiply for quantized type
-template <>
-void vector_matrix_multiply(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &bias, SimpleTensor<uint8_t> &dst, int offset_src, int offset_dst,
-                            int cols_weights, int rows_weights, uint8_t fixed_point_position)
-{
-    ARM_COMPUTE_UNUSED(fixed_point_position);
-
-    const uint8_t *src_ptr = src.data() + offset_src;
-    const uint8_t *weights_ptr = weights.data();
-    const int32_t *bias_ptr = bias.data();
-    uint8_t *dst_ptr = dst.data() + offset_dst;
-
     const int input_offset = -src.quantization_info().offset;
     const float input_scale = src.quantization_info().scale;
     const int weights_offset = -weights.quantization_info().offset;
@@ -141,7 +98,7 @@ void vector_matrix_multiply(const SimpleTensor<uint8_t> &src, const SimpleTensor
         acc = utility::clamp<int32_t>(acc, 0, 255);
 
         // Store the result
-        dst_ptr[y] = static_cast<uint8_t>(acc);
+        dst_ptr[y] = static_cast<T>(acc);
 
         weights_ptr += cols_weights;
     }
@@ -152,7 +109,7 @@ template <typename T, typename TB>
 SimpleTensor<T> fully_connected_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, const TensorShape &dst_shape)
 {
     // Create reference
-    SimpleTensor<T> dst{ TensorShape{ dst_shape }, src.data_type(), 1, src.fixed_point_position(), src.quantization_info() };
+    SimpleTensor<T> dst{ TensorShape{ dst_shape }, src.data_type(), 1, src.quantization_info() };
 
     // Sanity checks
     const int num_batch_dimensions = std::max(0, static_cast<int>(dst_shape.num_dimensions()) - 1);
@@ -183,8 +140,7 @@ SimpleTensor<T> fully_connected_layer(const SimpleTensor<T> &src, const SimpleTe
                                offset_in,
                                offset_out,
                                cols_weights,
-                               rows_weights,
-                               src.fixed_point_position());
+                               rows_weights);
     }
 
     return dst;
@@ -192,8 +148,6 @@ SimpleTensor<T> fully_connected_layer(const SimpleTensor<T> &src, const SimpleTe
 
 template SimpleTensor<float> fully_connected_layer(const SimpleTensor<float> &src, const SimpleTensor<float> &weights, const SimpleTensor<float> &bias, const TensorShape &dst_shape);
 template SimpleTensor<half> fully_connected_layer(const SimpleTensor<half> &src, const SimpleTensor<half> &weights, const SimpleTensor<half> &bias, const TensorShape &dst_shape);
-template SimpleTensor<qint8_t> fully_connected_layer(const SimpleTensor<qint8_t> &src, const SimpleTensor<qint8_t> &weights, const SimpleTensor<qint8_t> &bias, const TensorShape &dst_shape);
-template SimpleTensor<qint16_t> fully_connected_layer(const SimpleTensor<qint16_t> &src, const SimpleTensor<qint16_t> &weights, const SimpleTensor<qint16_t> &bias, const TensorShape &dst_shape);
 template SimpleTensor<uint8_t> fully_connected_layer(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &bias, const TensorShape &dst_shape);
 } // namespace reference
 } // namespace validation
diff --git a/tests/validation/reference/GEMM.cpp b/tests/validation/reference/GEMM.cpp
index f9dcfcbdd0..7378ada4ab 100644
--- a/tests/validation/reference/GEMM.cpp
+++ b/tests/validation/reference/GEMM.cpp
@@ -38,7 +38,7 @@ template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
 SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta)
 {
     // Create reference
-    SimpleTensor<T> dst{ c.shape(), c.data_type(), 1, c.fixed_point_position() };
+    SimpleTensor<T> dst{ c.shape(), c.data_type(), 1 };
 
     // Compute reference
     const int M = a.shape().y();
@@ -91,7 +91,7 @@ SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const S
     using namespace fixed_point_arithmetic;
 
     // Create reference
-    SimpleTensor<T> dst{ c.shape(), c.data_type(), 1, c.fixed_point_position() };
+    SimpleTensor<T> dst{ c.shape(), c.data_type(), 1 };
 
     // Compute reference
     using promoted_type = fixed_point_arithmetic::traits::promote_t<T>;
@@ -156,8 +156,6 @@ SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const S
 
 template SimpleTensor<float> gemm(const SimpleTensor<float> &a, const SimpleTensor<float> &b, const SimpleTensor<float> &c, float alpha, float beta);
 template SimpleTensor<half> gemm(const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta);
-template SimpleTensor<qint8_t> gemm(const SimpleTensor<qint8_t> &a, const SimpleTensor<qint8_t> &b, const SimpleTensor<qint8_t> &c, float alpha, float beta);
-template SimpleTensor<qint16_t> gemm(const SimpleTensor<qint16_t> &a, const SimpleTensor<qint16_t> &b, const SimpleTensor<qint16_t> &c, float alpha, float beta);
 } // namespace reference
 } // namespace validation
 } // namespace test
diff --git a/tests/validation/reference/LocallyConnected.cpp b/tests/validation/reference/LocallyConnected.cpp
index 08e3f02761..ecc582b181 100644
--- a/tests/validation/reference/LocallyConnected.cpp
+++ b/tests/validation/reference/LocallyConnected.cpp
@@ -41,7 +41,7 @@ template <typename T, typename TB>
 SimpleTensor<T> locally_connected(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, const TensorShape &output_shape, const PadStrideInfo &info)
 {
     // Create reference
-    SimpleTensor<T> dst{ output_shape, src.data_type(), 1, src.fixed_point_position(), src.quantization_info() };
+    SimpleTensor<T> dst{ output_shape, src.data_type(), 1, src.quantization_info() };
 
     // Compute reference
     const int width_in = src.shape().x();
diff --git a/tests/validation/reference/NormalizationLayer.cpp b/tests/validation/reference/NormalizationLayer.cpp
index 226af96fe3..85872c8f90 100644
--- a/tests/validation/reference/NormalizationLayer.cpp
+++ b/tests/validation/reference/NormalizationLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -38,7 +38,7 @@ template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
 SimpleTensor<T> normalization_layer(const SimpleTensor<T> &src, NormalizationLayerInfo info)
 {
     // Create reference
-    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };
+    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
 
     // Compute reference
     const uint32_t norm_size = info.norm_size();
@@ -152,7 +152,7 @@ SimpleTensor<T> normalization_layer(const SimpleTensor<T> &src, NormalizationLay
     using namespace fixed_point_arithmetic;
 
     // Create reference
-    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };
+    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
 
     // Compute reference
     const int fixed_point_position = src.fixed_point_position();
@@ -267,8 +267,6 @@ SimpleTensor<T> normalization_layer(const SimpleTensor<T> &src, NormalizationLay
 
 template SimpleTensor<float> normalization_layer(const SimpleTensor<float> &src, NormalizationLayerInfo info);
 template SimpleTensor<half> normalization_layer(const SimpleTensor<half> &src, NormalizationLayerInfo info);
-template SimpleTensor<qint8_t> normalization_layer(const SimpleTensor<qint8_t> &src, NormalizationLayerInfo info);
-template SimpleTensor<qint16_t> normalization_layer(const SimpleTensor<qint16_t> &src, NormalizationLayerInfo info);
 } // namespace reference
 } // namespace validation
 } // namespace test
diff --git a/tests/validation/reference/Permute.cpp b/tests/validation/reference/Permute.cpp
index bbb2e8d4d7..29c3c5cda8 100644
--- a/tests/validation/reference/Permute.cpp
+++ b/tests/validation/reference/Permute.cpp
@@ -42,7 +42,7 @@ SimpleTensor<T> permute(const SimpleTensor<T> &src, PermutationVector perm)
     permute(dst_shape, perm);
 
     // Create reference
-    SimpleTensor<T> dst{ dst_shape, src.data_type(), src.num_channels(), src.fixed_point_position(), src.quantization_info() };
+    SimpleTensor<T> dst{ dst_shape, src.data_type(), src.num_channels(), src.quantization_info() };
 
     // Compute reference
     for(int i = 0; i < src.num_elements(); ++i)
diff --git a/tests/validation/reference/PoolingLayer.cpp b/tests/validation/reference/PoolingLayer.cpp
index 69734545c9..e9054b9043 100644
--- a/tests/validation/reference/PoolingLayer.cpp
+++ b/tests/validation/reference/PoolingLayer.cpp
@@ -44,7 +44,7 @@ SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, const PoolingLayerInfo
     ARM_COMPUTE_ERROR_ON(info.is_global_pooling() && (src.shape().x() != src.shape().y()));
 
     // Create reference
-    SimpleTensor<T> dst{ compute_pool_shape(TensorInfo(src.shape(), 1, src.data_type(), src.fixed_point_position()), info), src.data_type(), 1, src.fixed_point_position() };
+    SimpleTensor<T> dst{ compute_pool_shape(TensorInfo(src.shape(), 1, src.data_type(), src.fixed_point_position()), info), src.data_type(), 1 };
 
     const int pool_size_x = info.is_global_pooling() ? src.shape().x() : info.pool_size().width;
     const int pool_size_y = info.is_global_pooling() ? src.shape().y() : info.pool_size().height;
@@ -152,128 +152,6 @@ SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, const PoolingLayerInfo
     return dst;
 }
 
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type>
-SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, const PoolingLayerInfo &info)
-{
-    ARM_COMPUTE_ERROR_ON(info.is_global_pooling() && (src.shape().x() != src.shape().y()));
-
-    const auto w_src = static_cast<int>(src.shape()[0]);
-    const auto h_src = static_cast<int>(src.shape()[1]);
-    const int upper_dims = src.shape().total_size() / (w_src * h_src);
-
-    const int pool_size_x = info.is_global_pooling() ? src.shape().x() : info.pool_size().width;
-    const int pool_size_y = info.is_global_pooling() ? src.shape().y() : info.pool_size().height;
-    PoolingType type = info.pool_type();
-    int pool_stride_x = info.pad_stride_info().stride().first;
-    int pool_stride_y = info.pad_stride_info().stride().second;
-    int pad_left = info.pad_stride_info().pad_left();
-    int pad_top = info.pad_stride_info().pad_top();
-    int pad_right = info.pad_stride_info().pad_right();
-    int pad_bottom = info.pad_stride_info().pad_bottom();
-    bool exclude_padding = info.exclude_padding();
-
-    // Create reference
-    SimpleTensor<T> dst{ compute_pool_shape(TensorInfo(src.shape(), 1, src.data_type(), src.fixed_point_position()), info), src.data_type(), 1, src.fixed_point_position() };
-
-    const auto w_dst = static_cast<int>(dst.shape()[0]);
-    const auto h_dst = static_cast<int>(dst.shape()[1]);
-
-    if(type == PoolingType::MAX)
-    {
-        for(int r = 0; r < upper_dims; ++r)
-        {
-            for(int h = 0; h < h_dst; ++h)
-            {
-                for(int w = 0; w < w_dst; ++w)
-                {
-                    int wstart = w * pool_stride_x - pad_left;
-                    int hstart = h * pool_stride_y - pad_top;
-                    int wend = std::min(wstart + pool_size_x, w_src);
-                    int hend = std::min(hstart + pool_size_y, h_src);
-                    wstart = std::max(wstart, 0);
-                    hstart = std::max(hstart, 0);
-
-                    T max_val = std::numeric_limits<T>::lowest();
-                    for(int y = hstart; y < hend; ++y)
-                    {
-                        for(int x = wstart; x < wend; ++x)
-                        {
-                            const T val = src[r * h_src * w_src + y * w_src + x];
-                            if(val > max_val)
-                            {
-                                max_val = val;
-                            }
-                        }
-                    }
-
-                    dst[r * h_dst * w_dst + h * w_dst + w] = max_val;
-                }
-            }
-        }
-    }
-    else // Average or l2 pooling
-    {
-        for(int r = 0; r < upper_dims; ++r)
-        {
-            for(int h = 0; h < h_dst; ++h)
-            {
-                for(int w = 0; w < w_dst; ++w)
-                {
-                    int wstart = w * pool_stride_x - pad_left;
-                    int hstart = h * pool_stride_y - pad_top;
-                    int wend = std::min(wstart + pool_size_x, w_src + pad_right);
-                    int hend = std::min(hstart + pool_size_y, h_src + pad_bottom);
-                    int pool = (hend - hstart) * (wend - wstart);
-                    wstart = std::max(wstart, 0);
-                    hstart = std::max(hstart, 0);
-                    wend = std::min(wend, w_src);
-                    hend = std::min(hend, h_src);
-                    // Exclude padding pixels from the average
-                    if(exclude_padding)
-                    {
-                        pool = (hend - hstart) * (wend - wstart);
-                    }
-
-                    using namespace fixed_point_arithmetic;
-
-                    const int fixed_point_position = src.fixed_point_position();
-                    const fixed_point<T> const_1(1, fixed_point_position);
-                    const fixed_point<T> invpool_fp(1.f / static_cast<float>(pool), fixed_point_position);
-                    fixed_point<T> avg_val(0, fixed_point_position, true);
-
-                    if(type == PoolingType::AVG)
-                    {
-                        for(int y = hstart; y < hend; ++y)
-                        {
-                            for(int x = wstart; x < wend; ++x)
-                            {
-                                const fixed_point<T> in_fp(src[r * h_src * w_src + y * w_src + x], fixed_point_position, true);
-                                avg_val = add(avg_val, in_fp);
-                            }
-                        }
-                        dst[r * h_dst * w_dst + h * w_dst + w] = mul(avg_val, invpool_fp).raw();
-                    }
-                    else
-                    {
-                        for(int y = hstart; y < hend; ++y)
-                        {
-                            for(int x = wstart; x < wend; ++x)
-                            {
-                                const fixed_point<T> in_fp(src[r * h_src * w_src + y * w_src + x], fixed_point_position, true);
-                                avg_val = add(avg_val, mul(in_fp, in_fp));
-                            }
-                        }
-                        auto res = div(const_1, (inv_sqrt(mul(avg_val, invpool_fp))));
-                        dst[r * h_dst * w_dst + h * w_dst + w] = res.raw();
-                    }
-                }
-            }
-        }
-    }
-
-    return dst;
-}
-
 template <>
 SimpleTensor<uint8_t> pooling_layer<uint8_t>(const SimpleTensor<uint8_t> &src, const PoolingLayerInfo &info)
 {
@@ -285,8 +163,6 @@ SimpleTensor<uint8_t> pooling_layer<uint8_t>(const SimpleTensor<uint8_t> &src, c
 
 template SimpleTensor<float> pooling_layer(const SimpleTensor<float> &src, const PoolingLayerInfo &info);
 template SimpleTensor<half> pooling_layer(const SimpleTensor<half> &src, const PoolingLayerInfo &info);
-template SimpleTensor<qint8_t> pooling_layer(const SimpleTensor<qint8_t> &src, const PoolingLayerInfo &info);
-template SimpleTensor<qint16_t> pooling_layer(const SimpleTensor<qint16_t> &src, const PoolingLayerInfo &info);
 } // namespace reference
 } // namespace validation
 } // namespace test
diff --git a/tests/validation/reference/SoftmaxLayer.cpp b/tests/validation/reference/SoftmaxLayer.cpp
index 90b9b1f7e2..ae4bcd8f0e 100644
--- a/tests/validation/reference/SoftmaxLayer.cpp
+++ b/tests/validation/reference/SoftmaxLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -38,7 +38,7 @@ template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
 SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta)
 {
     // Create reference
-    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };
+    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
 
     // Compute reference
     const int cols = src.shape()[0];
@@ -79,7 +79,7 @@ SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta)
     using namespace fixed_point_arithmetic;
 
     // Create reference
-    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };
+    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
 
     // Compute reference
     const int cols = src.shape()[0];
@@ -128,8 +128,6 @@ SimpleTensor<uint8_t> softmax_layer<uint8_t>(const SimpleTensor<uint8_t> &src, f
 
 template SimpleTensor<float> softmax_layer(const SimpleTensor<float> &src, float beta);
 template SimpleTensor<half> softmax_layer(const SimpleTensor<half> &src, float beta);
-template SimpleTensor<qint8_t> softmax_layer(const SimpleTensor<qint8_t> &src, float beta);
-template SimpleTensor<qint16_t> softmax_layer(const SimpleTensor<qint16_t> &src, float beta);
 } // namespace reference
 } // namespace validation
 } // namespace test
diff --git a/tests/validation/reference/WidthConcatenateLayer.cpp b/tests/validation/reference/WidthConcatenateLayer.cpp
index fe79b4a138..5b89934df5 100644
--- a/tests/validation/reference/WidthConcatenateLayer.cpp
+++ b/tests/validation/reference/WidthConcatenateLayer.cpp
@@ -85,8 +85,6 @@ SimpleTensor<T> widthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs)
 
 template SimpleTensor<float> widthconcatenate_layer(const std::vector<SimpleTensor<float>> &srcs);
 template SimpleTensor<half> widthconcatenate_layer(const std::vector<SimpleTensor<half>> &srcs);
-template SimpleTensor<qint8_t> widthconcatenate_layer(const std::vector<SimpleTensor<qint8_t>> &srcs);
-template SimpleTensor<qint16_t> widthconcatenate_layer(const std::vector<SimpleTensor<qint16_t>> &srcs);
 } // namespace reference
 } // namespace validation
 } // namespace test
-- 
cgit v1.2.1