author    Vidhya Sudhan Loganathan <vidhyasudhan.loganathan@arm.com>  2018-07-02 09:13:49 +0100
committer Anthony Barbier <anthony.barbier@arm.com>  2018-11-02 16:54:10 +0000
commit    014333d73883c3872e458cedda5ccef586a7ccd4 (patch)
tree      0f28bbc1ab769993af91b40e4584061f6ed6d3fa /tests/validation/reference
parent    de01468bbfff3a7d8bcbba3bfdf5698fb2e3b267 (diff)
download  ComputeLibrary-014333d73883c3872e458cedda5ccef586a7ccd4.tar.gz
COMPMID-970 : Remove QS8 / QS16 support
Removed Fixed point position arguments from test sources

Change-Id: I8343724723b71611fd501ed34de0866d3fb60e7e
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/136382
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele DiGiorgio <michele.digiorgio@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'tests/validation/reference')
-rw-r--r--  tests/validation/reference/AbsoluteDifference.cpp        |   3
-rw-r--r--  tests/validation/reference/Accumulate.cpp                |   3
-rw-r--r--  tests/validation/reference/ActivationLayer.cpp           |  69
-rw-r--r--  tests/validation/reference/ArithmeticAddition.cpp        |   1
-rw-r--r--  tests/validation/reference/ArithmeticDivision.cpp        |   1
-rw-r--r--  tests/validation/reference/BatchNormalizationLayer.cpp   |  55
-rw-r--r--  tests/validation/reference/BatchNormalizationLayer.h     |   6
-rw-r--r--  tests/validation/reference/ChannelShuffle.cpp            |   2
-rw-r--r--  tests/validation/reference/ConvolutionLayer.cpp          |   6
-rw-r--r--  tests/validation/reference/DeconvolutionLayer.cpp        |   2
-rw-r--r--  tests/validation/reference/DepthConcatenateLayer.cpp     |   4
-rw-r--r--  tests/validation/reference/DepthConvertLayer.cpp         |  58
-rw-r--r--  tests/validation/reference/DepthwiseConvolutionLayer.cpp |   4
-rw-r--r--  tests/validation/reference/FlattenLayer.cpp              |   4
-rw-r--r--  tests/validation/reference/FullyConnectedLayer.cpp       |  64
-rw-r--r--  tests/validation/reference/GEMM.cpp                      |   6
-rw-r--r--  tests/validation/reference/LocallyConnected.cpp          |   2
-rw-r--r--  tests/validation/reference/NormalizationLayer.cpp        |   8
-rw-r--r--  tests/validation/reference/Permute.cpp                   |   2
-rw-r--r--  tests/validation/reference/PoolingLayer.cpp              | 126
-rw-r--r--  tests/validation/reference/SoftmaxLayer.cpp              |   8
-rw-r--r--  tests/validation/reference/WidthConcatenateLayer.cpp     |   2
22 files changed, 37 insertions(+), 399 deletions(-)
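
The mechanical change repeated across all of these files is visible in the hunks below: SimpleTensor construction and the reference-function signatures lose their trailing fixed-point-position argument. As a before/after sketch (argument lists simplified from what the hunks show):

    // Before: QS8/QS16 reference tensors carried a fixed point position.
    SimpleTensor<T> dst{ shape, data_type, 1, src.fixed_point_position() };
    SimpleTensor<T> dst{ shape, data_type, 1, src.fixed_point_position(), src.quantization_info() };

    // After: the argument is gone, and the quantization info (still needed
    // for QASYMM8) moves up one position.
    SimpleTensor<T> dst{ shape, data_type, 1 };
    SimpleTensor<T> dst{ shape, data_type, 1, src.quantization_info() };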
diff --git a/tests/validation/reference/AbsoluteDifference.cpp b/tests/validation/reference/AbsoluteDifference.cpp
index f518e67324..f9fce5b42a 100644
--- a/tests/validation/reference/AbsoluteDifference.cpp
+++ b/tests/validation/reference/AbsoluteDifference.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,7 +24,6 @@
#include "AbsoluteDifference.h"
#include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
#include "tests/validation/Helpers.h"
namespace arm_compute
diff --git a/tests/validation/reference/Accumulate.cpp b/tests/validation/reference/Accumulate.cpp
index 29a2007bbd..7f34be9663 100644
--- a/tests/validation/reference/Accumulate.cpp
+++ b/tests/validation/reference/Accumulate.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,7 +24,6 @@
#include "Accumulate.h"
#include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
#include "tests/validation/Helpers.h"
namespace arm_compute
diff --git a/tests/validation/reference/ActivationLayer.cpp b/tests/validation/reference/ActivationLayer.cpp
index df7f6534bc..9455effd72 100644
--- a/tests/validation/reference/ActivationLayer.cpp
+++ b/tests/validation/reference/ActivationLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,7 +24,6 @@
#include "ActivationLayer.h"
#include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
#include "tests/validation/Helpers.h"
namespace arm_compute
@@ -39,7 +38,7 @@ template <typename T, typename std::enable_if<is_floating_point<T>::value, int>:
SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo info)
{
// Create reference
- SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };
+ SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
// Compute reference
const T a(info.a());
@@ -92,68 +91,6 @@ SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo
return dst;
}
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type>
-SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo info)
-{
- using namespace fixed_point_arithmetic;
-
- // Create reference
- SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };
-
- // Compute reference
- const int fixed_point_position = src.fixed_point_position();
- const fixed_point<T> a(info.a(), fixed_point_position);
- const fixed_point<T> b(info.b(), fixed_point_position);
- const fixed_point<T> const_0(0, fixed_point_position);
- const fixed_point<T> const_1(1, fixed_point_position);
-
- for(int i = 0; i < src.num_elements(); ++i)
- {
- fixed_point<T> x(src[i], fixed_point_position, true);
-
- switch(info.activation())
- {
- case ActivationLayerInfo::ActivationFunction::ABS:
- dst[i] = abs(x).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::LINEAR:
- dst[i] = add(b, mul(a, x)).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::LOGISTIC:
- dst[i] = (const_1 / (const_1 + exp(-x))).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::RELU:
- dst[i] = max(const_0, x).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
- dst[i] = min(a, max(const_0, x)).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
- dst[i] = min(a, max(b, x)).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
- dst[i] = (x > const_0) ? x.raw() : mul(a, x).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
- dst[i] = log(const_1 + exp(x)).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::SQRT:
- dst[i] = (const_1 / inv_sqrt(x)).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::SQUARE:
- dst[i] = mul(x, x).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::TANH:
- dst[i] = mul(a, tanh(mul(b, x))).raw();
- break;
- default:
- ARM_COMPUTE_ERROR("Unsupported activation function");
- }
- }
-
- return dst;
-}
-
template <>
SimpleTensor<uint8_t> activation_layer<uint8_t>(const SimpleTensor<uint8_t> &src, ActivationLayerInfo info)
{
@@ -165,8 +102,6 @@ SimpleTensor<uint8_t> activation_layer<uint8_t>(const SimpleTensor<uint8_t> &src
template SimpleTensor<float> activation_layer(const SimpleTensor<float> &src, ActivationLayerInfo info);
template SimpleTensor<half> activation_layer(const SimpleTensor<half> &src, ActivationLayerInfo info);
-template SimpleTensor<qint8_t> activation_layer(const SimpleTensor<qint8_t> &src, ActivationLayerInfo info);
-template SimpleTensor<qint16_t> activation_layer(const SimpleTensor<qint16_t> &src, ActivationLayerInfo info);
} // namespace reference
} // namespace validation
} // namespace test
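
The overload deleted above mirrored the surviving floating-point reference case for case, just in fixed-point arithmetic. For orientation, a minimal standalone sketch of the float pattern that remains, covering a few of the activation cases (hypothetical helper, not the library's templated activation_layer):

    #include <algorithm>
    #include <cmath>
    #include <vector>

    enum class Act { RELU, BOUNDED_RELU, LOGISTIC, TANH };

    // Elementwise activation with parameters a and b, as in the reference loop.
    std::vector<float> activation_ref(const std::vector<float> &src, Act act, float a, float b)
    {
        std::vector<float> dst(src.size());
        for(std::size_t i = 0; i < src.size(); ++i)
        {
            const float x = src[i];
            switch(act)
            {
                case Act::RELU:         dst[i] = std::max(0.f, x);              break;
                case Act::BOUNDED_RELU: dst[i] = std::min(a, std::max(0.f, x)); break;
                case Act::LOGISTIC:     dst[i] = 1.f / (1.f + std::exp(-x));    break;
                case Act::TANH:         dst[i] = a * std::tanh(b * x);          break;
            }
        }
        return dst;
    }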
diff --git a/tests/validation/reference/ArithmeticAddition.cpp b/tests/validation/reference/ArithmeticAddition.cpp
index f26838dcb8..4569277103 100644
--- a/tests/validation/reference/ArithmeticAddition.cpp
+++ b/tests/validation/reference/ArithmeticAddition.cpp
@@ -24,7 +24,6 @@
#include "ArithmeticAddition.h"
#include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
#include "tests/validation/Helpers.h"
namespace arm_compute
diff --git a/tests/validation/reference/ArithmeticDivision.cpp b/tests/validation/reference/ArithmeticDivision.cpp
index 934e89052f..0102231993 100644
--- a/tests/validation/reference/ArithmeticDivision.cpp
+++ b/tests/validation/reference/ArithmeticDivision.cpp
@@ -24,7 +24,6 @@
#include "ArithmeticDivision.h"
#include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
#include "tests/validation/Helpers.h"
namespace arm_compute
diff --git a/tests/validation/reference/BatchNormalizationLayer.cpp b/tests/validation/reference/BatchNormalizationLayer.cpp
index c8badacc79..3d1a6ed7d7 100644
--- a/tests/validation/reference/BatchNormalizationLayer.cpp
+++ b/tests/validation/reference/BatchNormalizationLayer.cpp
@@ -36,56 +36,11 @@ namespace validation
{
namespace reference
{
-// Batch Normalization Layer for fixed point type
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type *>
-SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
- ActivationLayerInfo act_info, int fixed_point_position)
-{
- ARM_COMPUTE_UNUSED(act_info);
- SimpleTensor<T> result(src.shape(), src.data_type());
-
- const auto cols = static_cast<int>(src.shape()[0]);
- const auto rows = static_cast<int>(src.shape()[1]);
- const auto depth = static_cast<int>(src.shape()[2]);
- const int upper_dims = src.shape().total_size() / (cols * rows * depth);
-
- for(int r = 0; r < upper_dims; ++r)
- {
- for(int i = 0; i < depth; ++i)
- {
- for(int k = 0; k < rows; ++k)
- {
- for(int l = 0; l < cols; ++l)
- {
- const int pos = l + k * cols + i * rows * cols + r * cols * rows * depth;
-
- fixed_point_arithmetic::fixed_point<T> src_qs(src[pos], fixed_point_position, true);
- fixed_point_arithmetic::fixed_point<T> var_qs(var[i], fixed_point_position, true);
- fixed_point_arithmetic::fixed_point<T> mean_qs(mean[i], fixed_point_position, true);
- fixed_point_arithmetic::fixed_point<T> beta_qs(beta[i], fixed_point_position, true);
- fixed_point_arithmetic::fixed_point<T> gamma_qs(gamma[i], fixed_point_position, true);
- fixed_point_arithmetic::fixed_point<T> epsilon_qs(epsilon, fixed_point_position);
-
- auto denominator = fixed_point_arithmetic::inv_sqrt(var_qs + epsilon_qs);
- auto numerator = src_qs - mean_qs;
- auto x_bar = numerator * denominator;
- x_bar = beta_qs + x_bar * gamma_qs;
- result[pos] = x_bar.raw();
- }
- }
- }
- }
-
- return result;
-}
-
// Batch Normalization Layer for floating point type
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type *>
SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
- ActivationLayerInfo act_info, int fixed_point_position)
+ ActivationLayerInfo act_info)
{
- ARM_COMPUTE_UNUSED(fixed_point_position);
-
SimpleTensor<T> result(src.shape(), src.data_type());
const auto cols = static_cast<int>(src.shape()[0]);
@@ -119,14 +74,10 @@ SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const Simp
return result;
}
template SimpleTensor<float> batch_normalization_layer(const SimpleTensor<float> &src, const SimpleTensor<float> &mean, const SimpleTensor<float> &var, const SimpleTensor<float> &beta,
- const SimpleTensor<float> &gamma, float epsilon, ActivationLayerInfo act_info, int fixed_point_position);
-template SimpleTensor<int8_t> batch_normalization_layer(const SimpleTensor<int8_t> &src, const SimpleTensor<int8_t> &mean, const SimpleTensor<int8_t> &var, const SimpleTensor<int8_t> &beta,
- const SimpleTensor<int8_t> &gamma, float epsilon, ActivationLayerInfo act_info, int fixed_point_position);
-template SimpleTensor<int16_t> batch_normalization_layer(const SimpleTensor<int16_t> &src, const SimpleTensor<int16_t> &mean, const SimpleTensor<int16_t> &var, const SimpleTensor<int16_t> &beta,
- const SimpleTensor<int16_t> &gamma, float epsilon, ActivationLayerInfo act_info, int fixed_point_position);
+ const SimpleTensor<float> &gamma, float epsilon, ActivationLayerInfo act_info);
template SimpleTensor<half> batch_normalization_layer(const SimpleTensor<half> &src, const SimpleTensor<half> &mean, const SimpleTensor<half> &var,
const SimpleTensor<half> &beta,
- const SimpleTensor<half> &gamma, float epsilon, ActivationLayerInfo act_info, int fixed_point_position);
+ const SimpleTensor<half> &gamma, float epsilon, ActivationLayerInfo act_info);
} // namespace reference
} // namespace validation
diff --git a/tests/validation/reference/BatchNormalizationLayer.h b/tests/validation/reference/BatchNormalizationLayer.h
index 329909dab4..b45d820412 100644
--- a/tests/validation/reference/BatchNormalizationLayer.h
+++ b/tests/validation/reference/BatchNormalizationLayer.h
@@ -37,13 +37,11 @@ namespace reference
{
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
- ActivationLayerInfo act_info,
- int fixed_point_position);
+ ActivationLayerInfo act_info);
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
- ActivationLayerInfo act_info,
- int fixed_point_position);
+ ActivationLayerInfo act_info);
} // namespace reference
} // namespace validation
} // namespace test
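
Both the deleted fixed-point overload and the surviving float one compute the same per-element normalization; only the arithmetic type differed. The arithmetic, as a sketch with the batch/channel indexing stripped away:

    #include <cmath>

    // y = beta + gamma * (x - mean) / sqrt(var + epsilon), per channel.
    float batch_norm_elem(float x, float mean, float var, float beta, float gamma, float epsilon)
    {
        const float x_hat = (x - mean) / std::sqrt(var + epsilon);
        return beta + gamma * x_hat;
    }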
diff --git a/tests/validation/reference/ChannelShuffle.cpp b/tests/validation/reference/ChannelShuffle.cpp
index c4d8d50e3d..b8aa9203ab 100644
--- a/tests/validation/reference/ChannelShuffle.cpp
+++ b/tests/validation/reference/ChannelShuffle.cpp
@@ -39,7 +39,7 @@ template <typename T>
SimpleTensor<T> channel_shuffle(const SimpleTensor<T> &src, int num_groups)
{
// Create reference
- SimpleTensor<T> dst{ src.shape(), src.data_type(), src.num_channels(), src.fixed_point_position(), src.quantization_info() };
+ SimpleTensor<T> dst{ src.shape(), src.data_type(), src.num_channels(), src.quantization_info() };
const int M = src.shape()[0];
const int N = src.shape()[1];
diff --git a/tests/validation/reference/ConvolutionLayer.cpp b/tests/validation/reference/ConvolutionLayer.cpp
index fe558ba4af..00c839d2df 100644
--- a/tests/validation/reference/ConvolutionLayer.cpp
+++ b/tests/validation/reference/ConvolutionLayer.cpp
@@ -108,7 +108,7 @@ SimpleTensor<T> convolution_layer(const SimpleTensor<T> &src, const SimpleTensor
const Size2D &dilation)
{
// Create reference
- SimpleTensor<T> dst{ output_shape, src.data_type(), 1, src.fixed_point_position(), src.quantization_info() };
+ SimpleTensor<T> dst{ output_shape, src.data_type(), 1, src.quantization_info() };
if(src.data_layout() == DataLayout::NHWC)
{
@@ -128,10 +128,6 @@ template SimpleTensor<float> convolution_layer(const SimpleTensor<float> &src, c
const PadStrideInfo &info, const Size2D &dilation);
template SimpleTensor<half> convolution_layer(const SimpleTensor<half> &src, const SimpleTensor<half> &weights, const SimpleTensor<half> &bias, const TensorShape &output_shape,
const PadStrideInfo &info, const Size2D &dilation);
-template SimpleTensor<qint8_t> convolution_layer(const SimpleTensor<qint8_t> &src, const SimpleTensor<qint8_t> &weights, const SimpleTensor<qint8_t> &bias, const TensorShape &output_shape,
- const PadStrideInfo &info, const Size2D &dilation);
-template SimpleTensor<qint16_t> convolution_layer(const SimpleTensor<qint16_t> &src, const SimpleTensor<qint16_t> &weights, const SimpleTensor<qint16_t> &bias, const TensorShape &output_shape,
- const PadStrideInfo &info, const Size2D &dilation);
template SimpleTensor<uint8_t> convolution_layer(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &bias, const TensorShape &output_shape,
const PadStrideInfo &info, const Size2D &dilation);
} // namespace reference
diff --git a/tests/validation/reference/DeconvolutionLayer.cpp b/tests/validation/reference/DeconvolutionLayer.cpp
index 35437084b8..d073bbf7a1 100644
--- a/tests/validation/reference/DeconvolutionLayer.cpp
+++ b/tests/validation/reference/DeconvolutionLayer.cpp
@@ -46,7 +46,7 @@ SimpleTensor<T> deconvolution_layer(const SimpleTensor<T> &src, const SimpleTens
int out_y = src.shape().y() + (src.shape().y() - 1) * (stride_y - 1) + a.second + 2 * info.pad().second;
scaled_shape.set(0, out_x);
scaled_shape.set(1, out_y);
- SimpleTensor<T> scaled{ scaled_shape, src.data_type(), 1, src.fixed_point_position() };
+ SimpleTensor<T> scaled{ scaled_shape, src.data_type(), 1 };
const int width_in = src.shape().x();
const int height_in = src.shape().y();
diff --git a/tests/validation/reference/DepthConcatenateLayer.cpp b/tests/validation/reference/DepthConcatenateLayer.cpp
index 9a7248493d..c9a23520c7 100644
--- a/tests/validation/reference/DepthConcatenateLayer.cpp
+++ b/tests/validation/reference/DepthConcatenateLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -95,8 +95,6 @@ SimpleTensor<T> depthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs)
template SimpleTensor<float> depthconcatenate_layer(const std::vector<SimpleTensor<float>> &srcs);
template SimpleTensor<half> depthconcatenate_layer(const std::vector<SimpleTensor<half>> &srcs);
-template SimpleTensor<qint8_t> depthconcatenate_layer(const std::vector<SimpleTensor<qint8_t>> &srcs);
-template SimpleTensor<qint16_t> depthconcatenate_layer(const std::vector<SimpleTensor<qint16_t>> &srcs);
} // namespace reference
} // namespace validation
} // namespace test
diff --git a/tests/validation/reference/DepthConvertLayer.cpp b/tests/validation/reference/DepthConvertLayer.cpp
index dd095b8912..022007720a 100644
--- a/tests/validation/reference/DepthConvertLayer.cpp
+++ b/tests/validation/reference/DepthConvertLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -36,44 +36,6 @@ namespace validation
{
namespace reference
{
-template < typename T1, typename T2, typename std::enable_if < std::is_integral<T1>::value &&std::is_floating_point<T2>::value, int >::type >
-SimpleTensor<T2> depth_convert(const SimpleTensor<T1> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift)
-{
- ARM_COMPUTE_UNUSED(policy);
- ARM_COMPUTE_UNUSED(shift);
-
- using namespace fixed_point_arithmetic;
- SimpleTensor<T2> result(src.shape(), dt_out);
-
- const int fixed_point_position = src.fixed_point_position();
-
- for(int i = 0; i < src.num_elements(); ++i)
- {
- result[i] = static_cast<float>(fixed_point<T1>(src[i], fixed_point_position, true));
- }
-
- return result;
-}
-
-template < typename T1, typename T2, typename std::enable_if < std::is_floating_point<T1>::value &&std::is_integral<T2>::value, int >::type >
-SimpleTensor<T2> depth_convert(const SimpleTensor<T1> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift)
-{
- ARM_COMPUTE_UNUSED(policy);
- ARM_COMPUTE_UNUSED(shift);
-
- using namespace fixed_point_arithmetic;
- SimpleTensor<T2> result(src.shape(), dt_out, 1, src.fixed_point_position());
-
- const int fixed_point_position = result.fixed_point_position();
-
- for(int i = 0; i < src.num_elements(); ++i)
- {
- result[i] = fixed_point<T2>(src[i], fixed_point_position).raw();
- }
-
- return result;
-}
-
template < typename T1, typename T2, typename std::enable_if < std::is_integral<T1>::value &&std::is_integral<T2>::value &&!std::is_same<T1, T2>::value, int >::type >
SimpleTensor<T2> depth_convert(const SimpleTensor<T1> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift)
{
@@ -126,20 +88,6 @@ SimpleTensor<T2> depth_convert(const SimpleTensor<T1> &src, DataType dt_out, Con
return result;
}
-template < typename T1, typename T2, typename std::enable_if < std::is_floating_point<T1>::value &&is_floating_point<T2>::value, int >::type >
-SimpleTensor<T2> depth_convert(const SimpleTensor<T1> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift)
-{
- ARM_COMPUTE_UNUSED(policy);
- ARM_COMPUTE_UNUSED(shift);
-
- SimpleTensor<T2> result(src.shape(), dt_out);
-
- for(int i = 0; i < src.num_elements(); ++i)
- {
- result[i] = static_cast<T2>(src[i]);
- }
-}
-
template SimpleTensor<uint16_t> depth_convert(const SimpleTensor<uint8_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
template SimpleTensor<int16_t> depth_convert(const SimpleTensor<uint8_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
template SimpleTensor<int32_t> depth_convert(const SimpleTensor<uint8_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
@@ -147,10 +95,6 @@ template SimpleTensor<uint8_t> depth_convert(const SimpleTensor<uint16_t> &src,
template SimpleTensor<uint32_t> depth_convert(const SimpleTensor<uint16_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
template SimpleTensor<uint8_t> depth_convert(const SimpleTensor<int16_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
template SimpleTensor<int32_t> depth_convert(const SimpleTensor<int16_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
-template SimpleTensor<float> depth_convert(const SimpleTensor<int8_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
-template SimpleTensor<float> depth_convert(const SimpleTensor<int16_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
-template SimpleTensor<int8_t> depth_convert(const SimpleTensor<float> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
-template SimpleTensor<int16_t> depth_convert(const SimpleTensor<float> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
} // namespace reference
} // namespace validation
} // namespace test
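
With the float-to-fixed-point bridges gone, only the integral-to-integral conversions remain instantiated. One concrete case of the surviving contract, assuming the usual widen-shift-then-apply-policy behaviour (illustrative helper, not the library API):

    #include <algorithm>
    #include <cstdint>
    #include <limits>

    // uint8_t -> int16_t: widen, apply the left shift, then saturate or wrap
    // depending on the ConvertPolicy.
    int16_t convert_u8_to_s16(uint8_t v, bool saturate, uint32_t shift)
    {
        const int32_t wide = static_cast<int32_t>(v) << shift;
        if(saturate)
        {
            const int32_t lo = std::numeric_limits<int16_t>::min();
            const int32_t hi = std::numeric_limits<int16_t>::max();
            return static_cast<int16_t>(std::min(std::max(wide, lo), hi));
        }
        return static_cast<int16_t>(wide); // WRAP: plain truncation
    }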
diff --git a/tests/validation/reference/DepthwiseConvolutionLayer.cpp b/tests/validation/reference/DepthwiseConvolutionLayer.cpp
index 10c617e953..d8f3cbae49 100644
--- a/tests/validation/reference/DepthwiseConvolutionLayer.cpp
+++ b/tests/validation/reference/DepthwiseConvolutionLayer.cpp
@@ -53,7 +53,7 @@ template <typename T, typename TB>
SimpleTensor<T> depthwise_convolution(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &biases, const TensorShape &dst_shape, const PadStrideInfo &conv_info,
unsigned int depth_multiplier)
{
- SimpleTensor<T> dst{ dst_shape, src.data_type(), 1, src.fixed_point_position() };
+ SimpleTensor<T> dst{ dst_shape, src.data_type(), 1 };
// Compute reference
const int filter_width = weights.shape().x();
@@ -122,7 +122,7 @@ template <>
SimpleTensor<uint8_t> depthwise_convolution(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &biases, const TensorShape &dst_shape,
const PadStrideInfo &conv_info, unsigned int depth_multiplier)
{
- SimpleTensor<uint8_t> dst{ dst_shape, src.data_type(), 1, src.fixed_point_position(), src.quantization_info() };
+ SimpleTensor<uint8_t> dst{ dst_shape, src.data_type(), 1, src.quantization_info() };
// Create reference
const int input_offset = -src.quantization_info().offset;
diff --git a/tests/validation/reference/FlattenLayer.cpp b/tests/validation/reference/FlattenLayer.cpp
index 44f4d93178..e140d752a0 100644
--- a/tests/validation/reference/FlattenLayer.cpp
+++ b/tests/validation/reference/FlattenLayer.cpp
@@ -36,7 +36,7 @@ namespace reference
template <typename T>
SimpleTensor<T> flatten_layer(const SimpleTensor<T> &src, const TensorShape &shape_flatten)
{
- SimpleTensor<T> dst(shape_flatten, src.data_type(), 1, src.fixed_point_position());
+ SimpleTensor<T> dst(shape_flatten, src.data_type(), 1);
// Note: Since the reference implementation does not use padding bytes, we can copy directly the content of the source tensor
std::copy(src.data(), src.data() + src.num_elements(), dst.data());
@@ -46,8 +46,6 @@ SimpleTensor<T> flatten_layer(const SimpleTensor<T> &src, const TensorShape &sha
template SimpleTensor<float> flatten_layer(const SimpleTensor<float> &src, const TensorShape &shape_flatten);
template SimpleTensor<half> flatten_layer(const SimpleTensor<half> &src, const TensorShape &shape_flatten);
-template SimpleTensor<qint8_t> flatten_layer(const SimpleTensor<qint8_t> &src, const TensorShape &shape_flatten);
-template SimpleTensor<qint16_t> flatten_layer(const SimpleTensor<qint16_t> &src, const TensorShape &shape_flatten);
} // namespace reference
} // namespace validation
} // namespace test
diff --git a/tests/validation/reference/FullyConnectedLayer.cpp b/tests/validation/reference/FullyConnectedLayer.cpp
index 5384715ace..3ef10eacea 100644
--- a/tests/validation/reference/FullyConnectedLayer.cpp
+++ b/tests/validation/reference/FullyConnectedLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,10 +44,8 @@ namespace
// Vector matrix multiply for floating point
template < typename T, typename TB, typename std::enable_if < is_floating_point<T>::value &&is_floating_point<TB>::value, int >::type = 0 >
void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &dst, int offset_src, int offset_dst, int cols_weights,
- int rows_weights, uint8_t fixed_point_position)
+ int rows_weights)
{
- ARM_COMPUTE_UNUSED(fixed_point_position);
-
const T *src_ptr = src.data() + offset_src;
const T *weights_ptr = weights.data();
const TB *bias_ptr = bias.data();
@@ -60,57 +58,16 @@ void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &w
}
}
-// Vector matrix multiply for fixed point type
-template < typename T, typename TB, typename std::enable_if < std::is_integral<T>::value &&std::is_integral<TB>::value, int >::type = 0 >
-void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &dst, int offset_src, int offset_dst, int cols_weights,
- int rows_weights, uint8_t fixed_point_position)
+// Vector matrix multiply for quantized type
+template < typename T, typename TB, typename std::enable_if < std::is_same<T, uint8_t>::value &&std::is_same<TB, int32_t>::value, int >::type = 0 >
+void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &dst, int offset_src, int offset_dst,
+ int cols_weights, int rows_weights)
{
const T *src_ptr = src.data() + offset_src;
const T *weights_ptr = weights.data();
const TB *bias_ptr = bias.data();
T *dst_ptr = dst.data() + offset_dst;
- using namespace fixed_point_arithmetic;
- using promoted_type = fixed_point_arithmetic::traits::promote_t<T>;
-
- for(int y = 0; y < rows_weights; ++y)
- {
- // Reset accumulator
- fixed_point<promoted_type> acc(0, fixed_point_position);
-
- for(int x = 0; x < cols_weights; ++x)
- {
- const fixed_point<promoted_type> i_value(src_ptr[x], fixed_point_position, true);
- const fixed_point<promoted_type> w_value(weights_ptr[x], fixed_point_position, true);
- acc = acc + i_value * w_value;
- }
-
- // Get the bias
- const fixed_point<T> b(bias_ptr[y], fixed_point_position, true);
-
- // Convert back and accumulate the bias
- fixed_point<T> res(acc);
- res = res + b;
-
- // Store the result
- dst_ptr[y] = res.raw();
-
- weights_ptr += cols_weights;
- }
-}
-
-// Vector matrix multiply for quantized type
-template <>
-void vector_matrix_multiply(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &bias, SimpleTensor<uint8_t> &dst, int offset_src, int offset_dst,
- int cols_weights, int rows_weights, uint8_t fixed_point_position)
-{
- ARM_COMPUTE_UNUSED(fixed_point_position);
-
- const uint8_t *src_ptr = src.data() + offset_src;
- const uint8_t *weights_ptr = weights.data();
- const int32_t *bias_ptr = bias.data();
- uint8_t *dst_ptr = dst.data() + offset_dst;
-
const int input_offset = -src.quantization_info().offset;
const float input_scale = src.quantization_info().scale;
const int weights_offset = -weights.quantization_info().offset;
@@ -141,7 +98,7 @@ void vector_matrix_multiply(const SimpleTensor<uint8_t> &src, const SimpleTensor
acc = utility::clamp<int32_t>(acc, 0, 255);
// Store the result
- dst_ptr[y] = static_cast<uint8_t>(acc);
+ dst_ptr[y] = static_cast<T>(acc);
weights_ptr += cols_weights;
}
@@ -152,7 +109,7 @@ template <typename T, typename TB>
SimpleTensor<T> fully_connected_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, const TensorShape &dst_shape)
{
// Create reference
- SimpleTensor<T> dst{ TensorShape{ dst_shape }, src.data_type(), 1, src.fixed_point_position(), src.quantization_info() };
+ SimpleTensor<T> dst{ TensorShape{ dst_shape }, src.data_type(), 1, src.quantization_info() };
// Sanity checks
const int num_batch_dimensions = std::max(0, static_cast<int>(dst_shape.num_dimensions()) - 1);
@@ -183,8 +140,7 @@ SimpleTensor<T> fully_connected_layer(const SimpleTensor<T> &src, const SimpleTe
offset_in,
offset_out,
cols_weights,
- rows_weights,
- src.fixed_point_position());
+ rows_weights);
}
return dst;
@@ -192,8 +148,6 @@ SimpleTensor<T> fully_connected_layer(const SimpleTensor<T> &src, const SimpleTe
template SimpleTensor<float> fully_connected_layer(const SimpleTensor<float> &src, const SimpleTensor<float> &weights, const SimpleTensor<float> &bias, const TensorShape &dst_shape);
template SimpleTensor<half> fully_connected_layer(const SimpleTensor<half> &src, const SimpleTensor<half> &weights, const SimpleTensor<half> &bias, const TensorShape &dst_shape);
-template SimpleTensor<qint8_t> fully_connected_layer(const SimpleTensor<qint8_t> &src, const SimpleTensor<qint8_t> &weights, const SimpleTensor<qint8_t> &bias, const TensorShape &dst_shape);
-template SimpleTensor<qint16_t> fully_connected_layer(const SimpleTensor<qint16_t> &src, const SimpleTensor<qint16_t> &weights, const SimpleTensor<qint16_t> &bias, const TensorShape &dst_shape);
template SimpleTensor<uint8_t> fully_connected_layer(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &bias, const TensorShape &dst_shape);
} // namespace reference
} // namespace validation
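
The quantized path that replaces the fixed-point one follows standard asymmetric-quantization arithmetic: accumulate in int32 with the (negated) offsets shown above, add the bias, requantize, clamp. A sketch of one output element under those assumptions (names illustrative; the requantization step is not shown in the hunk above):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <vector>

    // One row of the QASYMM8-style vector-matrix multiply. input_offset and
    // weights_offset are the negated quantization offsets, as in the diff.
    uint8_t quantized_dot(const std::vector<uint8_t> &src, const std::vector<uint8_t> &weights,
                          int32_t bias, int input_offset, float input_scale,
                          int weights_offset, float weights_scale,
                          int output_offset, float output_scale)
    {
        int32_t acc = 0;
        for(std::size_t x = 0; x < src.size(); ++x)
        {
            acc += (static_cast<int32_t>(src[x]) + input_offset)
                 * (static_cast<int32_t>(weights[x]) + weights_offset);
        }
        acc += bias;
        // Requantize to the output scale, add the output offset, clamp to 8 bits.
        const float multiplier = input_scale * weights_scale / output_scale;
        acc = static_cast<int32_t>(std::lround(acc * multiplier)) + output_offset;
        acc = std::min(std::max(acc, 0), 255);
        return static_cast<uint8_t>(acc);
    }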
diff --git a/tests/validation/reference/GEMM.cpp b/tests/validation/reference/GEMM.cpp
index f9dcfcbdd0..7378ada4ab 100644
--- a/tests/validation/reference/GEMM.cpp
+++ b/tests/validation/reference/GEMM.cpp
@@ -38,7 +38,7 @@ template <typename T, typename std::enable_if<is_floating_point<T>::value, int>:
SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta)
{
// Create reference
- SimpleTensor<T> dst{ c.shape(), c.data_type(), 1, c.fixed_point_position() };
+ SimpleTensor<T> dst{ c.shape(), c.data_type(), 1 };
// Compute reference
const int M = a.shape().y();
@@ -91,7 +91,7 @@ SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const S
using namespace fixed_point_arithmetic;
// Create reference
- SimpleTensor<T> dst{ c.shape(), c.data_type(), 1, c.fixed_point_position() };
+ SimpleTensor<T> dst{ c.shape(), c.data_type(), 1 };
// Compute reference
using promoted_type = fixed_point_arithmetic::traits::promote_t<T>;
@@ -156,8 +156,6 @@ SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const S
template SimpleTensor<float> gemm(const SimpleTensor<float> &a, const SimpleTensor<float> &b, const SimpleTensor<float> &c, float alpha, float beta);
template SimpleTensor<half> gemm(const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta);
-template SimpleTensor<qint8_t> gemm(const SimpleTensor<qint8_t> &a, const SimpleTensor<qint8_t> &b, const SimpleTensor<qint8_t> &c, float alpha, float beta);
-template SimpleTensor<qint16_t> gemm(const SimpleTensor<qint16_t> &a, const SimpleTensor<qint16_t> &b, const SimpleTensor<qint16_t> &c, float alpha, float beta);
} // namespace reference
} // namespace validation
} // namespace test
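
The float GEMM reference that keeps its instantiation computes the usual dst = alpha * (a x b) + beta * c. A compact sketch of that triple loop (row-major layout assumed; the library version also handles extra batch dimensions):

    #include <vector>

    // a: MxK, b: KxN, c and dst: MxN, all row-major.
    std::vector<float> gemm_ref(const std::vector<float> &a, const std::vector<float> &b,
                                const std::vector<float> &c, int M, int N, int K,
                                float alpha, float beta)
    {
        std::vector<float> dst(static_cast<std::size_t>(M) * N);
        for(int m = 0; m < M; ++m)
        {
            for(int n = 0; n < N; ++n)
            {
                float acc = 0.f;
                for(int k = 0; k < K; ++k)
                {
                    acc += a[m * K + k] * b[k * N + n];
                }
                dst[m * N + n] = alpha * acc + beta * c[m * N + n];
            }
        }
        return dst;
    }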
diff --git a/tests/validation/reference/LocallyConnected.cpp b/tests/validation/reference/LocallyConnected.cpp
index 08e3f02761..ecc582b181 100644
--- a/tests/validation/reference/LocallyConnected.cpp
+++ b/tests/validation/reference/LocallyConnected.cpp
@@ -41,7 +41,7 @@ template <typename T, typename TB>
SimpleTensor<T> locally_connected(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, const TensorShape &output_shape, const PadStrideInfo &info)
{
// Create reference
- SimpleTensor<T> dst{ output_shape, src.data_type(), 1, src.fixed_point_position(), src.quantization_info() };
+ SimpleTensor<T> dst{ output_shape, src.data_type(), 1, src.quantization_info() };
// Compute reference
const int width_in = src.shape().x();
diff --git a/tests/validation/reference/NormalizationLayer.cpp b/tests/validation/reference/NormalizationLayer.cpp
index 226af96fe3..85872c8f90 100644
--- a/tests/validation/reference/NormalizationLayer.cpp
+++ b/tests/validation/reference/NormalizationLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -38,7 +38,7 @@ template <typename T, typename std::enable_if<is_floating_point<T>::value, int>:
SimpleTensor<T> normalization_layer(const SimpleTensor<T> &src, NormalizationLayerInfo info)
{
// Create reference
- SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };
+ SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
// Compute reference
const uint32_t norm_size = info.norm_size();
@@ -152,7 +152,7 @@ SimpleTensor<T> normalization_layer(const SimpleTensor<T> &src, NormalizationLay
using namespace fixed_point_arithmetic;
// Create reference
- SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };
+ SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
// Compute reference
const int fixed_point_position = src.fixed_point_position();
@@ -267,8 +267,6 @@ SimpleTensor<T> normalization_layer(const SimpleTensor<T> &src, NormalizationLay
template SimpleTensor<float> normalization_layer(const SimpleTensor<float> &src, NormalizationLayerInfo info);
template SimpleTensor<half> normalization_layer(const SimpleTensor<half> &src, NormalizationLayerInfo info);
-template SimpleTensor<qint8_t> normalization_layer(const SimpleTensor<qint8_t> &src, NormalizationLayerInfo info);
-template SimpleTensor<qint16_t> normalization_layer(const SimpleTensor<qint16_t> &src, NormalizationLayerInfo info);
} // namespace reference
} // namespace validation
} // namespace test
diff --git a/tests/validation/reference/Permute.cpp b/tests/validation/reference/Permute.cpp
index bbb2e8d4d7..29c3c5cda8 100644
--- a/tests/validation/reference/Permute.cpp
+++ b/tests/validation/reference/Permute.cpp
@@ -42,7 +42,7 @@ SimpleTensor<T> permute(const SimpleTensor<T> &src, PermutationVector perm)
permute(dst_shape, perm);
// Create reference
- SimpleTensor<T> dst{ dst_shape, src.data_type(), src.num_channels(), src.fixed_point_position(), src.quantization_info() };
+ SimpleTensor<T> dst{ dst_shape, src.data_type(), src.num_channels(), src.quantization_info() };
// Compute reference
for(int i = 0; i < src.num_elements(); ++i)
diff --git a/tests/validation/reference/PoolingLayer.cpp b/tests/validation/reference/PoolingLayer.cpp
index 69734545c9..e9054b9043 100644
--- a/tests/validation/reference/PoolingLayer.cpp
+++ b/tests/validation/reference/PoolingLayer.cpp
@@ -44,7 +44,7 @@ SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, const PoolingLayerInfo
ARM_COMPUTE_ERROR_ON(info.is_global_pooling() && (src.shape().x() != src.shape().y()));
// Create reference
- SimpleTensor<T> dst{ compute_pool_shape(TensorInfo(src.shape(), 1, src.data_type(), src.fixed_point_position()), info), src.data_type(), 1, src.fixed_point_position() };
+ SimpleTensor<T> dst{ compute_pool_shape(TensorInfo(src.shape(), 1, src.data_type(), src.fixed_point_position()), info), src.data_type(), 1 };
const int pool_size_x = info.is_global_pooling() ? src.shape().x() : info.pool_size().width;
const int pool_size_y = info.is_global_pooling() ? src.shape().y() : info.pool_size().height;
@@ -152,128 +152,6 @@ SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, const PoolingLayerInfo
return dst;
}
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type>
-SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, const PoolingLayerInfo &info)
-{
- ARM_COMPUTE_ERROR_ON(info.is_global_pooling() && (src.shape().x() != src.shape().y()));
-
- const auto w_src = static_cast<int>(src.shape()[0]);
- const auto h_src = static_cast<int>(src.shape()[1]);
- const int upper_dims = src.shape().total_size() / (w_src * h_src);
-
- const int pool_size_x = info.is_global_pooling() ? src.shape().x() : info.pool_size().width;
- const int pool_size_y = info.is_global_pooling() ? src.shape().y() : info.pool_size().height;
- PoolingType type = info.pool_type();
- int pool_stride_x = info.pad_stride_info().stride().first;
- int pool_stride_y = info.pad_stride_info().stride().second;
- int pad_left = info.pad_stride_info().pad_left();
- int pad_top = info.pad_stride_info().pad_top();
- int pad_right = info.pad_stride_info().pad_right();
- int pad_bottom = info.pad_stride_info().pad_bottom();
- bool exclude_padding = info.exclude_padding();
-
- // Create reference
- SimpleTensor<T> dst{ compute_pool_shape(TensorInfo(src.shape(), 1, src.data_type(), src.fixed_point_position()), info), src.data_type(), 1, src.fixed_point_position() };
-
- const auto w_dst = static_cast<int>(dst.shape()[0]);
- const auto h_dst = static_cast<int>(dst.shape()[1]);
-
- if(type == PoolingType::MAX)
- {
- for(int r = 0; r < upper_dims; ++r)
- {
- for(int h = 0; h < h_dst; ++h)
- {
- for(int w = 0; w < w_dst; ++w)
- {
- int wstart = w * pool_stride_x - pad_left;
- int hstart = h * pool_stride_y - pad_top;
- int wend = std::min(wstart + pool_size_x, w_src);
- int hend = std::min(hstart + pool_size_y, h_src);
- wstart = std::max(wstart, 0);
- hstart = std::max(hstart, 0);
-
- T max_val = std::numeric_limits<T>::lowest();
- for(int y = hstart; y < hend; ++y)
- {
- for(int x = wstart; x < wend; ++x)
- {
- const T val = src[r * h_src * w_src + y * w_src + x];
- if(val > max_val)
- {
- max_val = val;
- }
- }
- }
-
- dst[r * h_dst * w_dst + h * w_dst + w] = max_val;
- }
- }
- }
- }
- else // Average or l2 pooling
- {
- for(int r = 0; r < upper_dims; ++r)
- {
- for(int h = 0; h < h_dst; ++h)
- {
- for(int w = 0; w < w_dst; ++w)
- {
- int wstart = w * pool_stride_x - pad_left;
- int hstart = h * pool_stride_y - pad_top;
- int wend = std::min(wstart + pool_size_x, w_src + pad_right);
- int hend = std::min(hstart + pool_size_y, h_src + pad_bottom);
- int pool = (hend - hstart) * (wend - wstart);
- wstart = std::max(wstart, 0);
- hstart = std::max(hstart, 0);
- wend = std::min(wend, w_src);
- hend = std::min(hend, h_src);
- // Exclude padding pixels from the average
- if(exclude_padding)
- {
- pool = (hend - hstart) * (wend - wstart);
- }
-
- using namespace fixed_point_arithmetic;
-
- const int fixed_point_position = src.fixed_point_position();
- const fixed_point<T> const_1(1, fixed_point_position);
- const fixed_point<T> invpool_fp(1.f / static_cast<float>(pool), fixed_point_position);
- fixed_point<T> avg_val(0, fixed_point_position, true);
-
- if(type == PoolingType::AVG)
- {
- for(int y = hstart; y < hend; ++y)
- {
- for(int x = wstart; x < wend; ++x)
- {
- const fixed_point<T> in_fp(src[r * h_src * w_src + y * w_src + x], fixed_point_position, true);
- avg_val = add(avg_val, in_fp);
- }
- }
- dst[r * h_dst * w_dst + h * w_dst + w] = mul(avg_val, invpool_fp).raw();
- }
- else
- {
- for(int y = hstart; y < hend; ++y)
- {
- for(int x = wstart; x < wend; ++x)
- {
- const fixed_point<T> in_fp(src[r * h_src * w_src + y * w_src + x], fixed_point_position, true);
- avg_val = add(avg_val, mul(in_fp, in_fp));
- }
- }
- auto res = div(const_1, (inv_sqrt(mul(avg_val, invpool_fp))));
- dst[r * h_dst * w_dst + h * w_dst + w] = res.raw();
- }
- }
- }
- }
- }
-
- return dst;
-}
-
template <>
SimpleTensor<uint8_t> pooling_layer<uint8_t>(const SimpleTensor<uint8_t> &src, const PoolingLayerInfo &info)
{
@@ -285,8 +163,6 @@ SimpleTensor<uint8_t> pooling_layer<uint8_t>(const SimpleTensor<uint8_t> &src, c
template SimpleTensor<float> pooling_layer(const SimpleTensor<float> &src, const PoolingLayerInfo &info);
template SimpleTensor<half> pooling_layer(const SimpleTensor<half> &src, const PoolingLayerInfo &info);
-template SimpleTensor<qint8_t> pooling_layer(const SimpleTensor<qint8_t> &src, const PoolingLayerInfo &info);
-template SimpleTensor<qint16_t> pooling_layer(const SimpleTensor<qint16_t> &src, const PoolingLayerInfo &info);
} // namespace reference
} // namespace validation
} // namespace test
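
The window bookkeeping in the deleted overload is worth keeping in mind when reading the surviving float path: for average/L2 pooling the divisor includes padded positions unless exclude_padding is set. Extracted as a standalone sketch, directly following the removed code:

    #include <algorithm>

    // Divisor for one pooling window at output position (w, h).
    int pooling_divisor(int w, int h, int pool_size_x, int pool_size_y,
                        int stride_x, int stride_y,
                        int pad_left, int pad_top, int pad_right, int pad_bottom,
                        int w_src, int h_src, bool exclude_padding)
    {
        int wstart = w * stride_x - pad_left;
        int hstart = h * stride_y - pad_top;
        int wend   = std::min(wstart + pool_size_x, w_src + pad_right);
        int hend   = std::min(hstart + pool_size_y, h_src + pad_bottom);
        int pool   = (hend - hstart) * (wend - wstart); // counts padding
        wstart     = std::max(wstart, 0);
        hstart     = std::max(hstart, 0);
        wend       = std::min(wend, w_src);
        hend       = std::min(hend, h_src);
        if(exclude_padding)
        {
            pool = (hend - hstart) * (wend - wstart); // real pixels only
        }
        return pool;
    }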
diff --git a/tests/validation/reference/SoftmaxLayer.cpp b/tests/validation/reference/SoftmaxLayer.cpp
index 90b9b1f7e2..ae4bcd8f0e 100644
--- a/tests/validation/reference/SoftmaxLayer.cpp
+++ b/tests/validation/reference/SoftmaxLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -38,7 +38,7 @@ template <typename T, typename std::enable_if<is_floating_point<T>::value, int>:
SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta)
{
// Create reference
- SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };
+ SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
// Compute reference
const int cols = src.shape()[0];
@@ -79,7 +79,7 @@ SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta)
using namespace fixed_point_arithmetic;
// Create reference
- SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };
+ SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
// Compute reference
const int cols = src.shape()[0];
@@ -128,8 +128,6 @@ SimpleTensor<uint8_t> softmax_layer<uint8_t>(const SimpleTensor<uint8_t> &src, f
template SimpleTensor<float> softmax_layer(const SimpleTensor<float> &src, float beta);
template SimpleTensor<half> softmax_layer(const SimpleTensor<half> &src, float beta);
-template SimpleTensor<qint8_t> softmax_layer(const SimpleTensor<qint8_t> &src, float beta);
-template SimpleTensor<qint16_t> softmax_layer(const SimpleTensor<qint16_t> &src, float beta);
} // namespace reference
} // namespace validation
} // namespace test
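
The float softmax reference whose constructor changes above follows the standard max-subtracted formulation; as a sketch of one row (the library applies this along the innermost dimension):

    #include <algorithm>
    #include <cmath>
    #include <vector>

    // dst[i] = exp(beta * (x[i] - max(x))) / sum_j exp(beta * (x[j] - max(x)))
    std::vector<float> softmax_row(const std::vector<float> &x, float beta)
    {
        const float max_val = *std::max_element(x.begin(), x.end());
        std::vector<float> dst(x.size());
        float sum = 0.f;
        for(std::size_t i = 0; i < x.size(); ++i)
        {
            dst[i] = std::exp(beta * (x[i] - max_val));
            sum += dst[i];
        }
        for(float &v : dst)
        {
            v /= sum;
        }
        return dst;
    }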
diff --git a/tests/validation/reference/WidthConcatenateLayer.cpp b/tests/validation/reference/WidthConcatenateLayer.cpp
index fe79b4a138..5b89934df5 100644
--- a/tests/validation/reference/WidthConcatenateLayer.cpp
+++ b/tests/validation/reference/WidthConcatenateLayer.cpp
@@ -85,8 +85,6 @@ SimpleTensor<T> widthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs)
template SimpleTensor<float> widthconcatenate_layer(const std::vector<SimpleTensor<float>> &srcs);
template SimpleTensor<half> widthconcatenate_layer(const std::vector<SimpleTensor<half>> &srcs);
-template SimpleTensor<qint8_t> widthconcatenate_layer(const std::vector<SimpleTensor<qint8_t>> &srcs);
-template SimpleTensor<qint16_t> widthconcatenate_layer(const std::vector<SimpleTensor<qint16_t>> &srcs);
} // namespace reference
} // namespace validation
} // namespace test