author    Vidhya Sudhan Loganathan <vidhyasudhan.loganathan@arm.com>  2018-07-02 09:13:49 +0100
committer Anthony Barbier <anthony.barbier@arm.com>                   2018-11-02 16:54:10 +0000
commit    014333d73883c3872e458cedda5ccef586a7ccd4 (patch)
tree      0f28bbc1ab769993af91b40e4584061f6ed6d3fa
parent    de01468bbfff3a7d8bcbba3bfdf5698fb2e3b267 (diff)

COMPMID-970 : Remove QS8 / QS16 support

Removed fixed point position arguments from test sources.

Change-Id: I8343724723b71611fd501ed34de0866d3fb60e7e
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/136382
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele DiGiorgio <michele.digiorgio@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
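
In practice the whole patch is one mechanical change: every test helper and fixture loses its fixed point position argument. A minimal before/after sketch of the call-site change (the shape, data types and quantization values below are illustrative, not taken from the patch):

    // Assumes tests/Utils.h and arm_compute/runtime/Tensor.h are included.
    using namespace arm_compute;
    using namespace arm_compute::test;

    // Before this patch: QS8/QS16 callers threaded a fixed point position through.
    Tensor qs8 = create_tensor<Tensor>(TensorShape(27U, 27U, 96U), DataType::QS8, 1,
                                       /* fixed_point_position */ 4);

    // After this patch: the argument is gone; quantized tensors carry a
    // QuantizationInfo instead.
    Tensor q8 = create_tensor<Tensor>(TensorShape(27U, 27U, 96U), DataType::QASYMM8, 1,
                                      QuantizationInfo(0.5f, -10));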
 tests/GLES_COMPUTE/Helper.h | 11
 tests/RawTensor.cpp | 12
 tests/RawTensor.h | 42
 tests/SimpleTensor.h | 36
 tests/Utils.h | 15
 tests/benchmark/fixtures/ActivationLayerFixture.h | 5
 tests/benchmark/fixtures/AlexNetFixture.h | 5
 tests/benchmark/fixtures/BatchNormalizationLayerFixture.h | 14
 tests/benchmark/fixtures/ConvolutionLayerFixture.h | 10
 tests/benchmark/fixtures/DepthConcatenateLayerFixture.h | 5
 tests/benchmark/fixtures/DepthwiseConvolutionLayerFixture.h | 10
 tests/benchmark/fixtures/DepthwiseSeparableConvolutionLayerFixture.h | 16
 tests/benchmark/fixtures/DirectConvolutionLayerFixture.h | 10
 tests/benchmark/fixtures/FlattenLayerFixture.h | 6
 tests/benchmark/fixtures/FullyConnectedLayerFixture.h | 10
 tests/benchmark/fixtures/GEMMFixture.h | 10
 tests/benchmark/fixtures/GEMMInterleave4x4Fixture.h | 6
 tests/benchmark/fixtures/GEMMLowpFixture.h | 6
 tests/benchmark/fixtures/NormalizationLayerFixture.h | 6
 tests/benchmark/fixtures/PoolingLayerFixture.h | 7
 tests/benchmark/fixtures/ROIPoolingLayerFixture.h | 8
 tests/benchmark/fixtures/SoftmaxLayerFixture.h | 6
 tests/benchmark/fixtures/WinogradConvolutionLayerFixture.h | 10
 tests/networks/AlexNetNetwork.h | 143
 tests/validation/CL/ActivationLayer.cpp | 3
 tests/validation/CL/ArithmeticAddition.cpp | 3
 tests/validation/CL/ArithmeticSubtraction.cpp | 3
 tests/validation/CL/BatchNormalizationLayer.cpp | 15
 tests/validation/CL/ConvolutionLayer.cpp | 14
 tests/validation/CL/DepthConvertLayer.cpp | 44
 tests/validation/CL/DilatedConvolutionLayer.cpp | 14
 tests/validation/CL/DirectConvolutionLayer.cpp | 3
 tests/validation/CL/FullyConnectedLayer.cpp | 17
 tests/validation/CL/GEMM.cpp | 14
 tests/validation/CL/NormalizationLayer.cpp | 7
 tests/validation/CL/PoolingLayer.cpp | 3
 tests/validation/CL/SYSTEM/AlexNet.cpp | 4
 tests/validation/CL/SoftmaxLayer.cpp | 11
 tests/validation/CL/Winograd.cpp | 16
 tests/validation/GLES_COMPUTE/ActivationLayer.cpp | 9
 tests/validation/GLES_COMPUTE/BatchNormalizationLayer.cpp | 15
 tests/validation/GLES_COMPUTE/ConvolutionLayer.cpp | 11
 tests/validation/GLES_COMPUTE/FullyConnectedLayer.cpp | 13
 tests/validation/GLES_COMPUTE/GEMM.cpp | 13
 tests/validation/GLES_COMPUTE/SoftmaxLayer.cpp | 9
 tests/validation/Helpers.cpp | 4
 tests/validation/Helpers.h | 11
 tests/validation/NEON/ActivationLayer.cpp | 10
 tests/validation/NEON/ArithmeticAddition.cpp | 3
 tests/validation/NEON/ArithmeticSubtraction.cpp | 3
 tests/validation/NEON/BatchNormalizationLayer.cpp | 15
 tests/validation/NEON/ConvolutionLayer.cpp | 14
 tests/validation/NEON/DepthConvertLayer.cpp | 44
 tests/validation/NEON/DilatedConvolutionLayer.cpp | 14
 tests/validation/NEON/DirectConvolutionLayer.cpp | 3
 tests/validation/NEON/FullyConnectedLayer.cpp | 14
 tests/validation/NEON/GEMM.cpp | 14
 tests/validation/NEON/NormalizationLayer.cpp | 6
 tests/validation/NEON/PoolingLayer.cpp | 8
 tests/validation/NEON/SYSTEM/AlexNet.cpp | 4
 tests/validation/NEON/Scale.cpp | 4
 tests/validation/NEON/SoftmaxLayer.cpp | 10
 tests/validation/fixtures/ActivationLayerFixture.h | 37
 tests/validation/fixtures/ArithmeticAdditionFixture.h | 34
 tests/validation/fixtures/ArithmeticSubtractionFixture.h | 26
 tests/validation/fixtures/BatchNormalizationLayerFixture.h | 48
 tests/validation/fixtures/ConvolutionLayerFixture.h | 26
 tests/validation/fixtures/DeconvolutionLayerFixture.h | 30
 tests/validation/fixtures/DepthConcatenateLayerFixture.h | 9
 tests/validation/fixtures/DepthConvertLayerFixture.h | 28
 tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h | 14
 tests/validation/fixtures/DirectConvolutionLayerFixture.h | 49
 tests/validation/fixtures/DirectConvolutionLayerTensorShiftFixture.h | 48
 tests/validation/fixtures/DropoutLayerFixture.h | 3
 tests/validation/fixtures/FlattenLayerFixture.h | 24
 tests/validation/fixtures/FullyConnectedLayerFixture.h | 30
 tests/validation/fixtures/GEMMFixture.h | 30
 tests/validation/fixtures/GEMMInterleave4x4Fixture.h | 26
 tests/validation/fixtures/GEMMTranspose1xWFixture.h | 24
 tests/validation/fixtures/Im2ColFixture.h | 8
 tests/validation/fixtures/NormalizationLayerFixture.h | 37
 tests/validation/fixtures/PoolingLayerFixture.h | 30
 tests/validation/fixtures/ScaleFixture.h | 6
 tests/validation/fixtures/SoftmaxLayerFixture.h | 27
 tests/validation/fixtures/WidthConcatenateLayerFixture.h | 9
 tests/validation/fixtures/WinogradConvolutionLayerFixture.h | 22
 tests/validation/reference/AbsoluteDifference.cpp | 3
 tests/validation/reference/Accumulate.cpp | 3
 tests/validation/reference/ActivationLayer.cpp | 69
 tests/validation/reference/ArithmeticAddition.cpp | 1
 tests/validation/reference/ArithmeticDivision.cpp | 1
 tests/validation/reference/BatchNormalizationLayer.cpp | 55
 tests/validation/reference/BatchNormalizationLayer.h | 6
 tests/validation/reference/ChannelShuffle.cpp | 2
 tests/validation/reference/ConvolutionLayer.cpp | 6
 tests/validation/reference/DeconvolutionLayer.cpp | 2
 tests/validation/reference/DepthConcatenateLayer.cpp | 4
 tests/validation/reference/DepthConvertLayer.cpp | 58
 tests/validation/reference/DepthwiseConvolutionLayer.cpp | 4
 tests/validation/reference/FlattenLayer.cpp | 4
 tests/validation/reference/FullyConnectedLayer.cpp | 64
 tests/validation/reference/GEMM.cpp | 6
 tests/validation/reference/LocallyConnected.cpp | 2
 tests/validation/reference/NormalizationLayer.cpp | 8
 tests/validation/reference/Permute.cpp | 2
 tests/validation/reference/PoolingLayer.cpp | 126
 tests/validation/reference/SoftmaxLayer.cpp | 8
 tests/validation/reference/WidthConcatenateLayer.cpp | 2
 108 files changed, 630 insertions(+), 1262 deletions(-)
diff --git a/tests/GLES_COMPUTE/Helper.h b/tests/GLES_COMPUTE/Helper.h
index 65f992623a..c04c6b608a 100644
--- a/tests/GLES_COMPUTE/Helper.h
+++ b/tests/GLES_COMPUTE/Helper.h
@@ -37,17 +37,16 @@ namespace test
{
/** Helper to create an empty tensor.
*
- * @param[in] shape Desired shape.
- * @param[in] data_type Desired data type.
- * @param[in] num_channels (Optional) It indicates the number of channels for each tensor element
- * @param[in] fixed_point_position (Optional) Fixed point position that expresses the number of bits for the fractional part of the number when the tensor's data type is QS8 or QS16.
+ * @param[in] shape Desired shape.
+ * @param[in] data_type Desired data type.
+ * @param[in] num_channels (Optional) It indicates the number of channels for each tensor element
*
* @return Empty @ref GCTensor with the specified shape and data type.
*/
-inline GCTensor create_tensor(const TensorShape &shape, DataType data_type, int num_channels = 1, int fixed_point_position = 0)
+inline GCTensor create_tensor(const TensorShape &shape, DataType data_type, int num_channels = 1)
{
GCTensor tensor;
- tensor.allocator()->init(TensorInfo(shape, num_channels, data_type, fixed_point_position));
+ tensor.allocator()->init(TensorInfo(shape, num_channels, data_type));
return tensor;
}
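
A usage sketch of the trimmed helper above (the shape and the F16 data type are illustrative):

    // Assumes tests/GLES_COMPUTE/Helper.h is included.
    using namespace arm_compute;
    using namespace arm_compute::test;

    GCTensor tensor = create_tensor(TensorShape(13U, 13U, 128U), DataType::F16, 1);
    tensor.allocator()->allocate(); // TensorInfo is now built without a fixed point position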
diff --git a/tests/RawTensor.cpp b/tests/RawTensor.cpp
index bc2747d2a1..ce2510fe95 100644
--- a/tests/RawTensor.cpp
+++ b/tests/RawTensor.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -27,20 +27,20 @@ namespace arm_compute
{
namespace test
{
-RawTensor::RawTensor(TensorShape shape, Format format, int fixed_point_position)
- : SimpleTensor(shape, format, fixed_point_position)
+RawTensor::RawTensor(TensorShape shape, Format format)
+ : SimpleTensor(shape, format)
{
_buffer = support::cpp14::make_unique<uint8_t[]>(SimpleTensor::num_elements() * SimpleTensor::num_channels() * SimpleTensor::element_size());
}
-RawTensor::RawTensor(TensorShape shape, DataType data_type, int num_channels, int fixed_point_position)
- : SimpleTensor(shape, data_type, num_channels, fixed_point_position)
+RawTensor::RawTensor(TensorShape shape, DataType data_type, int num_channels)
+ : SimpleTensor(shape, data_type, num_channels)
{
_buffer = support::cpp14::make_unique<uint8_t[]>(SimpleTensor::num_elements() * SimpleTensor::num_channels() * SimpleTensor::element_size());
}
RawTensor::RawTensor(const RawTensor &tensor)
- : SimpleTensor(tensor.shape(), tensor.data_type(), tensor.num_channels(), tensor.fixed_point_position())
+ : SimpleTensor(tensor.shape(), tensor.data_type(), tensor.num_channels())
{
_format = tensor.format();
_buffer = support::cpp14::make_unique<uint8_t[]>(num_elements() * num_channels() * element_size());
diff --git a/tests/RawTensor.h b/tests/RawTensor.h
index 3501ad16ff..6078f12ee7 100644
--- a/tests/RawTensor.h
+++ b/tests/RawTensor.h
@@ -40,20 +40,18 @@ class RawTensor : public SimpleTensor<uint8_t>
public:
/** Create an uninitialised tensor of the given @p shape and @p format.
*
- * @param[in] shape Shape of the new raw tensor.
- * @param[in] format Format of the new raw tensor.
- * @param[in] fixed_point_position (Optional) Number of bits for the fractional part of the fixed point numbers
+ * @param[in] shape Shape of the new raw tensor.
+ * @param[in] format Format of the new raw tensor.
*/
- RawTensor(TensorShape shape, Format format, int fixed_point_position = 0);
+ RawTensor(TensorShape shape, Format format);
/** Create an uninitialised tensor of the given @p shape and @p data type.
*
- * @param[in] shape Shape of the new raw tensor.
- * @param[in] data_type Data type of the new raw tensor.
- * @param[in] num_channels (Optional) Number of channels (default = 1).
- * @param[in] fixed_point_position (Optional) Number of bits for the fractional part of the fixed point numbers (default = 0).
+ * @param[in] shape Shape of the new raw tensor.
+ * @param[in] data_type Data type of the new raw tensor.
+ * @param[in] num_channels (Optional) Number of channels (default = 1).
*/
- RawTensor(TensorShape shape, DataType data_type, int num_channels = 1, int fixed_point_position = 0);
+ RawTensor(TensorShape shape, DataType data_type, int num_channels = 1);
/** Conversion constructor from SimpleTensor.
*
@@ -65,13 +63,12 @@ public:
template <typename T>
RawTensor(SimpleTensor<T> &&tensor)
{
- _buffer = std::unique_ptr<uint8_t[]>(reinterpret_cast<uint8_t *>(tensor._buffer.release()));
- _shape = std::move(tensor._shape);
- _format = tensor._format;
- _data_type = tensor._data_type;
- _num_channels = tensor._num_channels;
- _fixed_point_position = tensor._fixed_point_position;
- _data_layout = tensor._data_layout;
+ _buffer = std::unique_ptr<uint8_t[]>(reinterpret_cast<uint8_t *>(tensor._buffer.release()));
+ _shape = std::move(tensor._shape);
+ _format = tensor._format;
+ _data_type = tensor._data_type;
+ _num_channels = tensor._num_channels;
+ _data_layout = tensor._data_layout;
}
/** Conversion operator to SimpleTensor.
@@ -84,13 +81,12 @@ public:
operator SimpleTensor<T>()
{
SimpleTensor<T> cast;
- cast._buffer = std::unique_ptr<T[]>(reinterpret_cast<T *>(_buffer.release()));
- cast._shape = std::move(_shape);
- cast._format = _format;
- cast._data_type = _data_type;
- cast._num_channels = _num_channels;
- cast._fixed_point_position = _fixed_point_position;
- cast._data_layout = _data_layout;
+ cast._buffer = std::unique_ptr<T[]>(reinterpret_cast<T *>(_buffer.release()));
+ cast._shape = std::move(_shape);
+ cast._format = _format;
+ cast._data_type = _data_type;
+ cast._num_channels = _num_channels;
+ cast._data_layout = _data_layout;
return cast;
}
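
The constructor/operator pair above now moves one field fewer between the two tensor types; a sketch of the round trip it implements (buffer contents are left uninitialised):

    // Assumes tests/RawTensor.h is included.
    using namespace arm_compute;
    using namespace arm_compute::test;

    SimpleTensor<float> simple(TensorShape(4U, 4U), DataType::F32);
    RawTensor raw(std::move(simple)); // conversion constructor steals the buffer
    SimpleTensor<float> back = raw;   // conversion operator releases it back out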
diff --git a/tests/SimpleTensor.h b/tests/SimpleTensor.h
index 8b3b37010e..759a869696 100644
--- a/tests/SimpleTensor.h
+++ b/tests/SimpleTensor.h
@@ -64,26 +64,23 @@ public:
/** Create an uninitialised tensor of the given @p shape and @p format.
*
- * @param[in] shape Shape of the new raw tensor.
- * @param[in] format Format of the new raw tensor.
- * @param[in] fixed_point_position (Optional) Number of bits for the fractional part of the fixed point numbers
+ * @param[in] shape Shape of the new raw tensor.
+ * @param[in] format Format of the new raw tensor.
*/
- SimpleTensor(TensorShape shape, Format format, int fixed_point_position = 0);
+ SimpleTensor(TensorShape shape, Format format);
/** Create an uninitialised tensor of the given @p shape and @p data type.
*
- * @param[in] shape Shape of the new raw tensor.
- * @param[in] data_type Data type of the new raw tensor.
- * @param[in] num_channels (Optional) Number of channels (default = 1).
- * @param[in] fixed_point_position (Optional) Number of bits for the fractional part of the fixed point numbers (default = 0).
- * @param[in] quantization_info (Optional) Quantization info for asymmetric quantization (default = empty).
- * @param[in] data_layout (Optional) Data layout of the tensor (default = NCHW).
+ * @param[in] shape Shape of the new raw tensor.
+ * @param[in] data_type Data type of the new raw tensor.
+ * @param[in] num_channels (Optional) Number of channels (default = 1).
+ * @param[in] quantization_info (Optional) Quantization info for asymmetric quantization (default = empty).
+ * @param[in] data_layout (Optional) Data layout of the tensor (default = NCHW).
*/
SimpleTensor(TensorShape shape, DataType data_type,
- int num_channels = 1,
- int fixed_point_position = 0,
- QuantizationInfo quantization_info = QuantizationInfo(),
- DataLayout data_layout = DataLayout::NCHW);
+ int num_channels = 1,
+ QuantizationInfo quantization_info = QuantizationInfo(),
+ DataLayout data_layout = DataLayout::NCHW);
/** Create a deep copy of the given @p tensor.
*
@@ -224,17 +221,15 @@ protected:
Format _format{ Format::UNKNOWN };
DataType _data_type{ DataType::UNKNOWN };
int _num_channels{ 0 };
- int _fixed_point_position{ 0 };
QuantizationInfo _quantization_info{};
DataLayout _data_layout{ DataLayout::UNKNOWN };
};
template <typename T>
-SimpleTensor<T>::SimpleTensor(TensorShape shape, Format format, int fixed_point_position)
+SimpleTensor<T>::SimpleTensor(TensorShape shape, Format format)
: _buffer(nullptr),
_shape(shape),
_format(format),
- _fixed_point_position(fixed_point_position),
_quantization_info(),
_data_layout(DataLayout::NCHW)
{
@@ -243,12 +238,11 @@ SimpleTensor<T>::SimpleTensor(TensorShape shape, Format format, int fixed_point_
}
template <typename T>
-SimpleTensor<T>::SimpleTensor(TensorShape shape, DataType data_type, int num_channels, int fixed_point_position, QuantizationInfo quantization_info, DataLayout data_layout)
+SimpleTensor<T>::SimpleTensor(TensorShape shape, DataType data_type, int num_channels, QuantizationInfo quantization_info, DataLayout data_layout)
: _buffer(nullptr),
_shape(shape),
_data_type(data_type),
_num_channels(num_channels),
- _fixed_point_position(fixed_point_position),
_quantization_info(quantization_info),
_data_layout(data_layout)
{
@@ -262,7 +256,6 @@ SimpleTensor<T>::SimpleTensor(const SimpleTensor &tensor)
_format(tensor.format()),
_data_type(tensor.data_type()),
_num_channels(tensor.num_channels()),
- _fixed_point_position(tensor.fixed_point_position()),
_quantization_info(tensor.quantization_info()),
_data_layout(tensor.data_layout())
{
@@ -305,7 +298,7 @@ size_t SimpleTensor<T>::element_size() const
template <typename T>
int SimpleTensor<T>::fixed_point_position() const
{
- return _fixed_point_position;
+ return 0;
}
template <typename T>
@@ -428,7 +421,6 @@ void swap(SimpleTensor<U> &tensor1, SimpleTensor<U> &tensor2)
swap(tensor1._format, tensor2._format);
swap(tensor1._data_type, tensor2._data_type);
swap(tensor1._num_channels, tensor2._num_channels);
- swap(tensor1._fixed_point_position, tensor2._fixed_point_position);
swap(tensor1._quantization_info, tensor2._quantization_info);
swap(tensor1._buffer, tensor2._buffer);
}
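
With the member removed, fixed_point_position() above is reduced to a stub kept for source compatibility; a sketch of the surviving constructor (values illustrative):

    // Assumes tests/SimpleTensor.h and <cassert> are included.
    using namespace arm_compute;
    using namespace arm_compute::test;

    SimpleTensor<uint8_t> t(TensorShape(8U, 8U), DataType::QASYMM8, 1,
                            QuantizationInfo(1.f / 255.f, 0), DataLayout::NHWC);
    assert(t.fixed_point_position() == 0); // always 0 after this patch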
diff --git a/tests/Utils.h b/tests/Utils.h
index 332c916498..7d064bdf48 100644
--- a/tests/Utils.h
+++ b/tests/Utils.h
@@ -508,21 +508,20 @@ inline bool is_in_valid_region(const ValidRegion &valid_region, Coordinates coor
/** Create and initialize a tensor of the given type.
*
- * @param[in] shape Tensor shape.
- * @param[in] data_type Data type.
- * @param[in] num_channels (Optional) Number of channels.
- * @param[in] fixed_point_position (Optional) Number of fractional bits.
- * @param[in] quantization_info (Optional) Quantization info for asymmetric quantized types.
- * @param[in] data_layout (Optional) Data layout. Default is NCHW.
+ * @param[in] shape Tensor shape.
+ * @param[in] data_type Data type.
+ * @param[in] num_channels (Optional) Number of channels.
+ * @param[in] quantization_info (Optional) Quantization info for asymmetric quantized types.
+ * @param[in] data_layout (Optional) Data layout. Default is NCHW.
*
* @return Initialized tensor of given type.
*/
template <typename T>
inline T create_tensor(const TensorShape &shape, DataType data_type, int num_channels = 1,
- int fixed_point_position = 0, QuantizationInfo quantization_info = QuantizationInfo(), DataLayout data_layout = DataLayout::NCHW)
+ QuantizationInfo quantization_info = QuantizationInfo(), DataLayout data_layout = DataLayout::NCHW)
{
T tensor;
- TensorInfo info(shape, num_channels, data_type, fixed_point_position);
+ TensorInfo info(shape, num_channels, data_type);
info.set_quantization_info(quantization_info);
info.set_data_layout(data_layout);
tensor.allocator()->init(info);
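
The updated generic helper in use, e.g. to build a quantized CL tensor (an illustrative call, not taken from the patch):

    // Assumes tests/Utils.h and arm_compute/runtime/CL/CLTensor.h are included.
    using namespace arm_compute;
    using namespace arm_compute::test;

    CLTensor src = create_tensor<CLTensor>(TensorShape(16U, 16U), DataType::QASYMM8, 1,
                                           QuantizationInfo(0.5f, 10), DataLayout::NCHW);
    src.allocator()->allocate();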
diff --git a/tests/benchmark/fixtures/ActivationLayerFixture.h b/tests/benchmark/fixtures/ActivationLayerFixture.h
index d46ef2827b..a82861f624 100644
--- a/tests/benchmark/fixtures/ActivationLayerFixture.h
+++ b/tests/benchmark/fixtures/ActivationLayerFixture.h
@@ -48,10 +48,9 @@ public:
shape.set(shape.num_dimensions(), batches);
// Create tensors
- const int fixed_point_position = 4;
const QuantizationInfo q_info(0.5f, -10);
- src = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position, q_info);
- dst = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position, q_info);
+ src = create_tensor<TensorType>(shape, data_type, 1, q_info);
+ dst = create_tensor<TensorType>(shape, data_type, 1, q_info);
// Create and configure function
act_layer.configure(&src, &dst, info);
diff --git a/tests/benchmark/fixtures/AlexNetFixture.h b/tests/benchmark/fixtures/AlexNetFixture.h
index 46ac61b49a..4662feb918 100644
--- a/tests/benchmark/fixtures/AlexNetFixture.h
+++ b/tests/benchmark/fixtures/AlexNetFixture.h
@@ -53,10 +53,9 @@ public:
template <typename...>
void setup(DataType data_type, int batches)
{
- constexpr bool weights_reshaped = false;
- constexpr int fixed_point_position = 4;
+ constexpr bool weights_reshaped = false;
- network.init(data_type, fixed_point_position, batches, weights_reshaped);
+ network.init(data_type, batches, weights_reshaped);
network.build();
network.allocate();
}
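
The fixture now forwards three arguments instead of four; the equivalent direct call on the network (the batch count is illustrative):

    network.init(DataType::F32, /* batches */ 4, /* reshaped_weights */ false);
    network.build();
    network.allocate();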
diff --git a/tests/benchmark/fixtures/BatchNormalizationLayerFixture.h b/tests/benchmark/fixtures/BatchNormalizationLayerFixture.h
index ae8f8a7af1..42adefeef6 100644
--- a/tests/benchmark/fixtures/BatchNormalizationLayerFixture.h
+++ b/tests/benchmark/fixtures/BatchNormalizationLayerFixture.h
@@ -45,7 +45,7 @@ public:
void setup(TensorShape tensor_shape, TensorShape param_shape, float epsilon, bool use_gamma, bool use_beta, ActivationLayerInfo act_info, DataType data_type, DataLayout data_layout, int batches)
{
// Set batched in source and destination shapes
- const unsigned int fixed_point_position = 4;
+
tensor_shape.set(tensor_shape.num_dimensions(), batches);
if(data_layout == DataLayout::NHWC)
{
@@ -53,12 +53,12 @@ public:
}
// Create tensors
- src = create_tensor<TensorType>(tensor_shape, data_type, 1, fixed_point_position, QuantizationInfo(), data_layout);
- dst = create_tensor<TensorType>(tensor_shape, data_type, 1, fixed_point_position, QuantizationInfo(), data_layout);
- mean = create_tensor<TensorType>(param_shape, data_type, 1, fixed_point_position);
- variance = create_tensor<TensorType>(param_shape, data_type, 1, fixed_point_position);
- beta = create_tensor<TensorType>(param_shape, data_type, 1, fixed_point_position);
- gamma = create_tensor<TensorType>(param_shape, data_type, 1, fixed_point_position);
+ src = create_tensor<TensorType>(tensor_shape, data_type, 1, QuantizationInfo(), data_layout);
+ dst = create_tensor<TensorType>(tensor_shape, data_type, 1, QuantizationInfo(), data_layout);
+ mean = create_tensor<TensorType>(param_shape, data_type, 1);
+ variance = create_tensor<TensorType>(param_shape, data_type, 1);
+ beta = create_tensor<TensorType>(param_shape, data_type, 1);
+ gamma = create_tensor<TensorType>(param_shape, data_type, 1);
// Create and configure function
TensorType *beta_ptr = use_beta ? &beta : nullptr;
diff --git a/tests/benchmark/fixtures/ConvolutionLayerFixture.h b/tests/benchmark/fixtures/ConvolutionLayerFixture.h
index 511daf73b3..338a02162d 100644
--- a/tests/benchmark/fixtures/ConvolutionLayerFixture.h
+++ b/tests/benchmark/fixtures/ConvolutionLayerFixture.h
@@ -46,16 +46,16 @@ public:
int batches)
{
// Set batched in source and destination shapes
- const unsigned int fixed_point_position = 4;
+
src_shape.set(3 /* batch */, batches);
dst_shape.set(3 /* batch */, batches);
DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
// Create tensors
- src = create_tensor<TensorType>(src_shape, data_type, 1, fixed_point_position);
- weights = create_tensor<TensorType>(weights_shape, data_type, 1, fixed_point_position);
- biases = create_tensor<TensorType>(biases_shape, bias_data_type, 1, fixed_point_position);
- dst = create_tensor<TensorType>(dst_shape, data_type, 1, fixed_point_position);
+ src = create_tensor<TensorType>(src_shape, data_type, 1);
+ weights = create_tensor<TensorType>(weights_shape, data_type, 1);
+ biases = create_tensor<TensorType>(biases_shape, bias_data_type, 1);
+ dst = create_tensor<TensorType>(dst_shape, data_type, 1);
// Create and configure function
conv_layer.configure(&src, &weights, &biases, &dst, info, WeightsInfo(), dilation, act_info);
diff --git a/tests/benchmark/fixtures/DepthConcatenateLayerFixture.h b/tests/benchmark/fixtures/DepthConcatenateLayerFixture.h
index bd4b404fc7..292adde49f 100644
--- a/tests/benchmark/fixtures/DepthConcatenateLayerFixture.h
+++ b/tests/benchmark/fixtures/DepthConcatenateLayerFixture.h
@@ -95,12 +95,12 @@ public:
for(const auto &shape : src_shapes)
{
- _srcs.emplace_back(create_tensor<TensorType>(shape, data_type, 1, _fractional_bits));
+ _srcs.emplace_back(create_tensor<TensorType>(shape, data_type, 1));
src_ptrs.emplace_back(&_srcs.back());
}
TensorShape dst_shape = calculate_depth_concatenate_shape(src_ptrs);
- _dst = create_tensor<TensorType>(dst_shape, data_type, 1, _fractional_bits);
+ _dst = create_tensor<TensorType>(dst_shape, data_type, 1);
_depth_concat.configure(src_ptrs, &_dst);
@@ -139,7 +139,6 @@ private:
std::vector<TensorType> _srcs{};
TensorType _dst{};
Function _depth_concat{};
- int _fractional_bits{ 1 };
};
} // namespace benchmark
} // namespace test
diff --git a/tests/benchmark/fixtures/DepthwiseConvolutionLayerFixture.h b/tests/benchmark/fixtures/DepthwiseConvolutionLayerFixture.h
index 9276431de8..48ea03810f 100644
--- a/tests/benchmark/fixtures/DepthwiseConvolutionLayerFixture.h
+++ b/tests/benchmark/fixtures/DepthwiseConvolutionLayerFixture.h
@@ -57,15 +57,15 @@ public:
weights_shape.set(2, dst_shape.z());
// Set batched in source and destination shapes
- const unsigned int fixed_point_position = 4;
+
src_shape.set(3 /* batch */, batches);
dst_shape.set(3 /* batch */, batches);
// Create tensors
- src = create_tensor<TensorType>(src_shape, data_type, 1, fixed_point_position, QuantizationInfo(0.5f, 10));
- weights = create_tensor<TensorType>(weights_shape, data_type, 1, fixed_point_position, QuantizationInfo(0.5f, 10));
- biases = create_tensor<TensorType>(TensorShape(weights_shape[2]), is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type, 1, fixed_point_position);
- dst = create_tensor<TensorType>(dst_shape, data_type, 1, fixed_point_position, QuantizationInfo(0.5f, 10));
+ src = create_tensor<TensorType>(src_shape, data_type, 1, QuantizationInfo(0.5f, 10));
+ weights = create_tensor<TensorType>(weights_shape, data_type, 1, QuantizationInfo(0.5f, 10));
+ biases = create_tensor<TensorType>(TensorShape(weights_shape[2]), is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type, 1);
+ dst = create_tensor<TensorType>(dst_shape, data_type, 1, QuantizationInfo(0.5f, 10));
// Create and configure function
depth_conv.configure(&src, &weights, &biases, &dst, info);
diff --git a/tests/benchmark/fixtures/DepthwiseSeparableConvolutionLayerFixture.h b/tests/benchmark/fixtures/DepthwiseSeparableConvolutionLayerFixture.h
index ef1a40704f..927bb4d64d 100644
--- a/tests/benchmark/fixtures/DepthwiseSeparableConvolutionLayerFixture.h
+++ b/tests/benchmark/fixtures/DepthwiseSeparableConvolutionLayerFixture.h
@@ -47,18 +47,18 @@ public:
PadStrideInfo pad_stride_depthwise_info, PadStrideInfo pad_stride_pointwise_info, DataType data_type, int batches)
{
// Set batched in source and destination shapes
- const unsigned int fixed_point_position = 4;
+
src_shape.set(3 /* batch */, batches);
depthwise_out_shape.set(3 /* batch */, batches);
dst_shape.set(3 /* batch */, batches);
- src = create_tensor<TensorType>(src_shape, data_type, 1, fixed_point_position);
- depthwise_weights = create_tensor<TensorType>(depthwise_weights_shape, data_type, 1, fixed_point_position);
- depthwise_biases = create_tensor<TensorType>(depthwise_biases_shape, data_type, 1, fixed_point_position);
- depthwise_out = create_tensor<TensorType>(depthwise_out_shape, data_type, 1, fixed_point_position);
- pointwise_weights = create_tensor<TensorType>(pointwise_weights_shape, data_type, 1, fixed_point_position);
- pointwise_biases = create_tensor<TensorType>(pointwise_biases_shape, data_type, 1, fixed_point_position);
- dst = create_tensor<TensorType>(dst_shape, data_type, 1, fixed_point_position);
+ src = create_tensor<TensorType>(src_shape, data_type, 1);
+ depthwise_weights = create_tensor<TensorType>(depthwise_weights_shape, data_type, 1);
+ depthwise_biases = create_tensor<TensorType>(depthwise_biases_shape, data_type, 1);
+ depthwise_out = create_tensor<TensorType>(depthwise_out_shape, data_type, 1);
+ pointwise_weights = create_tensor<TensorType>(pointwise_weights_shape, data_type, 1);
+ pointwise_biases = create_tensor<TensorType>(pointwise_biases_shape, data_type, 1);
+ dst = create_tensor<TensorType>(dst_shape, data_type, 1);
// Create and configure function
depth_sep_conv_layer.configure(&src, &depthwise_weights, &depthwise_biases, &depthwise_out, &pointwise_weights, &pointwise_biases, &dst, pad_stride_depthwise_info, pad_stride_pointwise_info);
diff --git a/tests/benchmark/fixtures/DirectConvolutionLayerFixture.h b/tests/benchmark/fixtures/DirectConvolutionLayerFixture.h
index 419f6dd822..f74f0ece1a 100644
--- a/tests/benchmark/fixtures/DirectConvolutionLayerFixture.h
+++ b/tests/benchmark/fixtures/DirectConvolutionLayerFixture.h
@@ -49,16 +49,16 @@ public:
ARM_COMPUTE_UNUSED(dilation);
// Set batched in source and destination shapes
- const unsigned int fixed_point_position = 4;
+
src_shape.set(3 /* batch */, batches);
dst_shape.set(3 /* batch */, batches);
DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
// Create tensors
- src = create_tensor<TensorType>(src_shape, data_type, 1, fixed_point_position);
- weights = create_tensor<TensorType>(weights_shape, data_type, 1, fixed_point_position);
- biases = create_tensor<TensorType>(biases_shape, bias_data_type, 1, fixed_point_position);
- dst = create_tensor<TensorType>(dst_shape, data_type, 1, fixed_point_position);
+ src = create_tensor<TensorType>(src_shape, data_type, 1);
+ weights = create_tensor<TensorType>(weights_shape, data_type, 1);
+ biases = create_tensor<TensorType>(biases_shape, bias_data_type, 1);
+ dst = create_tensor<TensorType>(dst_shape, data_type, 1);
// Create and configure function
conv_layer.configure(&src, &weights, &biases, &dst, info, act_info);
diff --git a/tests/benchmark/fixtures/FlattenLayerFixture.h b/tests/benchmark/fixtures/FlattenLayerFixture.h
index 749fa0d7d6..3d46989f71 100644
--- a/tests/benchmark/fixtures/FlattenLayerFixture.h
+++ b/tests/benchmark/fixtures/FlattenLayerFixture.h
@@ -46,11 +46,9 @@ public:
TensorShape shape_flatten(shape);
shape_flatten.collapse(3);
- const unsigned int fixed_point_position = is_data_type_fixed_point(data_type) ? 4 : 0;
-
// Create tensors
- src = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position);
- dst = create_tensor<TensorType>(shape_flatten, data_type, 1, fixed_point_position);
+ src = create_tensor<TensorType>(shape, data_type, 1);
+ dst = create_tensor<TensorType>(shape_flatten, data_type, 1);
// Create and configure function
flatten_func.configure(&src, &dst);
diff --git a/tests/benchmark/fixtures/FullyConnectedLayerFixture.h b/tests/benchmark/fixtures/FullyConnectedLayerFixture.h
index e7a5260f44..caef5bebc9 100644
--- a/tests/benchmark/fixtures/FullyConnectedLayerFixture.h
+++ b/tests/benchmark/fixtures/FullyConnectedLayerFixture.h
@@ -45,15 +45,15 @@ public:
void setup(TensorShape src_shape, TensorShape weights_shape, TensorShape biases_shape, TensorShape dst_shape, DataType data_type, int batches)
{
// Set batched in source and destination shapes
- const unsigned int fixed_point_position = 4;
+
src_shape.set(src_shape.num_dimensions() /* batch */, batches);
dst_shape.set(dst_shape.num_dimensions() /* batch */, batches);
// Create tensors
- src = create_tensor<TensorType>(src_shape, data_type, 1, fixed_point_position);
- weights = create_tensor<TensorType>(weights_shape, data_type, 1, fixed_point_position);
- biases = create_tensor<TensorType>(biases_shape, data_type, 1, fixed_point_position);
- dst = create_tensor<TensorType>(dst_shape, data_type, 1, fixed_point_position);
+ src = create_tensor<TensorType>(src_shape, data_type, 1);
+ weights = create_tensor<TensorType>(weights_shape, data_type, 1);
+ biases = create_tensor<TensorType>(biases_shape, data_type, 1);
+ dst = create_tensor<TensorType>(dst_shape, data_type, 1);
// Create and configure function
fc_layer.configure(&src, &weights, &biases, &dst);
diff --git a/tests/benchmark/fixtures/GEMMFixture.h b/tests/benchmark/fixtures/GEMMFixture.h
index f706f3eda6..7628abca01 100644
--- a/tests/benchmark/fixtures/GEMMFixture.h
+++ b/tests/benchmark/fixtures/GEMMFixture.h
@@ -44,13 +44,11 @@ public:
template <typename...>
void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, TensorShape shape_dst, float alpha, float beta, DataType data_type, bool reshape_b_only_on_first_run)
{
- constexpr int fixed_point_position = 4;
-
// Create tensors
- a = create_tensor<TensorType>(shape_a, data_type, 1, fixed_point_position);
- b = create_tensor<TensorType>(shape_b, data_type, 1, fixed_point_position);
- c = create_tensor<TensorType>(shape_c, data_type, 1, fixed_point_position);
- dst = create_tensor<TensorType>(shape_dst, data_type, 1, fixed_point_position);
+ a = create_tensor<TensorType>(shape_a, data_type, 1);
+ b = create_tensor<TensorType>(shape_b, data_type, 1);
+ c = create_tensor<TensorType>(shape_c, data_type, 1);
+ dst = create_tensor<TensorType>(shape_dst, data_type, 1);
// Create and configure function
gemm.configure(&a, &b, &c, &dst, alpha, beta, GEMMInfo(false, false, reshape_b_only_on_first_run));
diff --git a/tests/benchmark/fixtures/GEMMInterleave4x4Fixture.h b/tests/benchmark/fixtures/GEMMInterleave4x4Fixture.h
index 793c540482..c8e6f4a8db 100644
--- a/tests/benchmark/fixtures/GEMMInterleave4x4Fixture.h
+++ b/tests/benchmark/fixtures/GEMMInterleave4x4Fixture.h
@@ -44,14 +44,12 @@ public:
template <typename...>
void setup(size_t x, size_t y, DataType data_type)
{
- constexpr int fixed_point_position = 4;
-
const TensorShape shape_a(x, y);
const TensorShape shape_b(static_cast<size_t>(x * 4.f), static_cast<size_t>(std::ceil(y / 4.f)));
// Create tensors
- a = create_tensor<TensorType>(shape_a, data_type, 1, fixed_point_position);
- b = create_tensor<TensorType>(shape_b, data_type, 1, fixed_point_position);
+ a = create_tensor<TensorType>(shape_a, data_type, 1);
+ b = create_tensor<TensorType>(shape_b, data_type, 1);
// Create and configure function
gemm.configure(&a, &b);
diff --git a/tests/benchmark/fixtures/GEMMLowpFixture.h b/tests/benchmark/fixtures/GEMMLowpFixture.h
index b5381b0934..46a2f5cc6a 100644
--- a/tests/benchmark/fixtures/GEMMLowpFixture.h
+++ b/tests/benchmark/fixtures/GEMMLowpFixture.h
@@ -53,9 +53,9 @@ public:
// Note: The offsets for matrix A and matrix B are set to 0 in order to skip the computation for the offset contribution
// Create tensors
- a = create_tensor<TensorType>(shape_a, DataType::QASYMM8, 1, 0, QuantizationInfo(1.0f / 255.0f, 0));
- b = create_tensor<TensorType>(shape_b, DataType::QASYMM8, 1, 0, QuantizationInfo(1.0f / 255.0f, 0));
- c = create_tensor<TensorType>(shape_dst, DataType::S32, 1, 0, QuantizationInfo(1.0f / 255.0f, 0));
+ a = create_tensor<TensorType>(shape_a, DataType::QASYMM8, 1, QuantizationInfo(1.0f / 255.0f, 0));
+ b = create_tensor<TensorType>(shape_b, DataType::QASYMM8, 1, QuantizationInfo(1.0f / 255.0f, 0));
+ c = create_tensor<TensorType>(shape_dst, DataType::S32, 1, QuantizationInfo(1.0f / 255.0f, 0));
// Create and configure function
gemmlowp.configure(&a, &b, &c);
diff --git a/tests/benchmark/fixtures/NormalizationLayerFixture.h b/tests/benchmark/fixtures/NormalizationLayerFixture.h
index 7742dcacfd..4331506250 100644
--- a/tests/benchmark/fixtures/NormalizationLayerFixture.h
+++ b/tests/benchmark/fixtures/NormalizationLayerFixture.h
@@ -45,12 +45,12 @@ public:
void setup(TensorShape shape, NormalizationLayerInfo info, DataType data_type, int batches)
{
// Set batched in source and destination shapes
- const unsigned int fixed_point_position = 4;
+
shape.set(shape.num_dimensions(), batches);
// Create tensors
- src = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position);
- dst = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position);
+ src = create_tensor<TensorType>(shape, data_type, 1);
+ dst = create_tensor<TensorType>(shape, data_type, 1);
// Create and configure function
norm_layer.configure(&src, &dst, info);
diff --git a/tests/benchmark/fixtures/PoolingLayerFixture.h b/tests/benchmark/fixtures/PoolingLayerFixture.h
index 5a1a29612c..cbcfe2e869 100644
--- a/tests/benchmark/fixtures/PoolingLayerFixture.h
+++ b/tests/benchmark/fixtures/PoolingLayerFixture.h
@@ -48,7 +48,6 @@ public:
void setup(TensorShape src_shape, PoolingLayerInfo info, DataType data_type, DataLayout data_layout, int batches)
{
// Set batched in source and destination shapes
- const unsigned int fixed_point_position = 4;
// Permute shape if NHWC format
if(data_layout == DataLayout::NHWC)
@@ -56,7 +55,7 @@ public:
permute(src_shape, PermutationVector(2U, 0U, 1U));
}
- TensorInfo src_info(src_shape, 1, data_type, fixed_point_position);
+ TensorInfo src_info(src_shape, 1, data_type);
src_info.set_data_layout(data_layout);
TensorShape dst_shape = compute_pool_shape(src_info, info);
@@ -65,8 +64,8 @@ public:
dst_shape.set(dst_shape.num_dimensions(), batches);
// Create tensors
- src = create_tensor<TensorType>(src_shape, data_type, 1, fixed_point_position, QuantizationInfo(), data_layout);
- dst = create_tensor<TensorType>(dst_shape, data_type, 1, fixed_point_position, QuantizationInfo(), data_layout);
+ src = create_tensor<TensorType>(src_shape, data_type, 1, QuantizationInfo(), data_layout);
+ dst = create_tensor<TensorType>(dst_shape, data_type, 1, QuantizationInfo(), data_layout);
// Create and configure function
pool_layer.configure(&src, &dst, info);
diff --git a/tests/benchmark/fixtures/ROIPoolingLayerFixture.h b/tests/benchmark/fixtures/ROIPoolingLayerFixture.h
index 4adfa446d6..fa4a5b7044 100644
--- a/tests/benchmark/fixtures/ROIPoolingLayerFixture.h
+++ b/tests/benchmark/fixtures/ROIPoolingLayerFixture.h
@@ -47,8 +47,8 @@ public:
void setup(TensorShape shape, const ROIPoolingLayerInfo pool_info, unsigned int num_rois, DataType data_type, int batches)
{
// Set batched in source and destination shapes
- const unsigned int fixed_point_position = 4;
- TensorShape shape_dst;
+
+ TensorShape shape_dst;
shape.set(shape.num_dimensions(), batches);
shape_dst.set(0, pool_info.pooled_width());
shape_dst.set(1, pool_info.pooled_height());
@@ -56,8 +56,8 @@ public:
shape_dst.set(3, num_rois);
// Create tensors
- src = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position);
- dst = create_tensor<TensorType>(shape_dst, data_type, 1, fixed_point_position);
+ src = create_tensor<TensorType>(shape, data_type, 1);
+ dst = create_tensor<TensorType>(shape_dst, data_type, 1);
// Create random ROIs
std::vector<ROI> rois = generate_random_rois(shape, pool_info, num_rois, 0U);
diff --git a/tests/benchmark/fixtures/SoftmaxLayerFixture.h b/tests/benchmark/fixtures/SoftmaxLayerFixture.h
index 4f6dde8706..4d092f7567 100644
--- a/tests/benchmark/fixtures/SoftmaxLayerFixture.h
+++ b/tests/benchmark/fixtures/SoftmaxLayerFixture.h
@@ -45,11 +45,9 @@ public:
template <typename...>
void setup(TensorShape shape, DataType data_type)
{
- const unsigned int fixed_point_position = 4;
-
// Create tensors
- src = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position, QuantizationInfo(1.f / 256, 10));
- dst = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position, QuantizationInfo(1.f / 256, 0));
+ src = create_tensor<TensorType>(shape, data_type, 1, QuantizationInfo(1.f / 256, 10));
+ dst = create_tensor<TensorType>(shape, data_type, 1, QuantizationInfo(1.f / 256, 0));
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
diff --git a/tests/benchmark/fixtures/WinogradConvolutionLayerFixture.h b/tests/benchmark/fixtures/WinogradConvolutionLayerFixture.h
index 8ed75af664..5f44517817 100644
--- a/tests/benchmark/fixtures/WinogradConvolutionLayerFixture.h
+++ b/tests/benchmark/fixtures/WinogradConvolutionLayerFixture.h
@@ -48,16 +48,16 @@ public:
ARM_COMPUTE_UNUSED(dilation);
// Set batched in source and destination shapes
- const unsigned int fixed_point_position = 4;
+
src_shape.set(3 /* batch */, batches);
dst_shape.set(3 /* batch */, batches);
DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
// Create tensors
- src = create_tensor<TensorType>(src_shape, data_type, 1, fixed_point_position);
- weights = create_tensor<TensorType>(weights_shape, data_type, 1, fixed_point_position);
- biases = create_tensor<TensorType>(biases_shape, bias_data_type, 1, fixed_point_position);
- dst = create_tensor<TensorType>(dst_shape, data_type, 1, fixed_point_position);
+ src = create_tensor<TensorType>(src_shape, data_type, 1);
+ weights = create_tensor<TensorType>(weights_shape, data_type, 1);
+ biases = create_tensor<TensorType>(biases_shape, bias_data_type, 1);
+ dst = create_tensor<TensorType>(dst_shape, data_type, 1);
// Create and configure function
conv_layer.configure(&src, &weights, &biases, &dst, info, act_info);
diff --git a/tests/networks/AlexNetNetwork.h b/tests/networks/AlexNetNetwork.h
index fa3734d106..e92affe954 100644
--- a/tests/networks/AlexNetNetwork.h
+++ b/tests/networks/AlexNetNetwork.h
@@ -56,37 +56,35 @@ class AlexNetNetwork
public:
/** Initialize the network.
*
- * @param[in] data_type Data type.
- * @param[in] fixed_point_position Fixed point position (for quantized data types).
- * @param[in] batches Number of batches.
- * @param[in] reshaped_weights Whether the weights need reshaping or not. Default: false.
+ * @param[in] data_type Data type.
+ * @param[in] batches Number of batches.
+ * @param[in] reshaped_weights Whether the weights need reshaping or not. Default: false.
*/
- void init(DataType data_type, int fixed_point_position, int batches, bool reshaped_weights = false)
+ void init(DataType data_type, int batches, bool reshaped_weights = false)
{
- _data_type = data_type;
- _fixed_point_position = fixed_point_position;
- _batches = batches;
- _reshaped_weights = reshaped_weights;
+ _data_type = data_type;
+ _batches = batches;
+ _reshaped_weights = reshaped_weights;
// Initialize weights and biases
if(!_reshaped_weights)
{
- w[0].allocator()->init(TensorInfo(TensorShape(11U, 11U, 3U, 96U), 1, _data_type, _fixed_point_position));
- b[0].allocator()->init(TensorInfo(TensorShape(96U), 1, _data_type, _fixed_point_position));
- w[1].allocator()->init(TensorInfo(TensorShape(5U, 5U, 48U, 256U), 1, _data_type, _fixed_point_position));
- b[1].allocator()->init(TensorInfo(TensorShape(256U), 1, _data_type, _fixed_point_position));
- w[2].allocator()->init(TensorInfo(TensorShape(3U, 3U, 256U, 384U), 1, _data_type, _fixed_point_position));
- b[2].allocator()->init(TensorInfo(TensorShape(384U), 1, _data_type, _fixed_point_position));
- w[3].allocator()->init(TensorInfo(TensorShape(3U, 3U, 192U, 384U), 1, _data_type, _fixed_point_position));
- b[3].allocator()->init(TensorInfo(TensorShape(384U), 1, _data_type, _fixed_point_position));
- w[4].allocator()->init(TensorInfo(TensorShape(3U, 3U, 192U, 256U), 1, _data_type, _fixed_point_position));
- b[4].allocator()->init(TensorInfo(TensorShape(256U), 1, _data_type, _fixed_point_position));
- w[5].allocator()->init(TensorInfo(TensorShape(9216U, 4096U), 1, _data_type, _fixed_point_position));
- b[5].allocator()->init(TensorInfo(TensorShape(4096U), 1, _data_type, _fixed_point_position));
- w[6].allocator()->init(TensorInfo(TensorShape(4096U, 4096U), 1, _data_type, _fixed_point_position));
- b[6].allocator()->init(TensorInfo(TensorShape(4096U), 1, _data_type, _fixed_point_position));
- w[7].allocator()->init(TensorInfo(TensorShape(4096U, 1000U), 1, _data_type, _fixed_point_position));
- b[7].allocator()->init(TensorInfo(TensorShape(1000U), 1, _data_type, _fixed_point_position));
+ w[0].allocator()->init(TensorInfo(TensorShape(11U, 11U, 3U, 96U), 1, _data_type));
+ b[0].allocator()->init(TensorInfo(TensorShape(96U), 1, _data_type));
+ w[1].allocator()->init(TensorInfo(TensorShape(5U, 5U, 48U, 256U), 1, _data_type));
+ b[1].allocator()->init(TensorInfo(TensorShape(256U), 1, _data_type));
+ w[2].allocator()->init(TensorInfo(TensorShape(3U, 3U, 256U, 384U), 1, _data_type));
+ b[2].allocator()->init(TensorInfo(TensorShape(384U), 1, _data_type));
+ w[3].allocator()->init(TensorInfo(TensorShape(3U, 3U, 192U, 384U), 1, _data_type));
+ b[3].allocator()->init(TensorInfo(TensorShape(384U), 1, _data_type));
+ w[4].allocator()->init(TensorInfo(TensorShape(3U, 3U, 192U, 256U), 1, _data_type));
+ b[4].allocator()->init(TensorInfo(TensorShape(256U), 1, _data_type));
+ w[5].allocator()->init(TensorInfo(TensorShape(9216U, 4096U), 1, _data_type));
+ b[5].allocator()->init(TensorInfo(TensorShape(4096U), 1, _data_type));
+ w[6].allocator()->init(TensorInfo(TensorShape(4096U, 4096U), 1, _data_type));
+ b[6].allocator()->init(TensorInfo(TensorShape(4096U), 1, _data_type));
+ w[7].allocator()->init(TensorInfo(TensorShape(4096U, 1000U), 1, _data_type));
+ b[7].allocator()->init(TensorInfo(TensorShape(1000U), 1, _data_type));
w11 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[1], TensorShape(5U, 5U, 48U, 128U), Coordinates()));
w12 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[1], TensorShape(5U, 5U, 48U, 128U), Coordinates(0, 0, 0, 128)));
@@ -122,7 +120,7 @@ public:
};
// Create tensor for the reshaped weights
- w[0].allocator()->init(TensorInfo(reshape(366U, 96U, true), 1, _data_type, _fixed_point_position));
+ w[0].allocator()->init(TensorInfo(reshape(366U, 96U, true), 1, _data_type));
// Configure the direct convolution's weights. Direct convolution doesn't need reshape weights
if(!_is_direct_conv)
@@ -133,13 +131,13 @@ public:
auto w32_tensor = std::unique_ptr<TensorType>(new TensorType());
auto w41_tensor = std::unique_ptr<TensorType>(new TensorType());
auto w42_tensor = std::unique_ptr<TensorType>(new TensorType());
- w11_tensor->allocator()->init(TensorInfo(reshape(1248U, 128U, true), 1, _data_type, _fixed_point_position));
- w12_tensor->allocator()->init(TensorInfo(reshape(1248U, 128U, true), 1, _data_type, _fixed_point_position));
- w31_tensor->allocator()->init(TensorInfo(reshape(1920U, 192U, true), 1, _data_type, _fixed_point_position));
- w32_tensor->allocator()->init(TensorInfo(reshape(1920U, 192U, true), 1, _data_type, _fixed_point_position));
- w41_tensor->allocator()->init(TensorInfo(reshape(1920U, 128U, true), 1, _data_type, _fixed_point_position));
- w42_tensor->allocator()->init(TensorInfo(reshape(1920U, 128U, true), 1, _data_type, _fixed_point_position));
- w[2].allocator()->init(TensorInfo(reshape(2560U, 384U, true), 1, _data_type, _fixed_point_position));
+ w11_tensor->allocator()->init(TensorInfo(reshape(1248U, 128U, true), 1, _data_type));
+ w12_tensor->allocator()->init(TensorInfo(reshape(1248U, 128U, true), 1, _data_type));
+ w31_tensor->allocator()->init(TensorInfo(reshape(1920U, 192U, true), 1, _data_type));
+ w32_tensor->allocator()->init(TensorInfo(reshape(1920U, 192U, true), 1, _data_type));
+ w41_tensor->allocator()->init(TensorInfo(reshape(1920U, 128U, true), 1, _data_type));
+ w42_tensor->allocator()->init(TensorInfo(reshape(1920U, 128U, true), 1, _data_type));
+ w[2].allocator()->init(TensorInfo(reshape(2560U, 384U, true), 1, _data_type));
w11 = std::move(w11_tensor);
w12 = std::move(w12_tensor);
w31 = std::move(w31_tensor);
@@ -149,14 +147,14 @@ public:
}
else
{
- w[1].allocator()->init(TensorInfo(TensorShape(5U, 5U, 48U, 256U), 1, _data_type, _fixed_point_position));
- b[1].allocator()->init(TensorInfo(TensorShape(256U), 1, _data_type, _fixed_point_position));
- w[2].allocator()->init(TensorInfo(TensorShape(3U, 3U, 256U, 384U), 1, _data_type, _fixed_point_position));
- b[2].allocator()->init(TensorInfo(TensorShape(384U), 1, _data_type, _fixed_point_position));
- w[3].allocator()->init(TensorInfo(TensorShape(3U, 3U, 192U, 384U), 1, _data_type, _fixed_point_position));
- b[3].allocator()->init(TensorInfo(TensorShape(384U), 1, _data_type, _fixed_point_position));
- w[4].allocator()->init(TensorInfo(TensorShape(3U, 3U, 192U, 256U), 1, _data_type, _fixed_point_position));
- b[4].allocator()->init(TensorInfo(TensorShape(256U), 1, _data_type, _fixed_point_position));
+ w[1].allocator()->init(TensorInfo(TensorShape(5U, 5U, 48U, 256U), 1, _data_type));
+ b[1].allocator()->init(TensorInfo(TensorShape(256U), 1, _data_type));
+ w[2].allocator()->init(TensorInfo(TensorShape(3U, 3U, 256U, 384U), 1, _data_type));
+ b[2].allocator()->init(TensorInfo(TensorShape(384U), 1, _data_type));
+ w[3].allocator()->init(TensorInfo(TensorShape(3U, 3U, 192U, 384U), 1, _data_type));
+ b[3].allocator()->init(TensorInfo(TensorShape(384U), 1, _data_type));
+ w[4].allocator()->init(TensorInfo(TensorShape(3U, 3U, 192U, 256U), 1, _data_type));
+ b[4].allocator()->init(TensorInfo(TensorShape(256U), 1, _data_type));
w11 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[1], TensorShape(5U, 5U, 48U, 128U), Coordinates()));
w12 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[1], TensorShape(5U, 5U, 48U, 128U), Coordinates(0, 0, 0, 128)));
b11 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[1], TensorShape(128U), Coordinates()));
@@ -173,21 +171,21 @@ public:
b42 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[4], TensorShape(128U), Coordinates(128)));
}
- b[5].allocator()->init(TensorInfo(TensorShape(4096U), 1, _data_type, _fixed_point_position));
- b[6].allocator()->init(TensorInfo(TensorShape(4096U), 1, _data_type, _fixed_point_position));
- b[7].allocator()->init(TensorInfo(TensorShape(1000U), 1, _data_type, _fixed_point_position));
+ b[5].allocator()->init(TensorInfo(TensorShape(4096U), 1, _data_type));
+ b[6].allocator()->init(TensorInfo(TensorShape(4096U), 1, _data_type));
+ b[7].allocator()->init(TensorInfo(TensorShape(1000U), 1, _data_type));
if(_batches > 1 && std::is_same<TensorType, Tensor>::value)
{
- w[5].allocator()->init(TensorInfo(reshape(9216U, 4096U, false), 1, _data_type, _fixed_point_position));
- w[6].allocator()->init(TensorInfo(reshape(4096U, 4096U, false), 1, _data_type, _fixed_point_position));
- w[7].allocator()->init(TensorInfo(reshape(4096U, 1000U, false), 1, _data_type, _fixed_point_position));
+ w[5].allocator()->init(TensorInfo(reshape(9216U, 4096U, false), 1, _data_type));
+ w[6].allocator()->init(TensorInfo(reshape(4096U, 4096U, false), 1, _data_type));
+ w[7].allocator()->init(TensorInfo(reshape(4096U, 1000U, false), 1, _data_type));
}
else
{
- w[5].allocator()->init(TensorInfo(TensorShape(4096U, 9216U), 1, _data_type, _fixed_point_position));
- w[6].allocator()->init(TensorInfo(TensorShape(4096U, 4096U), 1, _data_type, _fixed_point_position));
- w[7].allocator()->init(TensorInfo(TensorShape(1000U, 4096U), 1, _data_type, _fixed_point_position));
+ w[5].allocator()->init(TensorInfo(TensorShape(4096U, 9216U), 1, _data_type));
+ w[6].allocator()->init(TensorInfo(TensorShape(4096U, 4096U), 1, _data_type));
+ w[7].allocator()->init(TensorInfo(TensorShape(1000U, 4096U), 1, _data_type));
}
}
}
@@ -195,50 +193,50 @@ public:
/** Build the network */
void build()
{
- input.allocator()->init(TensorInfo(TensorShape(227U, 227U, 3U, _batches), 1, _data_type, _fixed_point_position));
- output.allocator()->init(TensorInfo(TensorShape(1000U, _batches), 1, _data_type, _fixed_point_position));
+ input.allocator()->init(TensorInfo(TensorShape(227U, 227U, 3U, _batches), 1, _data_type));
+ output.allocator()->init(TensorInfo(TensorShape(1000U, _batches), 1, _data_type));
// Initialize intermediate tensors
// Layer 1
- conv1_out.allocator()->init(TensorInfo(TensorShape(55U, 55U, 96U, _batches), 1, _data_type, _fixed_point_position));
- act1_out.allocator()->init(TensorInfo(TensorShape(55U, 55U, 96U, _batches), 1, _data_type, _fixed_point_position));
- norm1_out.allocator()->init(TensorInfo(TensorShape(55U, 55U, 96U, _batches), 1, _data_type, _fixed_point_position));
- pool1_out.allocator()->init(TensorInfo(TensorShape(27U, 27U, 96U, _batches), 1, _data_type, _fixed_point_position));
+ conv1_out.allocator()->init(TensorInfo(TensorShape(55U, 55U, 96U, _batches), 1, _data_type));
+ act1_out.allocator()->init(TensorInfo(TensorShape(55U, 55U, 96U, _batches), 1, _data_type));
+ norm1_out.allocator()->init(TensorInfo(TensorShape(55U, 55U, 96U, _batches), 1, _data_type));
+ pool1_out.allocator()->init(TensorInfo(TensorShape(27U, 27U, 96U, _batches), 1, _data_type));
pool11_out = std::unique_ptr<SubTensorType>(new SubTensorType(&pool1_out, TensorShape(27U, 27U, 48U, _batches), Coordinates()));
pool12_out = std::unique_ptr<SubTensorType>(new SubTensorType(&pool1_out, TensorShape(27U, 27U, 48U, _batches), Coordinates(0, 0, 48)));
// Layer 2
- conv2_out.allocator()->init(TensorInfo(TensorShape(27U, 27U, 256U, _batches), 1, _data_type, _fixed_point_position));
+ conv2_out.allocator()->init(TensorInfo(TensorShape(27U, 27U, 256U, _batches), 1, _data_type));
conv21_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv2_out, TensorShape(27U, 27U, 128U, _batches), Coordinates()));
conv22_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv2_out, TensorShape(27U, 27U, 128U, _batches), Coordinates(0, 0, 128)));
- act2_out.allocator()->init(TensorInfo(TensorShape(27U, 27U, 256U, _batches), 1, _data_type, _fixed_point_position));
- norm2_out.allocator()->init(TensorInfo(TensorShape(27U, 27U, 256U, _batches), 1, _data_type, _fixed_point_position));
- pool2_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 256U, _batches), 1, _data_type, _fixed_point_position));
+ act2_out.allocator()->init(TensorInfo(TensorShape(27U, 27U, 256U, _batches), 1, _data_type));
+ norm2_out.allocator()->init(TensorInfo(TensorShape(27U, 27U, 256U, _batches), 1, _data_type));
+ pool2_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 256U, _batches), 1, _data_type));
// Layer 3
- conv3_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 384U, _batches), 1, _data_type, _fixed_point_position));
- act3_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 384U, _batches), 1, _data_type, _fixed_point_position));
+ conv3_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 384U, _batches), 1, _data_type));
+ act3_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 384U, _batches), 1, _data_type));
act31_out = std::unique_ptr<SubTensorType>(new SubTensorType(&act3_out, TensorShape(13U, 13U, 192U, _batches), Coordinates()));
act32_out = std::unique_ptr<SubTensorType>(new SubTensorType(&act3_out, TensorShape(13U, 13U, 192U, _batches), Coordinates(0, 0, 192)));
// Layer 4
- conv4_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 384U, _batches), 1, _data_type, _fixed_point_position));
+ conv4_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 384U, _batches), 1, _data_type));
conv41_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv4_out, TensorShape(13U, 13U, 192U, _batches), Coordinates()));
conv42_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv4_out, TensorShape(13U, 13U, 192U, _batches), Coordinates(0, 0, 192)));
- act4_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 384U, _batches), 1, _data_type, _fixed_point_position));
+ act4_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 384U, _batches), 1, _data_type));
act41_out = std::unique_ptr<SubTensorType>(new SubTensorType(&act4_out, TensorShape(13U, 13U, 192U, _batches), Coordinates()));
act42_out = std::unique_ptr<SubTensorType>(new SubTensorType(&act4_out, TensorShape(13U, 13U, 192U, _batches), Coordinates(0, 0, 192)));
// Layer 5
- conv5_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 256U, _batches), 1, _data_type, _fixed_point_position));
+ conv5_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 256U, _batches), 1, _data_type));
conv51_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv5_out, TensorShape(13U, 13U, 128U, _batches), Coordinates()));
conv52_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv5_out, TensorShape(13U, 13U, 128U, _batches), Coordinates(0, 0, 128)));
- act5_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 256U, _batches), 1, _data_type, _fixed_point_position));
- pool5_out.allocator()->init(TensorInfo(TensorShape(6U, 6U, 256U, _batches), 1, _data_type, _fixed_point_position));
+ act5_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 256U, _batches), 1, _data_type));
+ pool5_out.allocator()->init(TensorInfo(TensorShape(6U, 6U, 256U, _batches), 1, _data_type));
// Layer 6
- fc6_out.allocator()->init(TensorInfo(TensorShape(4096U, _batches), 1, _data_type, _fixed_point_position));
- act6_out.allocator()->init(TensorInfo(TensorShape(4096U, _batches), 1, _data_type, _fixed_point_position));
+ fc6_out.allocator()->init(TensorInfo(TensorShape(4096U, _batches), 1, _data_type));
+ act6_out.allocator()->init(TensorInfo(TensorShape(4096U, _batches), 1, _data_type));
// Layer 7
- fc7_out.allocator()->init(TensorInfo(TensorShape(4096U, _batches), 1, _data_type, _fixed_point_position));
- act7_out.allocator()->init(TensorInfo(TensorShape(4096U, _batches), 1, _data_type, _fixed_point_position));
+ fc7_out.allocator()->init(TensorInfo(TensorShape(4096U, _batches), 1, _data_type));
+ act7_out.allocator()->init(TensorInfo(TensorShape(4096U, _batches), 1, _data_type));
// Layer 8
- fc8_out.allocator()->init(TensorInfo(TensorShape(1000U, _batches), 1, _data_type, _fixed_point_position));
+ fc8_out.allocator()->init(TensorInfo(TensorShape(1000U, _batches), 1, _data_type));
// Configure Layers
// Layer 1
@@ -606,7 +604,6 @@ private:
};
DataType _data_type{ DataType::UNKNOWN };
- int _fixed_point_position{ 0 };
unsigned int _batches{ 0 };
bool _reshaped_weights{ false };
bool _is_direct_conv{ !std::is_same<ConvolutionLayerFunction, DirectConvolutionLayerFunction>::value };
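All of the allocator hunks above apply one mechanical change: the trailing fixed-point-position argument disappears from the TensorInfo constructor, leaving shape, channel count and data type. A minimal before/after sketch of the pattern, assuming the three-argument TensorInfo overload the new lines rely on:

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"

using namespace arm_compute;

// Before the patch: the fourth argument carried the QS8/QS16 fixed point position.
// TensorInfo old_info(TensorShape(13U, 13U, 384U, 2U), 1, DataType::F32, 0);

// After the patch: shape, number of channels and data type only.
TensorInfo new_info(TensorShape(13U, 13U, 384U, 2U), 1, DataType::F32);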
diff --git a/tests/validation/CL/ActivationLayer.cpp b/tests/validation/CL/ActivationLayer.cpp
index f122f6deeb..4f97d7b6c1 100644
--- a/tests/validation/CL/ActivationLayer.cpp
+++ b/tests/validation/CL/ActivationLayer.cpp
@@ -221,9 +221,6 @@ TEST_SUITE_END()
TEST_SUITE_END()
template <typename T>
-using CLActivationLayerFixedPointFixture = ActivationValidationFixedPointFixture<CLTensor, CLAccessor, CLActivationLayer, T>;
-
-template <typename T>
using CLActivationLayerQuantizedFixture = ActivationValidationQuantizedFixture<CLTensor, CLAccessor, CLActivationLayer, T>;
/** Input data sets. */
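With the fixed-point fixture alias deleted, the quantized alias is the only template alias left here, instantiated per element type by the test cases. A hedged sketch of how such an alias is typically consumed; the dataset and tolerance names are placeholders, not taken from this file:

// Illustrative instantiation of the remaining alias for QASYMM8:
FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(datasets::SmallShapes(), QuantizedActivationDataset),
                               framework::dataset::make("DataType", DataType::QASYMM8)))
{
    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}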
diff --git a/tests/validation/CL/ArithmeticAddition.cpp b/tests/validation/CL/ArithmeticAddition.cpp
index 0646c05ad7..256d93f7f5 100644
--- a/tests/validation/CL/ArithmeticAddition.cpp
+++ b/tests/validation/CL/ArithmeticAddition.cpp
@@ -202,9 +202,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLArithmeticAdditionFixture<int16_t>, framework
}
TEST_SUITE_END()
-template <typename T>
-using CLArithmeticAdditionFixedPointFixture = ArithmeticAdditionValidationFixedPointFixture<CLTensor, CLAccessor, CLArithmeticAddition, T>;
-
TEST_SUITE(Float)
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ArithmeticAdditionFP16Dataset),
diff --git a/tests/validation/CL/ArithmeticSubtraction.cpp b/tests/validation/CL/ArithmeticSubtraction.cpp
index 4ba5387a61..b19d963515 100644
--- a/tests/validation/CL/ArithmeticSubtraction.cpp
+++ b/tests/validation/CL/ArithmeticSubtraction.cpp
@@ -231,9 +231,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLAriSubU8S16ToS16Fixture, framework::DatasetMo
TEST_SUITE_END()
TEST_SUITE_END()
-template <typename T1, typename T2 = T1, typename T3 = T1>
-using CLArithmeticSubtractionFixedPointFixture = ArithmeticSubtractionValidationFixedPointFixture<CLTensor, CLAccessor, CLArithmeticSubtraction, T1, T2, T3>;
-
TEST_SUITE(Float)
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ArithmeticSubtractionFP16Dataset),
diff --git a/tests/validation/CL/BatchNormalizationLayer.cpp b/tests/validation/CL/BatchNormalizationLayer.cpp
index de775bf807..0d80ff7eb7 100644
--- a/tests/validation/CL/BatchNormalizationLayer.cpp
+++ b/tests/validation/CL/BatchNormalizationLayer.cpp
@@ -67,9 +67,6 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combi
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
shape0, shape1, epsilon, use_gamma, use_beta, dt, data_layout)
{
- // Set fixed point position data type allowed
- const int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
-
TensorShape src_dst_shapes = shape0;
if(data_layout == DataLayout::NHWC)
{
@@ -77,12 +74,12 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combi
}
// Create tensors
- CLTensor src = create_tensor<CLTensor>(src_dst_shapes, dt, 1, fixed_point_position, QuantizationInfo(), data_layout);
- CLTensor dst = create_tensor<CLTensor>(src_dst_shapes, dt, 1, fixed_point_position, QuantizationInfo(), data_layout);
- CLTensor mean = create_tensor<CLTensor>(shape1, dt, 1, fixed_point_position);
- CLTensor var = create_tensor<CLTensor>(shape1, dt, 1, fixed_point_position);
- CLTensor beta = create_tensor<CLTensor>(shape1, dt, 1, fixed_point_position);
- CLTensor gamma = create_tensor<CLTensor>(shape1, dt, 1, fixed_point_position);
+ CLTensor src = create_tensor<CLTensor>(src_dst_shapes, dt, 1, QuantizationInfo(), data_layout);
+ CLTensor dst = create_tensor<CLTensor>(src_dst_shapes, dt, 1, QuantizationInfo(), data_layout);
+ CLTensor mean = create_tensor<CLTensor>(shape1, dt, 1);
+ CLTensor var = create_tensor<CLTensor>(shape1, dt, 1);
+ CLTensor beta = create_tensor<CLTensor>(shape1, dt, 1);
+ CLTensor gamma = create_tensor<CLTensor>(shape1, dt, 1);
// Create and Configure function
CLBatchNormalizationLayer norm;
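The companion change in the test helper: create_tensor loses the same argument, so QuantizationInfo and DataLayout now follow the channel count directly. A sketch of the post-patch call shape, with an assumed (illustrative) helper signature in the comment:

// Assumed post-patch signature of the tests' helper (illustrative):
// template <typename TensorType>
// TensorType create_tensor(const TensorShape &shape, DataType dt, int num_channels = 1,
//                          QuantizationInfo qinfo = QuantizationInfo(),
//                          DataLayout layout = DataLayout::NCHW);

CLTensor src  = create_tensor<CLTensor>(src_dst_shapes, dt, 1, QuantizationInfo(), DataLayout::NHWC);
CLTensor mean = create_tensor<CLTensor>(shape1, dt, 1); // defaults cover the common case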
diff --git a/tests/validation/CL/ConvolutionLayer.cpp b/tests/validation/CL/ConvolutionLayer.cpp
index 7fd29f4d69..30dd8502ca 100644
--- a/tests/validation/CL/ConvolutionLayer.cpp
+++ b/tests/validation/CL/ConvolutionLayer.cpp
@@ -153,16 +153,13 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
ActivationFunctionsDataset),
input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, act_info)
{
- // Set fixed point position data type allowed
- int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
// Create tensors
- CLTensor src = create_tensor<CLTensor>(input_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
- CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
- CLTensor bias = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
- CLTensor dst = create_tensor<CLTensor>(output_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
+ CLTensor src = create_tensor<CLTensor>(input_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+ CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+ CLTensor bias = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+ CLTensor dst = create_tensor<CLTensor>(output_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -251,9 +248,6 @@ TEST_SUITE_END()
TEST_SUITE_END()
template <typename T>
-using CLGEMMConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
-
-template <typename T>
using CLGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
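One kept line deserves a note: bias_data_type switches to S32 whenever the tensor type is quantized asymmetric. QASYMM8 kernels accumulate uint8 x uint8 products in 32-bit integers, so the bias is added in the accumulator domain rather than as an 8-bit quantized value. The selection, as a small standalone sketch:

#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"

using namespace arm_compute;

DataType pick_bias_type(DataType data_type)
{
    // QASYMM8 paths accumulate in int32; float paths keep the input type.
    return is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
}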
diff --git a/tests/validation/CL/DepthConvertLayer.cpp b/tests/validation/CL/DepthConvertLayer.cpp
index c6e9f75a59..ed1f54ca6e 100644
--- a/tests/validation/CL/DepthConvertLayer.cpp
+++ b/tests/validation/CL/DepthConvertLayer.cpp
@@ -67,19 +67,15 @@ template <typename T>
using CLDepthConvertLayerToU8Fixture = DepthConvertLayerValidationFixture<CLTensor, CLAccessor, CLDepthConvertLayer, T, uint8_t>;
template <typename T>
using CLDepthConvertLayerToU32Fixture = DepthConvertLayerValidationFixture<CLTensor, CLAccessor, CLDepthConvertLayer, T, uint32_t>;
-template <typename T>
-using CLDepthConvertLayerToFP32FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture<CLTensor, CLAccessor, CLDepthConvertLayer, T, float>;
TEST_SUITE(U8_to_U16)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
DepthConvertLayerShiftDataset),
shape, policy, shift)
{
- int fixed_point_position = 0;
-
// Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, DataType::U8, 1, fixed_point_position);
- CLTensor dst = create_tensor<CLTensor>(shape, DataType::U16, 1, fixed_point_position);
+ CLTensor src = create_tensor<CLTensor>(shape, DataType::U8, 1);
+ CLTensor dst = create_tensor<CLTensor>(shape, DataType::U16, 1);
// Create and Configure function
CLDepthConvertLayer depth_convert;
@@ -117,11 +113,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
DepthConvertLayerShiftDataset),
shape, policy, shift)
{
- int fixed_point_position = 0;
-
// Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, DataType::U8, 1, fixed_point_position);
- CLTensor dst = create_tensor<CLTensor>(shape, DataType::S16, 1, fixed_point_position);
+ CLTensor src = create_tensor<CLTensor>(shape, DataType::U8, 1);
+ CLTensor dst = create_tensor<CLTensor>(shape, DataType::S16, 1);
// Create and Configure function
CLDepthConvertLayer depth_convert;
@@ -158,11 +152,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
DepthConvertLayerShiftDataset),
shape, policy, shift)
{
- int fixed_point_position = 0;
-
// Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, DataType::U8, 1, fixed_point_position);
- CLTensor dst = create_tensor<CLTensor>(shape, DataType::S32, 1, fixed_point_position);
+ CLTensor src = create_tensor<CLTensor>(shape, DataType::U8, 1);
+ CLTensor dst = create_tensor<CLTensor>(shape, DataType::S32, 1);
// Create and Configure function
CLDepthConvertLayer depth_convert;
@@ -200,11 +192,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
DepthConvertLayerShiftDataset),
shape, policy, shift)
{
- int fixed_point_position = 0;
-
// Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, DataType::U16, 1, fixed_point_position);
- CLTensor dst = create_tensor<CLTensor>(shape, DataType::U8, 1, fixed_point_position);
+ CLTensor src = create_tensor<CLTensor>(shape, DataType::U16, 1);
+ CLTensor dst = create_tensor<CLTensor>(shape, DataType::U8, 1);
// Create and Configure function
CLDepthConvertLayer depth_convert;
@@ -241,11 +231,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
DepthConvertLayerShiftDataset),
shape, policy, shift)
{
- int fixed_point_position = 0;
-
// Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, DataType::U16, 1, fixed_point_position);
- CLTensor dst = create_tensor<CLTensor>(shape, DataType::U32, 1, fixed_point_position);
+ CLTensor src = create_tensor<CLTensor>(shape, DataType::U16, 1);
+ CLTensor dst = create_tensor<CLTensor>(shape, DataType::U32, 1);
// Create and Configure function
CLDepthConvertLayer depth_convert;
@@ -282,11 +270,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
DepthConvertLayerShiftDataset),
shape, policy, shift)
{
- int fixed_point_position = 0;
-
// Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, DataType::S16, 1, fixed_point_position);
- CLTensor dst = create_tensor<CLTensor>(shape, DataType::U8, 1, fixed_point_position);
+ CLTensor src = create_tensor<CLTensor>(shape, DataType::S16, 1);
+ CLTensor dst = create_tensor<CLTensor>(shape, DataType::U8, 1);
// Create and Configure function
CLDepthConvertLayer depth_convert;
@@ -323,11 +309,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
DepthConvertLayerShiftDataset),
shape, policy, shift)
{
- int fixed_point_position = 0;
-
// Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, DataType::S16, 1, fixed_point_position);
- CLTensor dst = create_tensor<CLTensor>(shape, DataType::S32, 1, fixed_point_position);
+ CLTensor src = create_tensor<CLTensor>(shape, DataType::S16, 1);
+ CLTensor dst = create_tensor<CLTensor>(shape, DataType::S32, 1);
// Create and Configure function
CLDepthConvertLayer depth_convert;
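DepthConvertLayer keeps its ConvertPolicy and shift parameters; only the tensor construction loses an argument. A sketch of one up-conversion configuration under the new creation helper, with configure's argument order assumed from the policy and shift datasets the cases iterate over:

CLTensor src = create_tensor<CLTensor>(shape, DataType::U8, 1);
CLTensor dst = create_tensor<CLTensor>(shape, DataType::U16, 1);

CLDepthConvertLayer depth_convert;
depth_convert.configure(&src, &dst, ConvertPolicy::SATURATE, /* shift */ 0);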
diff --git a/tests/validation/CL/DilatedConvolutionLayer.cpp b/tests/validation/CL/DilatedConvolutionLayer.cpp
index 4b22390b08..fdd6cc812a 100644
--- a/tests/validation/CL/DilatedConvolutionLayer.cpp
+++ b/tests/validation/CL/DilatedConvolutionLayer.cpp
@@ -114,16 +114,13 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
CNNDataTypes),
input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type)
{
- // Set fixed point position data type allowed
- int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
// Create tensors
- CLTensor src = create_tensor<CLTensor>(input_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
- CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
- CLTensor bias = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
- CLTensor dst = create_tensor<CLTensor>(output_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
+ CLTensor src = create_tensor<CLTensor>(input_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+ CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+ CLTensor bias = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+ CLTensor dst = create_tensor<CLTensor>(output_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -204,9 +201,6 @@ TEST_SUITE_END()
TEST_SUITE_END()
template <typename T>
-using CLGEMMDilatedConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
-
-template <typename T>
using CLGEMMDilatedConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
TEST_SUITE(Quantized)
diff --git a/tests/validation/CL/DirectConvolutionLayer.cpp b/tests/validation/CL/DirectConvolutionLayer.cpp
index d8b2d7e155..a796b6e4da 100644
--- a/tests/validation/CL/DirectConvolutionLayer.cpp
+++ b/tests/validation/CL/DirectConvolutionLayer.cpp
@@ -200,9 +200,6 @@ TEST_SUITE_END()
TEST_SUITE_END()
template <typename T>
-using CLDirectConvolutionLayerFixedPointFixture = DirectConvolutionValidationFixedPointFixture<CLTensor, CLAccessor, CLDirectConvolutionLayer, T>;
-
-template <typename T>
using CLDirectConvolutionLayerQuantizedFixture = DirectConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLDirectConvolutionLayer, T>;
template <typename T>
using CLDirectConvolutionValidationWithTensorShapesQuantizedFixture = DirectConvolutionValidationWithTensorShapesQuantizedFixture<CLTensor, CLAccessor, CLDirectConvolutionLayer, T>;
diff --git a/tests/validation/CL/FullyConnectedLayer.cpp b/tests/validation/CL/FullyConnectedLayer.cpp
index 069d8a73ac..9958a88419 100644
--- a/tests/validation/CL/FullyConnectedLayer.cpp
+++ b/tests/validation/CL/FullyConnectedLayer.cpp
@@ -69,10 +69,8 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
CNNDataTypes),
src_shape, weights_shape, bias_shape, dst_shape, transpose_weights, reshape_weights, data_type)
{
- // Set fixed point position data type allowed
- const int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
- const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
- const QuantizationInfo quantization_info = is_data_type_quantized_asymmetric(data_type) ? QuantizationInfo(2.f / 255.f, 127) : QuantizationInfo();
+ const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
+ const QuantizationInfo quantization_info = is_data_type_quantized_asymmetric(data_type) ? QuantizationInfo(2.f / 255.f, 127) : QuantizationInfo();
TensorShape ws(weights_shape);
@@ -85,10 +83,10 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
}
// Create tensors
- CLTensor src = create_tensor<CLTensor>(src_shape, data_type, 1, fixed_point_position, quantization_info);
- CLTensor weights = create_tensor<CLTensor>(ws, data_type, 1, fixed_point_position, quantization_info);
- CLTensor bias = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, fixed_point_position, quantization_info);
- CLTensor dst = create_tensor<CLTensor>(dst_shape, data_type, 1, fixed_point_position, quantization_info);
+ CLTensor src = create_tensor<CLTensor>(src_shape, data_type, 1, quantization_info);
+ CLTensor weights = create_tensor<CLTensor>(ws, data_type, 1, quantization_info);
+ CLTensor bias = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, quantization_info);
+ CLTensor dst = create_tensor<CLTensor>(dst_shape, data_type, 1, quantization_info);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -192,9 +190,6 @@ TEST_SUITE_END()
TEST_SUITE_END()
template <typename T>
-using CLFullyConnectedLayerFixedPointFixture = FullyConnectedLayerValidationFixedPointFixture<CLTensor, CLAccessor, CLFullyConnectedLayer, T, false>;
-
-template <typename T>
using CLFullyConnectedLayerQuantizedFixture = FullyConnectedLayerValidationQuantizedFixture<CLTensor, CLAccessor, CLFullyConnectedLayer, T, false>;
TEST_SUITE(Quantized)
diff --git a/tests/validation/CL/GEMM.cpp b/tests/validation/CL/GEMM.cpp
index d066281843..639182030e 100644
--- a/tests/validation/CL/GEMM.cpp
+++ b/tests/validation/CL/GEMM.cpp
@@ -86,14 +86,11 @@ TEST_SUITE_END() // INTERLEAVE_4X4
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallGEMMDataset(), datasets::LargeGEMMDataset()), CNNDataTypes),
shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type)
{
- // Set fixed point position data type allowed
- const int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
// Create tensors
- CLTensor a = create_tensor<CLTensor>(shape_a, data_type, 1, fixed_point_position);
- CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, fixed_point_position);
- CLTensor c = create_tensor<CLTensor>(shape_c, data_type, 1, fixed_point_position);
- CLTensor dst = create_tensor<CLTensor>(output_shape, data_type, 1, fixed_point_position);
+ CLTensor a = create_tensor<CLTensor>(shape_a, data_type, 1);
+ CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1);
+ CLTensor c = create_tensor<CLTensor>(shape_c, data_type, 1);
+ CLTensor dst = create_tensor<CLTensor>(output_shape, data_type, 1);
ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -152,9 +149,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMFixture<float>, framework::DatasetMode::N
TEST_SUITE_END()
TEST_SUITE_END()
-template <typename T>
-using CLGEMMFixedPointFixture = GEMMValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMM, T>;
-
TEST_SUITE(OUTPUT_3D)
TEST_SUITE(Float)
TEST_SUITE(FP32)
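The GEMM configuration case exercises dst = alpha * (a * b) + beta * c, with a of size M x K, b of size K x N, and c and dst of size M x N. A hedged configure sketch over the simplified tensors:

CLTensor a   = create_tensor<CLTensor>(shape_a, data_type, 1);
CLTensor b   = create_tensor<CLTensor>(shape_b, data_type, 1);
CLTensor c   = create_tensor<CLTensor>(shape_c, data_type, 1);
CLTensor dst = create_tensor<CLTensor>(output_shape, data_type, 1);

CLGEMM gemm;
gemm.configure(&a, &b, &c, &dst, alpha, beta); // dst = alpha*a*b + beta*c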
diff --git a/tests/validation/CL/NormalizationLayer.cpp b/tests/validation/CL/NormalizationLayer.cpp
index f6a8e7a9fb..a2dbaff272 100644
--- a/tests/validation/CL/NormalizationLayer.cpp
+++ b/tests/validation/CL/NormalizationLayer.cpp
@@ -52,10 +52,6 @@ const auto NormalizationDataset = combine(combine(combine(combine(datasets::Smal
framework::dataset::make("NormalizationSize", 3, 9, 2)),
framework::dataset::make("Beta", { 0.5f, 1.f, 2.f })),
framework::dataset::make("IsScaled", { true }));
-const auto NormalizationDatasetQS = combine(combine(combine(combine(datasets::TinyShapes(), datasets::NormalizationTypes()),
- framework::dataset::make("NormalizationSize", 3, 9, 2)),
- framework::dataset::make("Beta", { 0.5f, 1.f, 2.f })),
- framework::dataset::make("IsScaled", { true }));
const auto NormalizationDatasetFP16 = combine(combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("NormType", { NormType::IN_MAP_1D, NormType::CROSS_MAP })),
framework::dataset::make("NormalizationSize", 3, 9, 2)),
framework::dataset::make("Beta", { 0.5f, 1.f, 2.f })),
@@ -135,9 +131,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLNormalizationLayerFixture<float>, framework::
TEST_SUITE_END()
TEST_SUITE_END()
-template <typename T>
-using CLNormalizationLayerFixedPointFixture = NormalizationValidationFixedPointFixture<CLTensor, CLAccessor, CLNormalizationLayer, T>;
-
TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
diff --git a/tests/validation/CL/PoolingLayer.cpp b/tests/validation/CL/PoolingLayer.cpp
index b28a5ebca9..0b8a11fe5d 100644
--- a/tests/validation/CL/PoolingLayer.cpp
+++ b/tests/validation/CL/PoolingLayer.cpp
@@ -148,9 +148,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLPoolingLayerFixture<half>, framework::Dataset
TEST_SUITE_END() // FP16
TEST_SUITE_END() // Float
-template <typename T>
-using CLPoolingLayerFixedPointFixture = PoolingLayerValidationFixedPointFixture<CLTensor, CLAccessor, CLPoolingLayer, T>;
-
TEST_SUITE(Quantized)
template <typename T>
diff --git a/tests/validation/CL/SYSTEM/AlexNet.cpp b/tests/validation/CL/SYSTEM/AlexNet.cpp
index 75f8d19651..9be6f2cf53 100644
--- a/tests/validation/CL/SYSTEM/AlexNet.cpp
+++ b/tests/validation/CL/SYSTEM/AlexNet.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -79,7 +79,7 @@ std::vector<unsigned int> compute_alexnet(DataType dt, unsigned int batches, std
"cnn_data/alexnet_model/fc8_b.npy"
};
CLAlexNetModel network{};
- network.init(dt, 4, batches);
+ network.init(dt, batches);
network.build();
network.allocate();
network.fill(weight_files, bias_files);
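The system test shows the knock-on effect on the AlexNet helper network: init() previously took the data type, a fixed point position (4 above) and the batch count; only the first and last survive. The call sequence, restated from the hunk:

CLAlexNetModel network{};
network.init(dt, batches); // fixed point position argument removed
network.build();
network.allocate();
network.fill(weight_files, bias_files);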
diff --git a/tests/validation/CL/SoftmaxLayer.cpp b/tests/validation/CL/SoftmaxLayer.cpp
index b47f84f8cd..66ca0b8ca7 100644
--- a/tests/validation/CL/SoftmaxLayer.cpp
+++ b/tests/validation/CL/SoftmaxLayer.cpp
@@ -64,13 +64,11 @@ TEST_SUITE(SoftmaxLayer)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(concat(datasets::SoftmaxLayerSmallShapes(), datasets::SoftmaxLayerLargeShapes()), CNNDataTypes), shape, data_type)
{
- // Set fixed point position and quantization info if is allowed
- const int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
- const QuantizationInfo quantization_info = is_data_type_quantized_asymmetric(data_type) ? QuantizationInfo(1.f / 255.f, 0) : QuantizationInfo();
+ const QuantizationInfo quantization_info = is_data_type_quantized_asymmetric(data_type) ? QuantizationInfo(1.f / 255.f, 0) : QuantizationInfo();
// Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, data_type, 1, fixed_point_position, quantization_info);
- CLTensor dst = create_tensor<CLTensor>(shape, data_type, 1, fixed_point_position, QuantizationInfo(1.f / 256.f, 0));
+ CLTensor src = create_tensor<CLTensor>(shape, data_type, 1, quantization_info);
+ CLTensor dst = create_tensor<CLTensor>(shape, data_type, 1, QuantizationInfo(1.f / 256.f, 0));
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -167,9 +165,6 @@ TEST_SUITE_END()
TEST_SUITE_END()
template <typename T>
-using CLSoftmaxLayerFixedPointFixture = SoftmaxValidationFixedPointFixture<CLTensor, CLAccessor, CLSoftmaxLayer, T>;
-
-template <typename T>
using CLSoftmaxLayerQuantizedFixture = SoftmaxValidationQuantizedFixture<CLTensor, CLAccessor, CLSoftmaxLayer, T>;
TEST_SUITE(Quantized)
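Note the asymmetry the hunk preserves: the source takes the dataset-dependent quantization info, while the destination is pinned to QuantizationInfo(1.f / 256.f, 0). Softmax outputs lie in [0, 1], so a 1/256 scale with zero offset spreads that interval over the full uint8 range:

// Asymmetric mapping: real = scale * (q - offset).
// With scale = 1/256 and offset = 0, q = 128 decodes to 0.5 and
// q = 255 decodes to ~0.996, covering softmax's [0, 1] output range.
CLTensor dst = create_tensor<CLTensor>(shape, data_type, 1, QuantizationInfo(1.f / 256.f, 0));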
diff --git a/tests/validation/CL/Winograd.cpp b/tests/validation/CL/Winograd.cpp
index f68ec8c286..501afaccf9 100644
--- a/tests/validation/CL/Winograd.cpp
+++ b/tests/validation/CL/Winograd.cpp
@@ -169,7 +169,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
TensorShape shape_out = compute_winograd_input_transform_shape(tensor_info_in, winograd_info);
// Create tensors
- CLTensor in = create_tensor<CLTensor>(shape_in, data_type, 1, 0, QuantizationInfo(), data_layout);
+ CLTensor in = create_tensor<CLTensor>(shape_in, data_type, 1, QuantizationInfo(), data_layout);
CLTensor out = create_tensor<CLTensor>(shape_out, data_type);
ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -216,7 +216,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
TensorShape shape_out = compute_winograd_input_transform_shape(tensor_info_in, winograd_info);
// Create tensors
- CLTensor in = create_tensor<CLTensor>(shape_in_nhwc, data_type, 1, 0, QuantizationInfo(), data_layout);
+ CLTensor in = create_tensor<CLTensor>(shape_in_nhwc, data_type, 1, QuantizationInfo(), data_layout);
CLTensor out = create_tensor<CLTensor>(shape_out, data_type);
ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -296,8 +296,8 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL,
TensorShape shape_b = compute_winograd_filter_transform_shape(TensorInfo(shape_a, 1, data_type), winograd_info);
// Create tensors
- CLTensor a = create_tensor<CLTensor>(shape_a, data_type, 1, 0, QuantizationInfo(), data_layout);
- CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, 0, QuantizationInfo(), data_layout);
+ CLTensor a = create_tensor<CLTensor>(shape_a, data_type, 1, QuantizationInfo(), data_layout);
+ CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, QuantizationInfo(), data_layout);
ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -348,8 +348,8 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL,
TensorShape shape_b = compute_winograd_filter_transform_shape(tensor_info_in, winograd_info);
// Create tensors
- CLTensor a = create_tensor<CLTensor>(shape_in_nhwc, data_type, 1, 0, QuantizationInfo(), data_layout);
- CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, 0, QuantizationInfo(), data_layout);
+ CLTensor a = create_tensor<CLTensor>(shape_in_nhwc, data_type, 1, QuantizationInfo(), data_layout);
+ CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, QuantizationInfo(), data_layout);
ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -444,7 +444,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
// Create tensors
CLTensor a = create_tensor<CLTensor>(shape_a, data_type);
- CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, 0, QuantizationInfo(), winograd_info.output_data_layout);
+ CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, QuantizationInfo(), winograd_info.output_data_layout);
ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -481,7 +481,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
// Create tensors
CLTensor a = create_tensor<CLTensor>(shape_a, data_type);
- CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, 0, QuantizationInfo(), winograd_info.output_data_layout);
+ CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, QuantizationInfo(), winograd_info.output_data_layout);
ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
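The Winograd hunks are again the pure create_tensor change; the transform output shapes still come from the shape-calculator helpers driven by a WinogradInfo. The surviving pattern, condensed from the hunks above:

const TensorInfo  tensor_info_in(shape_in, 1, data_type);
const TensorShape shape_out = compute_winograd_input_transform_shape(tensor_info_in, winograd_info);

CLTensor in  = create_tensor<CLTensor>(shape_in, data_type, 1, QuantizationInfo(), data_layout);
CLTensor out = create_tensor<CLTensor>(shape_out, data_type);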
diff --git a/tests/validation/GLES_COMPUTE/ActivationLayer.cpp b/tests/validation/GLES_COMPUTE/ActivationLayer.cpp
index 23821d35fa..a8c7253b8f 100644
--- a/tests/validation/GLES_COMPUTE/ActivationLayer.cpp
+++ b/tests/validation/GLES_COMPUTE/ActivationLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -112,12 +112,9 @@ TEST_SUITE(ActivationLayer)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(concat(datasets::SmallShapes(), datasets::LargeShapes()), CNNDataTypes), framework::dataset::make("InPlace", { false, true })),
shape, data_type, in_place)
{
- // Set fixed point position data type allowed
- const int fixed_point_position = 0;
-
// Create tensors
- GCTensor src = create_tensor<GCTensor>(shape, data_type, 1, fixed_point_position);
- GCTensor dst = create_tensor<GCTensor>(shape, data_type, 1, fixed_point_position);
+ GCTensor src = create_tensor<GCTensor>(shape, data_type, 1);
+ GCTensor dst = create_tensor<GCTensor>(shape, data_type, 1);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
diff --git a/tests/validation/GLES_COMPUTE/BatchNormalizationLayer.cpp b/tests/validation/GLES_COMPUTE/BatchNormalizationLayer.cpp
index d22f1e9958..3a3d1d796d 100644
--- a/tests/validation/GLES_COMPUTE/BatchNormalizationLayer.cpp
+++ b/tests/validation/GLES_COMPUTE/BatchNormalizationLayer.cpp
@@ -67,9 +67,6 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combi
framework::dataset::make("DataLayout", { DataLayout::NCHW })),
shape0, shape1, epsilon, use_beta, use_gamma, dt, data_layout)
{
- // Set fixed point position data type allowed
- int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
-
TensorShape src_dst_shapes = shape0;
if(data_layout == DataLayout::NHWC)
{
@@ -77,12 +74,12 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combi
}
// Create tensors
- GCTensor src = create_tensor<GCTensor>(src_dst_shapes, dt, 1, fixed_point_position, QuantizationInfo(), data_layout);
- GCTensor dst = create_tensor<GCTensor>(src_dst_shapes, dt, 1, fixed_point_position, QuantizationInfo(), data_layout);
- GCTensor mean = create_tensor<GCTensor>(shape1, dt, 1, fixed_point_position);
- GCTensor var = create_tensor<GCTensor>(shape1, dt, 1, fixed_point_position);
- GCTensor beta = create_tensor<GCTensor>(shape1, dt, 1, fixed_point_position);
- GCTensor gamma = create_tensor<GCTensor>(shape1, dt, 1, fixed_point_position);
+ GCTensor src = create_tensor<GCTensor>(src_dst_shapes, dt, 1, QuantizationInfo(), data_layout);
+ GCTensor dst = create_tensor<GCTensor>(src_dst_shapes, dt, 1, QuantizationInfo(), data_layout);
+ GCTensor mean = create_tensor<GCTensor>(shape1, dt, 1);
+ GCTensor var = create_tensor<GCTensor>(shape1, dt, 1);
+ GCTensor beta = create_tensor<GCTensor>(shape1, dt, 1);
+ GCTensor gamma = create_tensor<GCTensor>(shape1, dt, 1);
// Create and Configure function
GCBatchNormalizationLayer norm;
diff --git a/tests/validation/GLES_COMPUTE/ConvolutionLayer.cpp b/tests/validation/GLES_COMPUTE/ConvolutionLayer.cpp
index 0f8151278a..2961dc9519 100644
--- a/tests/validation/GLES_COMPUTE/ConvolutionLayer.cpp
+++ b/tests/validation/GLES_COMPUTE/ConvolutionLayer.cpp
@@ -70,16 +70,13 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
ActivationFunctionsDataset),
input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, act_info)
{
- // Set fixed point position data type allowed
- int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
// Create tensors
- GCTensor src = create_tensor<GCTensor>(input_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
- GCTensor weights = create_tensor<GCTensor>(weights_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
- GCTensor bias = create_tensor<GCTensor>(bias_shape, bias_data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
- GCTensor dst = create_tensor<GCTensor>(output_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
+ GCTensor src = create_tensor<GCTensor>(input_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+ GCTensor weights = create_tensor<GCTensor>(weights_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+ GCTensor bias = create_tensor<GCTensor>(bias_shape, bias_data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+ GCTensor dst = create_tensor<GCTensor>(output_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
diff --git a/tests/validation/GLES_COMPUTE/FullyConnectedLayer.cpp b/tests/validation/GLES_COMPUTE/FullyConnectedLayer.cpp
index 4040f468f4..49716dc946 100644
--- a/tests/validation/GLES_COMPUTE/FullyConnectedLayer.cpp
+++ b/tests/validation/GLES_COMPUTE/FullyConnectedLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -66,9 +66,6 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
CNNDataTypes),
src_shape, weights_shape, bias_shape, dst_shape, transpose_weights, reshape_weights, data_type)
{
- // Set fixed point position data type allowed
- int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
TensorShape ws(weights_shape);
// Transpose weights if not done in the function
@@ -80,10 +77,10 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
}
// Create tensors
- GCTensor src = create_tensor<GCTensor>(src_shape, data_type, 1, fixed_point_position);
- GCTensor weights = create_tensor<GCTensor>(ws, data_type, 1, fixed_point_position);
- GCTensor bias = create_tensor<GCTensor>(bias_shape, data_type, 1, fixed_point_position);
- GCTensor dst = create_tensor<GCTensor>(dst_shape, data_type, 1, fixed_point_position);
+ GCTensor src = create_tensor<GCTensor>(src_shape, data_type, 1);
+ GCTensor weights = create_tensor<GCTensor>(ws, data_type, 1);
+ GCTensor bias = create_tensor<GCTensor>(bias_shape, data_type, 1);
+ GCTensor dst = create_tensor<GCTensor>(dst_shape, data_type, 1);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
diff --git a/tests/validation/GLES_COMPUTE/GEMM.cpp b/tests/validation/GLES_COMPUTE/GEMM.cpp
index 2abad3206d..6417143258 100644
--- a/tests/validation/GLES_COMPUTE/GEMM.cpp
+++ b/tests/validation/GLES_COMPUTE/GEMM.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -59,14 +59,11 @@ TEST_SUITE(GEMM)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallGEMMDataset(), datasets::LargeGEMMDataset()), CNNDataTypes),
shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type)
{
- // Set fixed point position data type allowed
- const int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
// Create tensors
- GCTensor a = create_tensor<GCTensor>(shape_a, data_type, 1, fixed_point_position);
- GCTensor b = create_tensor<GCTensor>(shape_b, data_type, 1, fixed_point_position);
- GCTensor c = create_tensor<GCTensor>(shape_c, data_type, 1, fixed_point_position);
- GCTensor dst = create_tensor<GCTensor>(output_shape, data_type, 1, fixed_point_position);
+ GCTensor a = create_tensor<GCTensor>(shape_a, data_type, 1);
+ GCTensor b = create_tensor<GCTensor>(shape_b, data_type, 1);
+ GCTensor c = create_tensor<GCTensor>(shape_c, data_type, 1);
+ GCTensor dst = create_tensor<GCTensor>(output_shape, data_type, 1);
ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
diff --git a/tests/validation/GLES_COMPUTE/SoftmaxLayer.cpp b/tests/validation/GLES_COMPUTE/SoftmaxLayer.cpp
index 2c281419de..abc277aaa9 100644
--- a/tests/validation/GLES_COMPUTE/SoftmaxLayer.cpp
+++ b/tests/validation/GLES_COMPUTE/SoftmaxLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -59,12 +59,9 @@ TEST_SUITE(SoftmaxLayer)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(concat(datasets::SoftmaxLayerSmallShapes(), datasets::SoftmaxLayerLargeShapes()), CNNDataTypes), shape, data_type)
{
- // Set fixed point position data type allowed
- const int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
// Create tensors
- GCTensor src = create_tensor<GCTensor>(shape, data_type, 1, fixed_point_position);
- GCTensor dst = create_tensor<GCTensor>(shape, data_type, 1, fixed_point_position);
+ GCTensor src = create_tensor<GCTensor>(shape, data_type, 1);
+ GCTensor dst = create_tensor<GCTensor>(shape, data_type, 1);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
diff --git a/tests/validation/Helpers.cpp b/tests/validation/Helpers.cpp
index ff69b1c4b6..8832fce6f1 100644
--- a/tests/validation/Helpers.cpp
+++ b/tests/validation/Helpers.cpp
@@ -150,7 +150,7 @@ CannyEdgeParameters canny_edge_parameters()
SimpleTensor<float> convert_from_asymmetric(const SimpleTensor<uint8_t> &src)
{
const QuantizationInfo &quantization_info = src.quantization_info();
- SimpleTensor<float> dst{ src.shape(), DataType::F32, 1, 0, QuantizationInfo(), src.data_layout() };
+ SimpleTensor<float> dst{ src.shape(), DataType::F32, 1, QuantizationInfo(), src.data_layout() };
for(int i = 0; i < src.num_elements(); ++i)
{
@@ -161,7 +161,7 @@ SimpleTensor<float> convert_from_asymmetric(const SimpleTensor<uint8_t> &src)
SimpleTensor<uint8_t> convert_to_asymmetric(const SimpleTensor<float> &src, const QuantizationInfo &quantization_info)
{
- SimpleTensor<uint8_t> dst{ src.shape(), DataType::QASYMM8, 1, 0, quantization_info };
+ SimpleTensor<uint8_t> dst{ src.shape(), DataType::QASYMM8, 1, quantization_info };
for(int i = 0; i < src.num_elements(); ++i)
{
dst[i] = quantization_info.quantize(src[i], RoundingPolicy::TO_NEAREST_UP);
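Both helpers now build their destination SimpleTensor without the fixed-point slot; the conversions themselves remain the standard asymmetric mapping. A self-contained sketch of the element-wise math that QuantizationInfo's quantize and dequantize are assumed to implement:

#include <algorithm>
#include <cmath>
#include <cstdint>

// Dequantize: real = scale * (q - offset)
float dequantize(uint8_t q, float scale, int offset)
{
    return scale * (static_cast<int>(q) - offset);
}

// Quantize: q = clamp(round(real / scale) + offset, 0, 255)
uint8_t quantize(float real, float scale, int offset)
{
    const int q = static_cast<int>(std::lround(real / scale)) + offset;
    return static_cast<uint8_t>(std::min(255, std::max(0, q)));
}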
diff --git a/tests/validation/Helpers.h b/tests/validation/Helpers.h
index 88262d5e66..2b4d277e92 100644
--- a/tests/validation/Helpers.h
+++ b/tests/validation/Helpers.h
@@ -52,14 +52,13 @@ struct is_floating_point<half> : public std::true_type
/** Helper function to get the testing range for each activation layer.
*
- * @param[in] activation Activation function to test.
- * @param[in] data_type Data type.
- * @param[in] fixed_point_position Number of bits for the fractional part. Defaults to 1.
+ * @param[in] activation Activation function to test.
+ * @param[in] data_type Data type.
*
* @return A pair containing the lower and upper testing bounds for a given function.
*/
template <typename T>
-std::pair<T, T> get_activation_layer_test_bounds(ActivationLayerInfo::ActivationFunction activation, DataType data_type, int fixed_point_position = 0)
+std::pair<T, T> get_activation_layer_test_bounds(ActivationLayerInfo::ActivationFunction activation, DataType data_type)
{
std::pair<T, T> bounds;
@@ -178,12 +177,12 @@ void fill_lookuptable(T &&table)
/** Helper function to get the testing range for batch normalization layer.
*
- * @param[in] fixed_point_position (Optional) Number of bits for the fractional part. Defaults to 1.
+ * @param[in] fixed_point_position (Optional) Number of bits for the fractional part. Defaults to 0.
*
* @return A pair containing the lower and upper testing bounds.
*/
template <typename T>
-std::pair<T, T> get_batchnormalization_layer_test_bounds(int fixed_point_position = 1)
+std::pair<T, T> get_batchnormalization_layer_test_bounds(int fixed_point_position = 0)
{
const bool is_float = std::is_floating_point<T>::value;
std::pair<T, T> bounds;
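get_batchnormalization_layer_test_bounds is the one helper that keeps its fixed_point_position parameter, with the documented default corrected to match the new code default of 0; with only float paths left the argument is effectively inert. Illustrative use:

// Float instantiation ignores the fixed point argument entirely.
const std::pair<float, float> bounds = get_batchnormalization_layer_test_bounds<float>();
// bounds.first / bounds.second bracket the testing range for the
// batch normalization parameter tensors.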
diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp
index 289ca4870e..dee264c6b8 100644
--- a/tests/validation/NEON/ActivationLayer.cpp
+++ b/tests/validation/NEON/ActivationLayer.cpp
@@ -90,12 +90,9 @@ TEST_SUITE(ActivationLayer)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(concat(datasets::SmallShapes(), datasets::LargeShapes()), CNNDataTypes), framework::dataset::make("InPlace", { false, true })),
shape, data_type, in_place)
{
- // Set fixed point position data type allowed
- const int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
// Create tensors
- Tensor src = create_tensor<Tensor>(shape, data_type, 1, fixed_point_position);
- Tensor dst = create_tensor<Tensor>(shape, data_type, 1, fixed_point_position);
+ Tensor src = create_tensor<Tensor>(shape, data_type, 1);
+ Tensor dst = create_tensor<Tensor>(shape, data_type, 1);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -196,9 +193,6 @@ TEST_SUITE_END()
TEST_SUITE_END()
template <typename T>
-using NEActivationLayerFixedPointFixture = ActivationValidationFixedPointFixture<Tensor, Accessor, NEActivationLayer, T>;
-
-template <typename T>
using NEActivationLayerQuantizedFixture = ActivationValidationQuantizedFixture<Tensor, Accessor, NEActivationLayer, T>;
/** Input data sets. */
diff --git a/tests/validation/NEON/ArithmeticAddition.cpp b/tests/validation/NEON/ArithmeticAddition.cpp
index b01e5d929d..3632c3c207 100644
--- a/tests/validation/NEON/ArithmeticAddition.cpp
+++ b/tests/validation/NEON/ArithmeticAddition.cpp
@@ -163,9 +163,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEArithmeticAdditionFixture<int16_t>, framework
}
TEST_SUITE_END()
-template <typename T>
-using NEArithmeticAdditionFixedPointFixture = ArithmeticAdditionValidationFixedPointFixture<Tensor, Accessor, NEArithmeticAddition, T>;
-
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(F16)
diff --git a/tests/validation/NEON/ArithmeticSubtraction.cpp b/tests/validation/NEON/ArithmeticSubtraction.cpp
index fc25465e6d..210ed4578f 100644
--- a/tests/validation/NEON/ArithmeticSubtraction.cpp
+++ b/tests/validation/NEON/ArithmeticSubtraction.cpp
@@ -233,9 +233,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEAriSubU8S16ToS16Fixture, framework::DatasetMo
TEST_SUITE_END()
TEST_SUITE_END()
-template <typename T1, typename T2 = T1, typename T3 = T1>
-using NEArithmeticSubtractionFixedPointFixture = ArithmeticSubtractionValidationFixedPointFixture<Tensor, Accessor, NEArithmeticSubtraction, T1, T2, T3>;
-
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
diff --git a/tests/validation/NEON/BatchNormalizationLayer.cpp b/tests/validation/NEON/BatchNormalizationLayer.cpp
index 3a18a0a93b..ca13d26495 100644
--- a/tests/validation/NEON/BatchNormalizationLayer.cpp
+++ b/tests/validation/NEON/BatchNormalizationLayer.cpp
@@ -68,9 +68,6 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combi
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
shape0, shape1, epsilon, use_beta, use_gamma, dt, data_layout)
{
- // Set fixed point position data type allowed
- const int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
-
TensorShape src_dst_shapes = shape0;
if(data_layout == DataLayout::NHWC)
{
@@ -78,12 +75,12 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combi
}
// Create tensors
- Tensor src = create_tensor<Tensor>(src_dst_shapes, dt, 1, fixed_point_position, QuantizationInfo(), data_layout);
- Tensor dst = create_tensor<Tensor>(src_dst_shapes, dt, 1, fixed_point_position, QuantizationInfo(), data_layout);
- Tensor mean = create_tensor<Tensor>(shape1, dt, 1, fixed_point_position);
- Tensor var = create_tensor<Tensor>(shape1, dt, 1, fixed_point_position);
- Tensor beta = create_tensor<Tensor>(shape1, dt, 1, fixed_point_position);
- Tensor gamma = create_tensor<Tensor>(shape1, dt, 1, fixed_point_position);
+ Tensor src = create_tensor<Tensor>(src_dst_shapes, dt, 1, QuantizationInfo(), data_layout);
+ Tensor dst = create_tensor<Tensor>(src_dst_shapes, dt, 1, QuantizationInfo(), data_layout);
+ Tensor mean = create_tensor<Tensor>(shape1, dt, 1);
+ Tensor var = create_tensor<Tensor>(shape1, dt, 1);
+ Tensor beta = create_tensor<Tensor>(shape1, dt, 1);
+ Tensor gamma = create_tensor<Tensor>(shape1, dt, 1);
// Create and Configure function
NEBatchNormalizationLayer norm;
diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp
index 94b38c2c81..591d1424c8 100644
--- a/tests/validation/NEON/ConvolutionLayer.cpp
+++ b/tests/validation/NEON/ConvolutionLayer.cpp
@@ -154,16 +154,13 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
{ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })),
input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, act_info)
{
- // Set fixed point position data type allowed
- int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
// Create tensors
- Tensor src = create_tensor<Tensor>(input_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
- Tensor weights = create_tensor<Tensor>(weights_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
- Tensor bias = create_tensor<Tensor>(bias_shape, bias_data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
- Tensor dst = create_tensor<Tensor>(output_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
+ Tensor src = create_tensor<Tensor>(input_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+ Tensor weights = create_tensor<Tensor>(weights_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+ Tensor bias = create_tensor<Tensor>(bias_shape, bias_data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+ Tensor dst = create_tensor<Tensor>(output_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -246,9 +243,6 @@ TEST_SUITE_END()
TEST_SUITE_END()
template <typename T>
-using NEGEMMConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;
-
-template <typename T>
using NEGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;
const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
diff --git a/tests/validation/NEON/DepthConvertLayer.cpp b/tests/validation/NEON/DepthConvertLayer.cpp
index 2bd3db7075..78070d004e 100644
--- a/tests/validation/NEON/DepthConvertLayer.cpp
+++ b/tests/validation/NEON/DepthConvertLayer.cpp
@@ -66,19 +66,15 @@ template <typename T>
using NEDepthConvertLayerToU8Fixture = DepthConvertLayerValidationFixture<Tensor, Accessor, NEDepthConvertLayer, T, uint8_t>;
template <typename T>
using NEDepthConvertLayerToU32Fixture = DepthConvertLayerValidationFixture<Tensor, Accessor, NEDepthConvertLayer, T, uint32_t>;
-template <typename T>
-using NEDepthConvertLayerToFP32FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture<Tensor, Accessor, NEDepthConvertLayer, T, float>;
TEST_SUITE(U8_to_U16)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
DepthConvertLayerShiftDataset),
shape, policy, shift)
{
- int fixed_point_position = 0;
-
// Create tensors
- Tensor src = create_tensor<Tensor>(shape, DataType::U8, 1, fixed_point_position);
- Tensor dst = create_tensor<Tensor>(shape, DataType::U16, 1, fixed_point_position);
+ Tensor src = create_tensor<Tensor>(shape, DataType::U8, 1);
+ Tensor dst = create_tensor<Tensor>(shape, DataType::U16, 1);
// Create and Configure function
NEDepthConvertLayer depth_convert;
@@ -116,11 +112,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
DepthConvertLayerShiftDataset),
shape, policy, shift)
{
- int fixed_point_position = 0;
-
// Create tensors
- Tensor src = create_tensor<Tensor>(shape, DataType::U8, 1, fixed_point_position);
- Tensor dst = create_tensor<Tensor>(shape, DataType::S16, 1, fixed_point_position);
+ Tensor src = create_tensor<Tensor>(shape, DataType::U8, 1);
+ Tensor dst = create_tensor<Tensor>(shape, DataType::S16, 1);
// Create and Configure function
NEDepthConvertLayer depth_convert;
@@ -157,11 +151,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
DepthConvertLayerShiftDataset),
shape, policy, shift)
{
- int fixed_point_position = 0;
-
// Create tensors
- Tensor src = create_tensor<Tensor>(shape, DataType::U8, 1, fixed_point_position);
- Tensor dst = create_tensor<Tensor>(shape, DataType::S32, 1, fixed_point_position);
+ Tensor src = create_tensor<Tensor>(shape, DataType::U8, 1);
+ Tensor dst = create_tensor<Tensor>(shape, DataType::S32, 1);
// Create and Configure function
NEDepthConvertLayer depth_convert;
@@ -199,11 +191,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
DepthConvertLayerShiftDataset),
shape, policy, shift)
{
- int fixed_point_position = 0;
-
// Create tensors
- Tensor src = create_tensor<Tensor>(shape, DataType::U16, 1, fixed_point_position);
- Tensor dst = create_tensor<Tensor>(shape, DataType::U8, 1, fixed_point_position);
+ Tensor src = create_tensor<Tensor>(shape, DataType::U16, 1);
+ Tensor dst = create_tensor<Tensor>(shape, DataType::U8, 1);
// Create and Configure function
NEDepthConvertLayer depth_convert;
@@ -240,11 +230,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
DepthConvertLayerShiftDataset),
shape, policy, shift)
{
- int fixed_point_position = 0;
-
// Create tensors
- Tensor src = create_tensor<Tensor>(shape, DataType::U16, 1, fixed_point_position);
- Tensor dst = create_tensor<Tensor>(shape, DataType::U32, 1, fixed_point_position);
+ Tensor src = create_tensor<Tensor>(shape, DataType::U16, 1);
+ Tensor dst = create_tensor<Tensor>(shape, DataType::U32, 1);
// Create and Configure function
NEDepthConvertLayer depth_convert;
@@ -281,11 +269,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
DepthConvertLayerShiftDataset),
shape, policy, shift)
{
- int fixed_point_position = 0;
-
// Create tensors
- Tensor src = create_tensor<Tensor>(shape, DataType::S16, 1, fixed_point_position);
- Tensor dst = create_tensor<Tensor>(shape, DataType::U8, 1, fixed_point_position);
+ Tensor src = create_tensor<Tensor>(shape, DataType::S16, 1);
+ Tensor dst = create_tensor<Tensor>(shape, DataType::U8, 1);
// Create and Configure function
NEDepthConvertLayer depth_convert;
@@ -322,11 +308,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
DepthConvertLayerShiftDataset),
shape, policy, shift)
{
- int fixed_point_position = 0;
-
// Create tensors
- Tensor src = create_tensor<Tensor>(shape, DataType::S16, 1, fixed_point_position);
- Tensor dst = create_tensor<Tensor>(shape, DataType::S32, 1, fixed_point_position);
+ Tensor src = create_tensor<Tensor>(shape, DataType::S16, 1);
+ Tensor dst = create_tensor<Tensor>(shape, DataType::S32, 1);
// Create and Configure function
NEDepthConvertLayer depth_convert;
diff --git a/tests/validation/NEON/DilatedConvolutionLayer.cpp b/tests/validation/NEON/DilatedConvolutionLayer.cpp
index e703c67868..7cfffc0c2b 100644
--- a/tests/validation/NEON/DilatedConvolutionLayer.cpp
+++ b/tests/validation/NEON/DilatedConvolutionLayer.cpp
@@ -106,16 +106,13 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
CNNDataTypes),
input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type)
{
- // Set fixed point position data type allowed
- int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
// Create tensors
- Tensor src = create_tensor<Tensor>(input_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
- Tensor weights = create_tensor<Tensor>(weights_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
- Tensor bias = create_tensor<Tensor>(bias_shape, bias_data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
- Tensor dst = create_tensor<Tensor>(output_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
+ Tensor src = create_tensor<Tensor>(input_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+ Tensor weights = create_tensor<Tensor>(weights_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+ Tensor bias = create_tensor<Tensor>(bias_shape, bias_data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+ Tensor dst = create_tensor<Tensor>(output_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -198,9 +195,6 @@ TEST_SUITE_END()
TEST_SUITE_END()
template <typename T>
-using NEGEMMDilatedConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;
-
-template <typename T>
using NEGEMMDilatedConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;
TEST_SUITE(Quantized)
diff --git a/tests/validation/NEON/DirectConvolutionLayer.cpp b/tests/validation/NEON/DirectConvolutionLayer.cpp
index 4995d881cc..bf5b33c9a2 100644
--- a/tests/validation/NEON/DirectConvolutionLayer.cpp
+++ b/tests/validation/NEON/DirectConvolutionLayer.cpp
@@ -173,9 +173,6 @@ FIXTURE_DATA_TEST_CASE(Run, NEDirectConvolutionLayerFixture<float>, framework::D
TEST_SUITE_END()
TEST_SUITE_END()
-template <typename T>
-using NEDirectConvolutionLayerFixedPointFixture = DirectConvolutionValidationFixedPointFixture<Tensor, Accessor, NEDirectConvolutionLayer, T>;
-
const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
ActivationLayerInfo(),
diff --git a/tests/validation/NEON/FullyConnectedLayer.cpp b/tests/validation/NEON/FullyConnectedLayer.cpp
index 3adcf61dc9..174778b8ef 100644
--- a/tests/validation/NEON/FullyConnectedLayer.cpp
+++ b/tests/validation/NEON/FullyConnectedLayer.cpp
@@ -68,9 +68,6 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
CNNDataTypes),
src_shape, weights_shape, bias_shape, dst_shape, transpose_weights, reshape_weights, data_type)
{
- // Set fixed point position data type allowed
- int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
TensorShape ws(weights_shape);
// Transpose weights if not done in the function
@@ -92,10 +89,10 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
}
// Create tensors
- Tensor src = create_tensor<Tensor>(src_shape, data_type, 1, fixed_point_position);
- Tensor weights = create_tensor<Tensor>(ws, data_type, 1, fixed_point_position);
- Tensor bias = create_tensor<Tensor>(bias_shape, data_type, 1, fixed_point_position);
- Tensor dst = create_tensor<Tensor>(dst_shape, data_type, 1, fixed_point_position);
+ Tensor src = create_tensor<Tensor>(src_shape, data_type, 1);
+ Tensor weights = create_tensor<Tensor>(ws, data_type, 1);
+ Tensor bias = create_tensor<Tensor>(bias_shape, data_type, 1);
+ Tensor dst = create_tensor<Tensor>(dst_shape, data_type, 1);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -192,9 +189,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEFullyConnectedLayerFixture<float>, framework:
TEST_SUITE_END()
TEST_SUITE_END()
-template <typename T>
-using NEFullyConnectedLayerFixedPointFixture = FullyConnectedLayerValidationFixedPointFixture<Tensor, Accessor, NEFullyConnectedLayer, T, true>;
-
TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
diff --git a/tests/validation/NEON/GEMM.cpp b/tests/validation/NEON/GEMM.cpp
index e0f63a8a2d..9c64131a61 100644
--- a/tests/validation/NEON/GEMM.cpp
+++ b/tests/validation/NEON/GEMM.cpp
@@ -98,14 +98,11 @@ TEST_SUITE_END() // INTERLEAVE_4X4
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallGEMMDataset(), datasets::LargeGEMMDataset()), CNNDataTypes),
shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type)
{
- // Set fixed point position data type allowed
- const int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
// Create tensors
- Tensor a = create_tensor<Tensor>(shape_a, data_type, 1, fixed_point_position);
- Tensor b = create_tensor<Tensor>(shape_b, data_type, 1, fixed_point_position);
- Tensor c = create_tensor<Tensor>(shape_c, data_type, 1, fixed_point_position);
- Tensor dst = create_tensor<Tensor>(output_shape, data_type, 1, fixed_point_position);
+ Tensor a = create_tensor<Tensor>(shape_a, data_type, 1);
+ Tensor b = create_tensor<Tensor>(shape_b, data_type, 1);
+ Tensor c = create_tensor<Tensor>(shape_c, data_type, 1);
+ Tensor dst = create_tensor<Tensor>(output_shape, data_type, 1);
ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -153,9 +150,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMFixture<float>, framework::DatasetMode::N
TEST_SUITE_END()
TEST_SUITE_END()
-template <typename T>
-using NEGEMMFixedPointFixture = GEMMValidationFixedPointFixture<Tensor, Accessor, NEGEMM, T>;
-
TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
diff --git a/tests/validation/NEON/NormalizationLayer.cpp b/tests/validation/NEON/NormalizationLayer.cpp
index 8c66611f49..02cca0b452 100644
--- a/tests/validation/NEON/NormalizationLayer.cpp
+++ b/tests/validation/NEON/NormalizationLayer.cpp
@@ -50,9 +50,6 @@ constexpr AbsoluteTolerance<float> tolerance_f16(0.001f);
constexpr AbsoluteTolerance<float> tolerance_f32(0.00001f);
/** Input data set. */
-const auto NormalizationDatasetQS = combine(combine(combine(combine(datasets::TinyShapes(), datasets::NormalizationTypes()), framework::dataset::make("NormalizationSize", 3, 9, 2)),
- framework::dataset::make("Beta", { 0.5f, 1.f, 2.f })),
- framework::dataset::make("IsScaled", { true }));
const auto NormalizationDataset = combine(combine(combine(combine(datasets::SmallShapes(), datasets::NormalizationTypes()), framework::dataset::make("NormalizationSize", 3, 9, 2)),
framework::dataset::make("Beta", { 0.5f, 1.f, 2.f })),
framework::dataset::make("IsScaled", { true }));
@@ -132,9 +129,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NENormalizationLayerFixture<float>, framework::
TEST_SUITE_END()
TEST_SUITE_END()
-template <typename T>
-using NENormalizationLayerFixedPointFixture = NormalizationValidationFixedPointFixture<Tensor, Accessor, NENormalizationLayer, T>;
-
TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
diff --git a/tests/validation/NEON/PoolingLayer.cpp b/tests/validation/NEON/PoolingLayer.cpp
index 8762f1f7cc..bbfca46ca9 100644
--- a/tests/validation/NEON/PoolingLayer.cpp
+++ b/tests/validation/NEON/PoolingLayer.cpp
@@ -50,11 +50,6 @@ const auto PoolingLayerDatasetFP = combine(combine(combine(datasets::PoolingType
framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })),
framework::dataset::make("ExcludePadding", { true, false }));
-/** Input data set for quantized data types */
-const auto PoolingLayerDatasetQS = combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }), framework::dataset::make("PoolingSize", { Size2D(2, 2), Size2D(3, 3) })),
- framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })),
- framework::dataset::make("ExcludePadding", { false }));
-
/** Input data set for asymmetric data type */
const auto PoolingLayerDatasetQASYMM8 = combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }), framework::dataset::make("PoolingSize", { Size2D(2, 2), Size2D(3, 3), Size2D(4, 4), Size2D(9, 9), Size2D(3, 7), Size2D(7, 8) })),
@@ -159,9 +154,6 @@ TEST_SUITE_END() // FP16
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
TEST_SUITE_END() // Float
-template <typename T>
-using NEPoolingLayerFixedPointFixture = PoolingLayerValidationFixedPointFixture<Tensor, Accessor, NEPoolingLayer, T>;
-
TEST_SUITE(Quantized)
template <typename T>
diff --git a/tests/validation/NEON/SYSTEM/AlexNet.cpp b/tests/validation/NEON/SYSTEM/AlexNet.cpp
index 3fa19e4f03..adcfe72eaa 100644
--- a/tests/validation/NEON/SYSTEM/AlexNet.cpp
+++ b/tests/validation/NEON/SYSTEM/AlexNet.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -80,7 +80,7 @@ std::vector<unsigned int> compute_alexnet(DataType dt, unsigned int batches, std
};
NEAlexNetModel network{};
- network.init(dt, 4, batches);
+ network.init(dt, batches);
network.build();
network.allocate();
network.fill(weight_files, bias_files);
diff --git a/tests/validation/NEON/Scale.cpp b/tests/validation/NEON/Scale.cpp
index 8940259f13..5f5cfdd808 100644
--- a/tests/validation/NEON/Scale.cpp
+++ b/tests/validation/NEON/Scale.cpp
@@ -159,8 +159,8 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combi
shape_scaled.set(idx_height, src_shape[idx_height] * scale_y);
// Create tensors
- Tensor src = create_tensor<Tensor>(src_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
- Tensor dst = create_tensor<Tensor>(shape_scaled, data_type, 1, 0, QuantizationInfo(), data_layout);
+ Tensor src = create_tensor<Tensor>(src_shape, data_type, 1, QuantizationInfo(), data_layout);
+ Tensor dst = create_tensor<Tensor>(shape_scaled, data_type, 1, QuantizationInfo(), data_layout);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
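
The Scale test shows the variant that also carries quantization and layout metadata. A hedged sketch of what the trimmed helper amounts to, assuming the ITensorInfo setters set_quantization_info() and set_data_layout() available in this release:

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

// Hypothetical sketch: the integer fixed-point slot that used to sit between
// num_channels and QuantizationInfo is gone, leaving shape / channels / type
// plus the two metadata setters.
inline Tensor make_scaled_tensor(const TensorShape &shape, DataType data_type,
                                 const QuantizationInfo &qinfo, DataLayout layout)
{
    TensorInfo info(shape, 1 /* num_channels */, data_type);
    info.set_quantization_info(qinfo); // assumption: setter present in this release
    info.set_data_layout(layout);      // assumption: setter present in this release
    Tensor tensor;
    tensor.allocator()->init(info);
    return tensor;
}
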
diff --git a/tests/validation/NEON/SoftmaxLayer.cpp b/tests/validation/NEON/SoftmaxLayer.cpp
index b6efc8f9b5..8c0d46bc41 100644
--- a/tests/validation/NEON/SoftmaxLayer.cpp
+++ b/tests/validation/NEON/SoftmaxLayer.cpp
@@ -66,12 +66,9 @@ TEST_SUITE(SoftmaxLayer)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(concat(datasets::SoftmaxLayerSmallShapes(), datasets::SoftmaxLayerLargeShapes()), CNNDataTypes), shape, data_type)
{
- // Set fixed point position data type allowed
- const int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
// Create tensors
- Tensor src = create_tensor<Tensor>(shape, data_type, 1, fixed_point_position);
- Tensor dst = create_tensor<Tensor>(shape, data_type, 1, fixed_point_position);
+ Tensor src = create_tensor<Tensor>(shape, data_type, 1);
+ Tensor dst = create_tensor<Tensor>(shape, data_type, 1);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -156,9 +153,6 @@ TEST_SUITE_END()
TEST_SUITE_END()
template <typename T>
-using NESoftmaxLayerFixedPointFixture = SoftmaxValidationFixedPointFixture<Tensor, Accessor, NESoftmaxLayer, T>;
-
-template <typename T>
using NESoftmaxLayerQuantizedFixture = SoftmaxValidationQuantizedFixture<Tensor, Accessor, NESoftmaxLayer, T>;
TEST_SUITE(Quantized)
diff --git a/tests/validation/fixtures/ActivationLayerFixture.h b/tests/validation/fixtures/ActivationLayerFixture.h
index e212c7bd9b..d29d67c8e6 100644
--- a/tests/validation/fixtures/ActivationLayerFixture.h
+++ b/tests/validation/fixtures/ActivationLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,17 +47,16 @@ class ActivationValidationGenericFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, int fractional_bits, QuantizationInfo quantization_info)
+ void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, QuantizationInfo quantization_info)
{
- _fractional_bits = fractional_bits;
_quantization_info = quantization_info;
_data_type = data_type;
_function = function;
ActivationLayerInfo info(function, alpha_beta, alpha_beta);
- _target = compute_target(shape, in_place, info, data_type, fractional_bits, quantization_info);
- _reference = compute_reference(shape, info, data_type, fractional_bits, quantization_info);
+ _target = compute_target(shape, in_place, info, data_type, quantization_info);
+ _reference = compute_reference(shape, info, data_type, quantization_info);
}
protected:
@@ -80,17 +79,17 @@ protected:
{
int min_bound = 0;
int max_bound = 0;
- std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<T>(_function, _data_type, _fractional_bits);
+ std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<T>(_function, _data_type);
std::uniform_int_distribution<> distribution(min_bound, max_bound);
library->fill(tensor, distribution, 0);
}
}
- TensorType compute_target(const TensorShape &shape, bool in_place, ActivationLayerInfo info, DataType data_type, int fixed_point_position, QuantizationInfo quantization_info)
+ TensorType compute_target(const TensorShape &shape, bool in_place, ActivationLayerInfo info, DataType data_type, QuantizationInfo quantization_info)
{
// Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position, quantization_info);
- TensorType dst = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position, quantization_info);
+ TensorType src = create_tensor<TensorType>(shape, data_type, 1, quantization_info);
+ TensorType dst = create_tensor<TensorType>(shape, data_type, 1, quantization_info);
// Create and configure function
FunctionType act_layer;
@@ -128,10 +127,10 @@ protected:
}
}
- SimpleTensor<T> compute_reference(const TensorShape &shape, ActivationLayerInfo info, DataType data_type, int fixed_point_position, QuantizationInfo quantization_info)
+ SimpleTensor<T> compute_reference(const TensorShape &shape, ActivationLayerInfo info, DataType data_type, QuantizationInfo quantization_info)
{
// Create reference
- SimpleTensor<T> src{ shape, data_type, 1, fixed_point_position, quantization_info };
+ SimpleTensor<T> src{ shape, data_type, 1, quantization_info };
// Fill reference
fill(src);
@@ -141,7 +140,6 @@ protected:
TensorType _target{};
SimpleTensor<T> _reference{};
- int _fractional_bits{};
QuantizationInfo _quantization_info{};
DataType _data_type{};
ActivationLayerInfo::ActivationFunction _function{};
@@ -154,18 +152,7 @@ public:
template <typename...>
void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type)
{
- ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, 0, QuantizationInfo());
- }
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ActivationValidationFixedPointFixture : public ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
-{
-public:
- template <typename...>
- void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, int fractional_bits)
- {
- ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, fractional_bits, QuantizationInfo());
+ ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, QuantizationInfo());
}
};
@@ -176,7 +163,7 @@ public:
template <typename...>
void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, QuantizationInfo quantization_info)
{
- ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, 0, quantization_info);
+ ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, quantization_info);
}
};
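
The surviving pattern in the fixture headers is a thin forwarding setup(). A self-contained sketch of that pattern with all names hypothetical (the real classes are the ActivationValidation*Fixture templates above):

#include <cstdio>

struct QuantInfo { float scale{ 1.f }; };

template <typename T>
class GenericFixture
{
public:
    // The generic fixture takes the full parameter set, minus fractional_bits.
    void setup(int shape, T alpha_beta, QuantInfo qinfo)
    {
        std::printf("shape=%d alpha_beta=%f scale=%f\n", shape, double(alpha_beta), double(qinfo.scale));
    }
};

template <typename T>
class PlainFixture : public GenericFixture<T>
{
public:
    void setup(int shape, T alpha_beta)
    {
        // Was: GenericFixture<T>::setup(shape, alpha_beta, 0 /*fractional_bits*/, QuantInfo{});
        GenericFixture<T>::setup(shape, alpha_beta, QuantInfo{});
    }
};

int main()
{
    PlainFixture<float>{}.setup(8, 0.5f);
    return 0;
}
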
diff --git a/tests/validation/fixtures/ArithmeticAdditionFixture.h b/tests/validation/fixtures/ArithmeticAdditionFixture.h
index 99a56777a2..6d529a843c 100644
--- a/tests/validation/fixtures/ArithmeticAdditionFixture.h
+++ b/tests/validation/fixtures/ArithmeticAdditionFixture.h
@@ -45,11 +45,11 @@ class ArithmeticAdditionGenericFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, int fractional_bits,
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy,
QuantizationInfo quantization_info)
{
- _target = compute_target(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, fractional_bits, quantization_info);
- _reference = compute_reference(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, fractional_bits, quantization_info);
+ _target = compute_target(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, quantization_info);
+ _reference = compute_reference(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, quantization_info);
}
protected:
@@ -60,12 +60,12 @@ protected:
}
TensorType compute_target(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy,
- int fixed_point_position, QuantizationInfo quantization_info)
+ QuantizationInfo quantization_info)
{
// Create tensors
- TensorType ref_src1 = create_tensor<TensorType>(shape0, data_type0, 1, fixed_point_position, quantization_info);
- TensorType ref_src2 = create_tensor<TensorType>(shape1, data_type1, 1, fixed_point_position, quantization_info);
- TensorType dst = create_tensor<TensorType>(TensorShape::broadcast_shape(shape0, shape1), output_data_type, 1, fixed_point_position, quantization_info);
+ TensorType ref_src1 = create_tensor<TensorType>(shape0, data_type0, 1, quantization_info);
+ TensorType ref_src2 = create_tensor<TensorType>(shape1, data_type1, 1, quantization_info);
+ TensorType dst = create_tensor<TensorType>(TensorShape::broadcast_shape(shape0, shape1), output_data_type, 1, quantization_info);
// Create and configure function
FunctionType add;
@@ -95,11 +95,11 @@ protected:
}
SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy,
- int fixed_point_position, QuantizationInfo quantization_info)
+ QuantizationInfo quantization_info)
{
// Create reference
- SimpleTensor<T> ref_src1{ shape0, data_type0, 1, fixed_point_position, quantization_info };
- SimpleTensor<T> ref_src2{ shape1, data_type1, 1, fixed_point_position, quantization_info };
+ SimpleTensor<T> ref_src1{ shape0, data_type0, 1, quantization_info };
+ SimpleTensor<T> ref_src2{ shape1, data_type1, 1, quantization_info };
// Fill reference
fill(ref_src1, 0);
@@ -117,9 +117,9 @@ class ArithmeticAdditionBroadcastValidationFixedPointFixture : public Arithmetic
{
public:
template <typename...>
- void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, int fractional_bits)
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
{
- ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, 0, QuantizationInfo());
+ ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, QuantizationInfo());
}
};
@@ -130,7 +130,7 @@ public:
template <typename...>
void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
{
- ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, 0, QuantizationInfo());
+ ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, QuantizationInfo());
}
};
@@ -139,9 +139,9 @@ class ArithmeticAdditionValidationFixedPointFixture : public ArithmeticAdditionG
{
public:
template <typename...>
- void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, int fractional_bits)
+ void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
{
- ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, shape, data_type0, data_type1, output_data_type, convert_policy, fractional_bits, QuantizationInfo());
+ ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, shape, data_type0, data_type1, output_data_type, convert_policy, QuantizationInfo());
}
};
@@ -152,7 +152,7 @@ public:
template <typename...>
void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
{
- ArithmeticAdditionValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type0, data_type1, output_data_type, convert_policy, 0);
+ ArithmeticAdditionValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type0, data_type1, output_data_type, convert_policy);
}
};
@@ -163,7 +163,7 @@ public:
template <typename...>
void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, QuantizationInfo quantization_info)
{
- ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, shape, data_type0, data_type1, output_data_type, convert_policy, 0, quantization_info);
+ ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, shape, data_type0, data_type1, output_data_type, convert_policy, quantization_info);
}
};
} // namespace validation
diff --git a/tests/validation/fixtures/ArithmeticSubtractionFixture.h b/tests/validation/fixtures/ArithmeticSubtractionFixture.h
index ba0dd14414..04bb53a184 100644
--- a/tests/validation/fixtures/ArithmeticSubtractionFixture.h
+++ b/tests/validation/fixtures/ArithmeticSubtractionFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,11 +45,10 @@ class ArithmeticSubtractionValidationFixedPointFixture : public framework::Fixtu
{
public:
template <typename...>
- void setup(TensorShape shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, int fractional_bits)
+ void setup(TensorShape shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
{
- _fractional_bits = fractional_bits;
- _target = compute_target(shape, data_type0, data_type1, output_data_type, convert_policy, fractional_bits);
- _reference = compute_reference(shape, data_type0, data_type1, output_data_type, convert_policy, fractional_bits);
+ _target = compute_target(shape, data_type0, data_type1, output_data_type, convert_policy);
+ _reference = compute_reference(shape, data_type0, data_type1, output_data_type, convert_policy);
}
protected:
@@ -59,12 +58,12 @@ protected:
library->fill_tensor_uniform(tensor, i);
}
- TensorType compute_target(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, int fixed_point_position)
+ TensorType compute_target(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
{
// Create tensors
- TensorType ref_src1 = create_tensor<TensorType>(shape, data_type0, 1, fixed_point_position);
- TensorType ref_src2 = create_tensor<TensorType>(shape, data_type1, 1, fixed_point_position);
- TensorType dst = create_tensor<TensorType>(shape, output_data_type, 1, fixed_point_position);
+ TensorType ref_src1 = create_tensor<TensorType>(shape, data_type0, 1);
+ TensorType ref_src2 = create_tensor<TensorType>(shape, data_type1, 1);
+ TensorType dst = create_tensor<TensorType>(shape, output_data_type, 1);
// Create and configure function
FunctionType sub;
@@ -93,11 +92,11 @@ protected:
return dst;
}
- SimpleTensor<T3> compute_reference(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, int fixed_point_position)
+ SimpleTensor<T3> compute_reference(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
{
// Create reference
- SimpleTensor<T1> ref_src1{ shape, data_type0, 1, fixed_point_position };
- SimpleTensor<T2> ref_src2{ shape, data_type1, 1, fixed_point_position };
+ SimpleTensor<T1> ref_src1{ shape, data_type0, 1 };
+ SimpleTensor<T2> ref_src2{ shape, data_type1, 1 };
// Fill reference
fill(ref_src1, 0);
@@ -108,7 +107,6 @@ protected:
TensorType _target{};
SimpleTensor<T3> _reference{};
- int _fractional_bits{};
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2 = T1, typename T3 = T1>
class ArithmeticSubtractionValidationFixture : public ArithmeticSubtractionValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>
@@ -117,7 +115,7 @@ public:
template <typename...>
void setup(TensorShape shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
{
- ArithmeticSubtractionValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>::setup(shape, data_type0, data_type1, output_data_type, convert_policy, 0);
+ ArithmeticSubtractionValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>::setup(shape, data_type0, data_type1, output_data_type, convert_policy);
}
};
} // namespace validation
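
On the reference side the same parameter disappears from SimpleTensor's constructor. A standalone stand-in (RefTensor and its DataType stub are hypothetical) showing that the buffer is now sized from the element count and channel count alone:

#include <cstddef>
#include <vector>

enum class DataType { U8, S16, F32 }; // stub, not the arm_compute enum

template <typename T>
class RefTensor
{
public:
    // Was: RefTensor(num_elements, data_type, num_channels, fixed_point_position).
    RefTensor(size_t num_elements, DataType data_type, int num_channels = 1)
        : _data(num_elements * num_channels), _data_type(data_type)
    {
    }
    T     *data() { return _data.data(); }
    size_t size() const { return _data.size(); }

private:
    std::vector<T> _data;
    DataType       _data_type;
};

int main()
{
    RefTensor<float> ref_src1(27 * 13, DataType::F32); // was: { shape, dt, 1, fixed_point_position }
    return ref_src1.size() == 27 * 13 ? 0 : 1;
}
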
diff --git a/tests/validation/fixtures/BatchNormalizationLayerFixture.h b/tests/validation/fixtures/BatchNormalizationLayerFixture.h
index b7e32a6f37..bc3b488a4a 100644
--- a/tests/validation/fixtures/BatchNormalizationLayerFixture.h
+++ b/tests/validation/fixtures/BatchNormalizationLayerFixture.h
@@ -45,15 +45,14 @@ class BatchNormalizationLayerValidationFixedPointFixture : public framework::Fix
{
public:
template <typename...>
- void setup(TensorShape shape0, TensorShape shape1, float epsilon, bool use_beta, bool use_gamma, ActivationLayerInfo act_info, DataType dt, DataLayout data_layout, int fractional_bits)
+ void setup(TensorShape shape0, TensorShape shape1, float epsilon, bool use_beta, bool use_gamma, ActivationLayerInfo act_info, DataType dt, DataLayout data_layout)
{
- _fractional_bits = fractional_bits;
- _data_type = dt;
- _use_beta = use_beta;
- _use_gamma = use_gamma;
+ _data_type = dt;
+ _use_beta = use_beta;
+ _use_gamma = use_gamma;
- _target = compute_target(shape0, shape1, epsilon, act_info, dt, data_layout, fractional_bits);
- _reference = compute_reference(shape0, shape1, epsilon, act_info, dt, fractional_bits);
+ _target = compute_target(shape0, shape1, epsilon, act_info, dt, data_layout);
+ _reference = compute_reference(shape0, shape1, epsilon, act_info, dt);
}
protected:
@@ -93,7 +92,7 @@ protected:
{
int min_bound = 0;
int max_bound = 0;
- std::tie(min_bound, max_bound) = get_batchnormalization_layer_test_bounds<T>(_fractional_bits);
+ std::tie(min_bound, max_bound) = get_batchnormalization_layer_test_bounds<T>();
std::uniform_int_distribution<> distribution(min_bound, max_bound);
std::uniform_int_distribution<> distribution_var(0, max_bound);
library->fill(src_tensor, distribution, 0);
@@ -115,12 +114,12 @@ protected:
else
{
// Fill with default value 1
- library->fill_tensor_value(gamma_tensor, static_cast<T>(1 << (_fractional_bits)));
+ library->fill_tensor_value(gamma_tensor, static_cast<T>(1));
}
}
}
- TensorType compute_target(TensorShape shape0, const TensorShape &shape1, float epsilon, ActivationLayerInfo act_info, DataType dt, DataLayout data_layout, int fixed_point_position)
+ TensorType compute_target(TensorShape shape0, const TensorShape &shape1, float epsilon, ActivationLayerInfo act_info, DataType dt, DataLayout data_layout)
{
if(data_layout == DataLayout::NHWC)
{
@@ -128,12 +127,12 @@ protected:
}
// Create tensors
- TensorType src = create_tensor<TensorType>(shape0, dt, 1, fixed_point_position, QuantizationInfo(), data_layout);
- TensorType dst = create_tensor<TensorType>(shape0, dt, 1, fixed_point_position, QuantizationInfo(), data_layout);
- TensorType mean = create_tensor<TensorType>(shape1, dt, 1, fixed_point_position);
- TensorType var = create_tensor<TensorType>(shape1, dt, 1, fixed_point_position);
- TensorType beta = create_tensor<TensorType>(shape1, dt, 1, fixed_point_position);
- TensorType gamma = create_tensor<TensorType>(shape1, dt, 1, fixed_point_position);
+ TensorType src = create_tensor<TensorType>(shape0, dt, 1, QuantizationInfo(), data_layout);
+ TensorType dst = create_tensor<TensorType>(shape0, dt, 1, QuantizationInfo(), data_layout);
+ TensorType mean = create_tensor<TensorType>(shape1, dt, 1);
+ TensorType var = create_tensor<TensorType>(shape1, dt, 1);
+ TensorType beta = create_tensor<TensorType>(shape1, dt, 1);
+ TensorType gamma = create_tensor<TensorType>(shape1, dt, 1);
// Create and configure function
FunctionType norm;
@@ -172,24 +171,23 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1, float epsilon, ActivationLayerInfo act_info, DataType dt, int fixed_point_position)
+ SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1, float epsilon, ActivationLayerInfo act_info, DataType dt)
{
// Create reference
- SimpleTensor<T> ref_src{ shape0, dt, 1, fixed_point_position };
- SimpleTensor<T> ref_mean{ shape1, dt, 1, fixed_point_position };
- SimpleTensor<T> ref_var{ shape1, dt, 1, fixed_point_position };
- SimpleTensor<T> ref_beta{ shape1, dt, 1, fixed_point_position };
- SimpleTensor<T> ref_gamma{ shape1, dt, 1, fixed_point_position };
+ SimpleTensor<T> ref_src{ shape0, dt, 1 };
+ SimpleTensor<T> ref_mean{ shape1, dt, 1 };
+ SimpleTensor<T> ref_var{ shape1, dt, 1 };
+ SimpleTensor<T> ref_beta{ shape1, dt, 1 };
+ SimpleTensor<T> ref_gamma{ shape1, dt, 1 };
// Fill reference
fill(ref_src, ref_mean, ref_var, ref_beta, ref_gamma);
- return reference::batch_normalization_layer(ref_src, ref_mean, ref_var, ref_beta, ref_gamma, epsilon, act_info, fixed_point_position);
+ return reference::batch_normalization_layer(ref_src, ref_mean, ref_var, ref_beta, ref_gamma, epsilon, act_info);
}
TensorType _target{};
SimpleTensor<T> _reference{};
- int _fractional_bits{};
DataType _data_type{};
bool _use_beta{};
bool _use_gamma{};
@@ -202,7 +200,7 @@ public:
template <typename...>
void setup(TensorShape shape0, TensorShape shape1, float epsilon, bool use_beta, bool use_gamma, ActivationLayerInfo act_info, DataType dt, DataLayout data_layout)
{
- BatchNormalizationLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, epsilon, use_beta, use_gamma, act_info, dt, data_layout, 0);
+ BatchNormalizationLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, epsilon, use_beta, use_gamma, act_info, dt, data_layout);
}
};
} // namespace validation
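
The only semantic change in this fixture is the default gamma fill: in the removed QS8/QS16 Q-number formats the real value 1.0 was stored as 1 << fractional_bits, so dropping fixed point turns that fill into a literal 1. A small runnable illustration:

#include <cstdint>
#include <cstdio>

int main()
{
    const int    fractional_bits = 3;                                    // QS8.3, for illustration
    const int8_t qs8_one = static_cast<int8_t>(1 << fractional_bits);    // raw value 8
    const float  real    = float(qs8_one) / float(1 << fractional_bits); // back to real: 1.0
    std::printf("raw=%d real=%.1f\n", qs8_one, real);
    return 0;
}
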
diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h
index 00ca0778f5..7ba2583bbe 100644
--- a/tests/validation/fixtures/ConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/ConvolutionLayerFixture.h
@@ -57,12 +57,11 @@ public:
public:
template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights,
- DataType data_type, DataLayout data_layout, int fractional_bits, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
+ DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
{
_data_type = data_type;
_is_quantized = is_data_type_quantized_asymmetric(data_type);
_bias_data_type = _is_quantized ? DataType::S32 : data_type;
- _fractional_bits = fractional_bits;
_quantization_info = quantization_info;
_data_layout = data_layout;
@@ -117,10 +116,10 @@ protected:
TensorShape reshaped_weights_shape(weights_shape);
// Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, _fractional_bits, _quantization_info, _data_layout);
- TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _data_type, 1, _fractional_bits, _quantization_info, _data_layout);
- TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _fractional_bits, _quantization_info, _data_layout);
- TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, _fractional_bits, _quantization_info, _data_layout);
+ TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, _quantization_info, _data_layout);
+ TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _data_type, 1, _quantization_info, _data_layout);
+ TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _quantization_info, _data_layout);
+ TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, _quantization_info, _data_layout);
// Create and configure function
FunctionType conv;
@@ -157,9 +156,9 @@ protected:
const Size2D &dilation, const ActivationLayerInfo act_info)
{
// Create reference
- SimpleTensor<T> src{ input_shape, _data_type, 1, _fractional_bits, _quantization_info };
- SimpleTensor<T> weights{ weights_shape, _data_type, 1, _fractional_bits, _quantization_info };
- SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _fractional_bits, _quantization_info };
+ SimpleTensor<T> src{ input_shape, _data_type, 1, _quantization_info };
+ SimpleTensor<T> weights{ weights_shape, _data_type, 1, _quantization_info };
+ SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _quantization_info };
// Fill reference
fill(src, 0);
@@ -176,7 +175,6 @@ protected:
DataType _data_type{};
DataType _bias_data_type{};
DataLayout _data_layout{};
- int _fractional_bits{};
QuantizationInfo _quantization_info{};
bool _is_quantized = false;
};
@@ -189,7 +187,7 @@ public:
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
DataLayout data_layout, ActivationLayerInfo act_info)
{
- ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights, data_type, data_layout, 0,
+ ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights, data_type, data_layout,
QuantizationInfo(), act_info);
}
};
@@ -200,11 +198,11 @@ class ConvolutionValidationFixedPointFixture : public ConvolutionValidationGener
public:
template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
- int fractional_bits, ActivationLayerInfo act_info)
+ ActivationLayerInfo act_info)
{
ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights, data_type,
DataLayout::NCHW,
- fractional_bits, QuantizationInfo(), act_info);
+ QuantizationInfo(), act_info);
}
};
@@ -217,7 +215,7 @@ public:
DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
{
ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
- data_type, data_layout, 0, quantization_info, act_info);
+ data_type, data_layout, quantization_info, act_info);
}
};
} // namespace validation
diff --git a/tests/validation/fixtures/DeconvolutionLayerFixture.h b/tests/validation/fixtures/DeconvolutionLayerFixture.h
index 137068a4b9..12ce9cefc7 100644
--- a/tests/validation/fixtures/DeconvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DeconvolutionLayerFixture.h
@@ -45,13 +45,12 @@ class DeconvolutionLayerFixtureBase : public framework::Fixture
public:
template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info,
- const std::pair<unsigned int, unsigned int> &inner_border, DataType data_type, int fractional_bits)
+ const std::pair<unsigned int, unsigned int> &inner_border, DataType data_type)
{
- _fractional_bits = fractional_bits;
- _data_type = data_type;
+ _data_type = data_type;
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type, fractional_bits);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type, fractional_bits);
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type);
}
protected:
@@ -70,13 +69,13 @@ protected:
}
TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape,
- const PadStrideInfo &info, const std::pair<unsigned int, unsigned int> &inner_border, DataType data_type, int fixed_point_position)
+ const PadStrideInfo &info, const std::pair<unsigned int, unsigned int> &inner_border, DataType data_type)
{
// Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, fixed_point_position);
- TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, fixed_point_position);
- TensorType bias = create_tensor<TensorType>(bias_shape, data_type, 1, fixed_point_position);
- TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, fixed_point_position);
+ TensorType src = create_tensor<TensorType>(input_shape, data_type, 1);
+ TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1);
+ TensorType bias = create_tensor<TensorType>(bias_shape, data_type, 1);
+ TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1);
// Create and configure function
FunctionType conv;
@@ -110,12 +109,12 @@ protected:
}
SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape,
- const PadStrideInfo &info, const std::pair<unsigned int, unsigned int> inner_border, DataType data_type, int fixed_point_position)
+ const PadStrideInfo &info, const std::pair<unsigned int, unsigned int> inner_border, DataType data_type)
{
// Create reference
- SimpleTensor<T> src{ input_shape, data_type, 1, fixed_point_position };
- SimpleTensor<T> weights{ weights_shape, data_type, 1, fixed_point_position };
- SimpleTensor<T> bias{ bias_shape, data_type, 1, fixed_point_position };
+ SimpleTensor<T> src{ input_shape, data_type, 1 };
+ SimpleTensor<T> weights{ weights_shape, data_type, 1 };
+ SimpleTensor<T> bias{ bias_shape, data_type, 1 };
// Fill reference
fill(src, 0);
@@ -127,7 +126,6 @@ protected:
TensorType _target{};
SimpleTensor<T> _reference{};
- int _fractional_bits{};
DataType _data_type{};
};
@@ -146,7 +144,7 @@ public:
const std::pair<unsigned int, unsigned int> inner_border(inner_border_right, inner_border_top);
auto out_dim = deconvolution_output_dimensions(input_shape.x(), input_shape.y(), kernel_size_x, kernel_size_y, padx, pady, inner_border.first, inner_border.second, sx, sy);
TensorShape output_shape = deconvolution_output_shape(out_dim, input_shape, weights_shape);
- DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type, 0);
+ DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type);
}
};
diff --git a/tests/validation/fixtures/DepthConcatenateLayerFixture.h b/tests/validation/fixtures/DepthConcatenateLayerFixture.h
index 6e112c7359..76b56ad26e 100644
--- a/tests/validation/fixtures/DepthConcatenateLayerFixture.h
+++ b/tests/validation/fixtures/DepthConcatenateLayerFixture.h
@@ -102,12 +102,12 @@ protected:
for(const auto &shape : shapes)
{
- srcs.emplace_back(create_tensor<TensorType>(shape, data_type, 1, _fractional_bits));
+ srcs.emplace_back(create_tensor<TensorType>(shape, data_type, 1));
src_ptrs.emplace_back(&srcs.back());
}
TensorShape dst_shape = calculate_depth_concatenate_shape(shapes);
- TensorType dst = create_tensor<TensorType>(dst_shape, data_type, 1, _fractional_bits);
+ TensorType dst = create_tensor<TensorType>(dst_shape, data_type, 1);
// Create and configure function
FunctionType depth_concat;
@@ -151,7 +151,7 @@ protected:
int i = 0;
for(const auto &shape : shapes)
{
- srcs.emplace_back(shape, data_type, 1, _fractional_bits);
+ srcs.emplace_back(shape, data_type, 1);
fill(srcs.back(), i++);
}
@@ -160,9 +160,6 @@ protected:
TensorType _target{};
SimpleTensor<T> _reference{};
-
-private:
- int _fractional_bits{ 1 };
};
} // namespace validation
} // namespace test
diff --git a/tests/validation/fixtures/DepthConvertLayerFixture.h b/tests/validation/fixtures/DepthConvertLayerFixture.h
index 4b4e959273..eb1c083667 100644
--- a/tests/validation/fixtures/DepthConvertLayerFixture.h
+++ b/tests/validation/fixtures/DepthConvertLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,12 +45,11 @@ class DepthConvertLayerValidationFixedPointFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift, uint32_t fractional_bits)
+ void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift)
{
- _shift = shift;
- _fractional_bits = fractional_bits;
- _target = compute_target(shape, dt_in, dt_out, policy, shift, fractional_bits);
- _reference = compute_reference(shape, dt_in, dt_out, policy, shift, fractional_bits);
+ _shift = shift;
+ _target = compute_target(shape, dt_in, dt_out, policy, shift);
+ _reference = compute_reference(shape, dt_in, dt_out, policy, shift);
}
protected:
@@ -60,11 +59,11 @@ protected:
library->fill_tensor_uniform(tensor, i);
}
- TensorType compute_target(const TensorShape &shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift, uint32_t fixed_point_position)
+ TensorType compute_target(const TensorShape &shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift)
{
// Create tensors
- TensorType src = create_tensor<TensorType>(shape, dt_in, 1, static_cast<int>(fixed_point_position));
- TensorType dst = create_tensor<TensorType>(shape, dt_out, 1, static_cast<int>(fixed_point_position));
+ TensorType src = create_tensor<TensorType>(shape, dt_in, 1);
+ TensorType dst = create_tensor<TensorType>(shape, dt_out, 1);
// Create and configure function
FunctionType depth_convert;
@@ -89,10 +88,10 @@ protected:
return dst;
}
- SimpleTensor<T2> compute_reference(const TensorShape &shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift, uint32_t fixed_point_position)
+ SimpleTensor<T2> compute_reference(const TensorShape &shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift)
{
// Create reference
- SimpleTensor<T1> src{ shape, dt_in, 1, static_cast<int>(fixed_point_position) };
+ SimpleTensor<T1> src{ shape, dt_in, 1 };
// Fill reference
fill(src, 0);
@@ -102,7 +101,6 @@ protected:
TensorType _target{};
SimpleTensor<T2> _reference{};
- int _fractional_bits{};
int _shift{};
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
@@ -112,7 +110,7 @@ public:
template <typename...>
void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift)
{
- DepthConvertLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, dt_in, dt_out, policy, shift, 0);
+ DepthConvertLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, dt_in, dt_out, policy, shift);
}
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
@@ -120,9 +118,9 @@ class DepthConvertLayerValidationFractionalBitsFixture : public DepthConvertLaye
{
public:
template <typename...>
- void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t fractional_bits)
+ void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy)
{
- DepthConvertLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, dt_in, dt_out, policy, 0, fractional_bits);
+ DepthConvertLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, dt_in, dt_out, policy, 0);
}
};
} // namespace validation
diff --git a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
index 2f01f43d8b..5428154a2b 100644
--- a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
@@ -115,10 +115,10 @@ protected:
}
// Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, 0, quantization_info, data_layout);
- TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, 0, quantization_info, data_layout);
- TensorType biases = create_tensor<TensorType>(biases_shape, bias_data_type, 1, 0, quantization_info, data_layout);
- TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, quantization_info, data_layout);
+ TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, quantization_info, data_layout);
+ TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, quantization_info, data_layout);
+ TensorType biases = create_tensor<TensorType>(biases_shape, bias_data_type, 1, quantization_info, data_layout);
+ TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, quantization_info, data_layout);
// Create Depthwise Convolution configure function
FunctionType dwc;
@@ -155,9 +155,9 @@ protected:
unsigned int depth_multiplier,
const DataType data_type, const DataType bias_data_type, const QuantizationInfo quantization_info)
{
- SimpleTensor<T> src{ in_shape, data_type, 1, 0, quantization_info };
- SimpleTensor<T> weights{ weights_shape, data_type, 1, 0, quantization_info };
- SimpleTensor<TBias> biases{ biases_shape, bias_data_type, 1, 0, quantization_info };
+ SimpleTensor<T> src{ in_shape, data_type, 1, quantization_info };
+ SimpleTensor<T> weights{ weights_shape, data_type, 1, quantization_info };
+ SimpleTensor<TBias> biases{ biases_shape, bias_data_type, 1, quantization_info };
fill(src, 0);
fill(weights, 1);
diff --git a/tests/validation/fixtures/DirectConvolutionLayerFixture.h b/tests/validation/fixtures/DirectConvolutionLayerFixture.h
index 38ddf33e24..9a58167605 100644
--- a/tests/validation/fixtures/DirectConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DirectConvolutionLayerFixture.h
@@ -54,11 +54,10 @@ public:
public:
template <typename...>
void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels,
- DataType data_type, int fractional_bits, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout)
+ DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout)
{
ARM_COMPUTE_ERROR_ON(data_layout == DataLayout::UNKNOWN);
- _fractional_bits = fractional_bits;
_quantization_info = quantization_info;
_data_type = data_type;
@@ -67,30 +66,29 @@ public:
const PadStrideInfo info(stride_x, stride_y, pad_x, pad_y, DimensionRoundingType::FLOOR);
const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
- TensorInfo input_info = TensorInfo(input_shape, 1, data_type, _fractional_bits);
- TensorInfo weights_info = TensorInfo(weights_shape, 1, data_type, _fractional_bits);
+ TensorInfo input_info = TensorInfo(input_shape, 1, data_type);
+ TensorInfo weights_info = TensorInfo(weights_shape, 1, data_type);
const TensorShape output_shape = compute_deep_convolution_shape(input_info, weights_info, info);
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info, act_info, data_layout);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info, act_info);
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info, data_layout);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info);
}
template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
- DataType data_type, int fractional_bits, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout)
+ DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout)
{
ARM_COMPUTE_ERROR_ON(data_layout == DataLayout::UNKNOWN);
ARM_COMPUTE_UNUSED(dilation);
- _fractional_bits = fractional_bits;
_quantization_info = quantization_info;
_data_type = data_type;
const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info, act_info, data_layout);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info, act_info);
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info, data_layout);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info);
}
protected:
@@ -124,7 +122,7 @@ protected:
}
TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &info,
- DataType data_type, DataType bias_data_type, int fixed_point_position, QuantizationInfo quantization_info, ActivationLayerInfo act_info, const DataLayout &data_layout)
+ DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, const DataLayout &data_layout)
{
if(data_layout == DataLayout::NHWC)
{
@@ -134,10 +132,10 @@ protected:
}
// Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, fixed_point_position, quantization_info, data_layout);
- TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, fixed_point_position, quantization_info, data_layout);
- TensorType bias = create_tensor<TensorType>(bias_shape, bias_data_type, 1, fixed_point_position, quantization_info);
- TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, fixed_point_position, quantization_info, data_layout);
+ TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, quantization_info, data_layout);
+ TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, quantization_info, data_layout);
+ TensorType bias = create_tensor<TensorType>(bias_shape, bias_data_type, 1, quantization_info);
+ TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, quantization_info, data_layout);
// Create and configure function
FunctionType conv;
@@ -171,12 +169,12 @@ protected:
}
SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
- DataType data_type, DataType bias_data_type, int fixed_point_position, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
+ DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
{
// Create reference
- SimpleTensor<T> src{ input_shape, data_type, 1, fixed_point_position, quantization_info };
- SimpleTensor<T> weights{ weights_shape, data_type, 1, fixed_point_position, quantization_info };
- SimpleTensor<TBias> bias{ bias_shape, bias_data_type, 1, fixed_point_position, quantization_info };
+ SimpleTensor<T> src{ input_shape, data_type, 1, quantization_info };
+ SimpleTensor<T> weights{ weights_shape, data_type, 1, quantization_info };
+ SimpleTensor<TBias> bias{ bias_shape, bias_data_type, 1, quantization_info };
// Fill reference
fill(src, 0);
@@ -190,7 +188,6 @@ protected:
TensorType _target{};
SimpleTensor<T> _reference{};
- int _fractional_bits{};
QuantizationInfo _quantization_info{};
DataType _data_type{};
};
@@ -203,7 +200,7 @@ public:
void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, ActivationLayerInfo act_info,
DataLayout data_layout)
{
- DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, 0, QuantizationInfo(),
+ DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, QuantizationInfo(),
act_info, data_layout);
}
};
@@ -213,10 +210,10 @@ class DirectConvolutionValidationFixedPointFixture : public DirectConvolutionVal
{
public:
template <typename...>
- void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, int fractional_bits,
+ void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type,
ActivationLayerInfo act_info)
{
- DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, fractional_bits,
+ DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type,
QuantizationInfo(), act_info, DataLayout::NCHW);
}
};
@@ -229,7 +226,7 @@ public:
void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, QuantizationInfo quantization_info,
ActivationLayerInfo act_info)
{
- DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, 0, quantization_info,
+ DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, quantization_info,
act_info, DataLayout::NCHW);
}
};
@@ -242,7 +239,7 @@ public:
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
{
- DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, 0, quantization_info,
+ DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, quantization_info,
act_info, DataLayout::NCHW);
}
};
@@ -255,7 +252,7 @@ public:
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
DataType data_type, ActivationLayerInfo act_info)
{
- DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, 0, QuantizationInfo(),
+ DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, QuantizationInfo(),
act_info, DataLayout::NCHW);
}
};
diff --git a/tests/validation/fixtures/DirectConvolutionLayerTensorShiftFixture.h b/tests/validation/fixtures/DirectConvolutionLayerTensorShiftFixture.h
index 09b6d830b4..144c7b7d0d 100644
--- a/tests/validation/fixtures/DirectConvolutionLayerTensorShiftFixture.h
+++ b/tests/validation/fixtures/DirectConvolutionLayerTensorShiftFixture.h
@@ -50,9 +50,8 @@ public:
public:
template <typename...>
void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels,
- DataType data_type, int fractional_bits, QuantizationInfo quantization_info)
+ DataType data_type, QuantizationInfo quantization_info)
{
- _fractional_bits = fractional_bits;
_quantization_info = quantization_info;
_data_type = data_type;
@@ -62,24 +61,23 @@ public:
const TensorShape output_shape = get_output_shape(input_shape, weights_shape, info);
const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info);
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info);
}
template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, unsigned int dilation_x, unsigned int dilation_y,
- DataType data_type, int fractional_bits, QuantizationInfo quantization_info)
+ DataType data_type, QuantizationInfo quantization_info)
{
ARM_COMPUTE_UNUSED(dilation_x, dilation_y);
- _fractional_bits = fractional_bits;
_quantization_info = quantization_info;
_data_type = data_type;
const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info);
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info);
}
protected:
@@ -113,16 +111,16 @@ protected:
}
TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
- DataType data_type, DataType bias_data_type, int fixed_point_position, QuantizationInfo quantization_info)
+ DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info)
{
// Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, fixed_point_position, quantization_info);
- TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, fixed_point_position, quantization_info);
- TensorType bias = create_tensor<TensorType>(bias_shape, bias_data_type, 1, fixed_point_position, quantization_info);
- TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, fixed_point_position, quantization_info);
+ TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, quantization_info);
+ TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, quantization_info);
+ TensorType bias = create_tensor<TensorType>(bias_shape, bias_data_type, 1, quantization_info);
+ TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, quantization_info);
TensorShape output_shape1 = get_output_shape(output_shape, weights_shape, info);
- TensorType dst1 = create_tensor<TensorType>(output_shape1, data_type, 1, fixed_point_position, quantization_info);
+ TensorType dst1 = create_tensor<TensorType>(output_shape1, data_type, 1, quantization_info);
// Create and configure function
FunctionType conv;
@@ -164,14 +162,14 @@ protected:
}
SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
- DataType data_type, DataType bias_data_type, int fixed_point_position, QuantizationInfo quantization_info)
+ DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info)
{
// Create reference
- SimpleTensor<T> src{ input_shape, data_type, 1, fixed_point_position, quantization_info };
- SimpleTensor<T> weights{ weights_shape, data_type, 1, fixed_point_position, quantization_info };
- SimpleTensor<TBias> bias{ bias_shape, bias_data_type, 1, fixed_point_position, quantization_info };
+ SimpleTensor<T> src{ input_shape, data_type, 1, quantization_info };
+ SimpleTensor<T> weights{ weights_shape, data_type, 1, quantization_info };
+ SimpleTensor<TBias> bias{ bias_shape, bias_data_type, 1, quantization_info };
- SimpleTensor<T> dst{ output_shape, data_type, 1, fixed_point_position, quantization_info };
+ SimpleTensor<T> dst{ output_shape, data_type, 1, quantization_info };
TensorShape output_shape1 = get_output_shape(output_shape, weights_shape, info);
// Fill reference
@@ -185,7 +183,6 @@ protected:
TensorType _target{};
SimpleTensor<T> _reference{};
- int _fractional_bits{};
QuantizationInfo _quantization_info{};
DataType _data_type{};
@@ -212,7 +209,7 @@ public:
template <typename...>
void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type)
{
- DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, 0,
+ DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type,
QuantizationInfo());
}
};
@@ -222,10 +219,9 @@ class DirectConvolutionValidationFixedPointTensorShiftFixture : public DirectCon
{
public:
template <typename...>
- void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, int fractional_bits)
+ void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type)
{
DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type,
- fractional_bits,
QuantizationInfo());
}
};
@@ -237,7 +233,7 @@ public:
template <typename...>
void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, QuantizationInfo quantization_info)
{
- DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, 0,
+ DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type,
quantization_info);
}
};
@@ -250,7 +246,7 @@ public:
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, unsigned int dilation_x, unsigned int dilation_y,
DataType data_type, QuantizationInfo quantization_info)
{
- DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation_x, dilation_y, data_type, 0,
+ DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation_x, dilation_y, data_type,
quantization_info);
}
};
@@ -263,7 +259,7 @@ public:
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, unsigned int dilation_x, unsigned int dilation_y,
DataType data_type)
{
- DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation_x, dilation_y, data_type, 0,
+ DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation_x, dilation_y, data_type,
QuantizationInfo());
}
};
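
Every create_tensor<TensorType> call in the fixture above loses its fixed_point_position argument, so quantization_info moves one slot to the left. A standalone sketch of the resulting factory shape; TensorShape, QuantizationInfo and Tensor are simplified stand-ins for the real types behind tests/Utils.h:

    #include <cstdio>

    struct TensorShape      { int x, y, z; };
    struct QuantizationInfo { float scale{ 0.f }; int offset{ 0 }; };
    struct Tensor           { TensorShape shape; int dt; int ch; QuantizationInfo qi; };

    // After the patch the factory takes (shape, data_type, num_channels,
    // quantization_info): every caller that used to pass
    // "1, fixed_point_position, quantization_info" now passes "1, quantization_info".
    Tensor create_tensor(TensorShape shape, int data_type, int num_channels,
                         QuantizationInfo qinfo = QuantizationInfo())
    {
        return Tensor{ shape, data_type, num_channels, qinfo };
    }

    int main()
    {
        Tensor src = create_tensor({ 8, 8, 3 }, /*data_type=*/0, /*num_channels=*/1);
        std::printf("channels=%d scale=%f\n", src.ch, src.qi.scale);
    }
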
diff --git a/tests/validation/fixtures/DropoutLayerFixture.h b/tests/validation/fixtures/DropoutLayerFixture.h
index 3a077dbbea..771de30917 100644
--- a/tests/validation/fixtures/DropoutLayerFixture.h
+++ b/tests/validation/fixtures/DropoutLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -97,7 +97,6 @@ protected:
TensorType _target{};
SimpleTensor<T> _reference{};
- int _fractional_bits{};
};
} // namespace validation
} // namespace test
diff --git a/tests/validation/fixtures/FlattenLayerFixture.h b/tests/validation/fixtures/FlattenLayerFixture.h
index ef94ea83b0..f273e9315c 100644
--- a/tests/validation/fixtures/FlattenLayerFixture.h
+++ b/tests/validation/fixtures/FlattenLayerFixture.h
@@ -53,10 +53,8 @@ public:
template <typename...>
void setup(TensorShape shape, DataType data_type)
{
- _fractional_bits = is_data_type_fixed_point(data_type) ? 4 : 0;
-
TensorShape shape_flatten;
- TensorInfo input_info(shape, 1, data_type, _fractional_bits);
+ TensorInfo input_info(shape, 1, data_type);
shape_flatten = compute_im2col_flatten_shape(&input_info);
_target = compute_target(shape, shape_flatten, data_type);
@@ -68,24 +66,15 @@ protected:
template <typename U>
void fill(U &&tensor)
{
- if(_fractional_bits == 0)
- {
- std::uniform_real_distribution<> distribution(-1.f, 1.f);
- library->fill(tensor, distribution, 0);
- }
- else
- {
- const int one_fixed = 1 << _fractional_bits;
- std::uniform_int_distribution<> distribution(-one_fixed, one_fixed);
- library->fill(tensor, distribution, 0);
- }
+ std::uniform_real_distribution<> distribution(-1.f, 1.f);
+ library->fill(tensor, distribution, 0);
}
TensorType compute_target(const TensorShape &shape, const TensorShape &shape_flatten, DataType data_type)
{
// Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type, 1, _fractional_bits);
- TensorType dst = create_tensor<TensorType>(shape_flatten, data_type, 1, _fractional_bits);
+ TensorType src = create_tensor<TensorType>(shape, data_type, 1);
+ TensorType dst = create_tensor<TensorType>(shape_flatten, data_type, 1);
// Create and configure function
FunctionType flatten_layer;
@@ -113,7 +102,7 @@ protected:
SimpleTensor<T> compute_reference(const TensorShape &shape, const TensorShape &shape_flatten, DataType data_type)
{
// Create reference
- SimpleTensor<T> src{ shape, data_type, 1, _fractional_bits };
+ SimpleTensor<T> src{ shape, data_type, 1 };
// Fill reference
fill(src);
@@ -123,7 +112,6 @@ protected:
TensorType _target{};
SimpleTensor<T> _reference{};
- int _fractional_bits{};
};
} // namespace validation
} // namespace test
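
With the fixed-point branch gone, FlattenLayerFixture's fill() collapses to one uniform real distribution for every data type. A self-contained sketch of the simplified fill, with fill_uniform standing in for the library->fill(...) call:

    #include <cstdio>
    #include <random>
    #include <vector>

    // Every tensor is now filled from the same real-valued distribution;
    // the "1 << _fractional_bits" integer branch no longer exists.
    template <typename T>
    void fill_uniform(std::vector<T> &tensor, unsigned seed)
    {
        std::mt19937                     gen(seed);
        std::uniform_real_distribution<> distribution(-1.f, 1.f);
        for(auto &v : tensor)
        {
            v = static_cast<T>(distribution(gen));
        }
    }

    int main()
    {
        std::vector<float> src(16);
        fill_uniform(src, 0);
        std::printf("src[0]=%f\n", src[0]);
    }
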
diff --git a/tests/validation/fixtures/FullyConnectedLayerFixture.h b/tests/validation/fixtures/FullyConnectedLayerFixture.h
index f23fc207a8..895e43b735 100644
--- a/tests/validation/fixtures/FullyConnectedLayerFixture.h
+++ b/tests/validation/fixtures/FullyConnectedLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,14 +54,13 @@ public:
public:
template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights,
- DataType data_type, int fractional_bits, QuantizationInfo quantization_info)
+ DataType data_type, QuantizationInfo quantization_info)
{
ARM_COMPUTE_UNUSED(weights_shape);
ARM_COMPUTE_UNUSED(bias_shape);
_data_type = data_type;
_bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
- _fractional_bits = fractional_bits;
_quantization_info = quantization_info;
_target = compute_target(input_shape, weights_shape, bias_shape, output_shape, transpose_weights, reshape_weights);
@@ -126,10 +125,10 @@ protected:
}
// Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, _fractional_bits, _quantization_info);
- TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _data_type, 1, _fractional_bits, _quantization_info);
- TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _fractional_bits, _quantization_info);
- TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, _fractional_bits, _quantization_info);
+ TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, _quantization_info);
+ TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _data_type, 1, _quantization_info);
+ TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _quantization_info);
+ TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, _quantization_info);
// Create and configure function.
FunctionType fc;
@@ -158,7 +157,7 @@ protected:
if(!reshape_weights || !transpose_weights)
{
TensorShape tmp_shape(weights_shape);
- RawTensor tmp(tmp_shape, _data_type, 1, _fractional_bits);
+ RawTensor tmp(tmp_shape, _data_type, 1);
// Fill with original shape
fill(tmp, 1);
@@ -199,9 +198,9 @@ protected:
bool reshape_weights)
{
// Create reference
- SimpleTensor<T> src{ input_shape, _data_type, 1, _fractional_bits, _quantization_info };
- SimpleTensor<T> weights{ weights_shape, _data_type, 1, _fractional_bits, _quantization_info };
- SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _fractional_bits, _quantization_info };
+ SimpleTensor<T> src{ input_shape, _data_type, 1, _quantization_info };
+ SimpleTensor<T> weights{ weights_shape, _data_type, 1, _quantization_info };
+ SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _quantization_info };
// Fill reference
fill(src, 0);
@@ -215,7 +214,6 @@ protected:
SimpleTensor<T> _reference{};
DataType _data_type{};
DataType _bias_data_type{};
- int _fractional_bits{};
QuantizationInfo _quantization_info{};
};
@@ -228,7 +226,7 @@ public:
{
FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, run_interleave>::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights,
reshape_weights, data_type,
- 0, QuantizationInfo());
+ QuantizationInfo());
}
};
@@ -237,11 +235,11 @@ class FullyConnectedLayerValidationFixedPointFixture : public FullyConnectedLaye
{
public:
template <typename...>
- void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights, DataType data_type, int fractional_bits)
+ void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights, DataType data_type)
{
FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, run_interleave>::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights,
reshape_weights, data_type,
- fractional_bits, QuantizationInfo());
+ QuantizationInfo());
}
};
@@ -255,7 +253,7 @@ public:
{
FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, run_interleave>::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights,
reshape_weights, data_type,
- 0, quantization_info);
+ quantization_info);
}
};
} // namespace validation
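
One rule these fully-connected hunks keep intact is the bias type selection: quantized-asymmetric inputs accumulate into S32 biases, everything else reuses the input type. A standalone sketch of that selection; the trimmed DataType enum stands in for arm_compute/core/Types.h:

    #include <cstdio>

    enum class DataType { F16, F32, QASYMM8, S32 };

    bool is_data_type_quantized_asymmetric(DataType dt)
    {
        return dt == DataType::QASYMM8;
    }

    // Mirrors "is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type"
    // from the fixtures above.
    DataType bias_data_type_for(DataType dt)
    {
        return is_data_type_quantized_asymmetric(dt) ? DataType::S32 : dt;
    }

    int main()
    {
        std::printf("QASYMM8 bias is S32: %d\n", bias_data_type_for(DataType::QASYMM8) == DataType::S32);
        std::printf("F32 bias stays F32:  %d\n", bias_data_type_for(DataType::F32) == DataType::F32);
    }
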
diff --git a/tests/validation/fixtures/GEMMFixture.h b/tests/validation/fixtures/GEMMFixture.h
index 8dd2998377..e4762cc5be 100644
--- a/tests/validation/fixtures/GEMMFixture.h
+++ b/tests/validation/fixtures/GEMMFixture.h
@@ -47,13 +47,12 @@ class GEMMValidationFixedPointFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, TensorShape output_shape, float alpha, float beta, DataType data_type, int fractional_bits)
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, TensorShape output_shape, float alpha, float beta, DataType data_type)
{
- _fractional_bits = fractional_bits;
- _data_type = data_type;
+ _data_type = data_type;
- _target = compute_target(shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type, fractional_bits);
- _reference = compute_reference(shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type, fractional_bits);
+ _target = compute_target(shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type);
+ _reference = compute_reference(shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type);
}
protected:
@@ -75,13 +74,13 @@ protected:
}
TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c, const TensorShape &output_shape, float alpha, float beta,
- DataType data_type, int fixed_point_position)
+ DataType data_type)
{
// Create tensors
- TensorType a = create_tensor<TensorType>(shape_a, data_type, 1, fixed_point_position);
- TensorType b = create_tensor<TensorType>(shape_b, data_type, 1, fixed_point_position);
- TensorType c = create_tensor<TensorType>(shape_c, data_type, 1, fixed_point_position);
- TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, fixed_point_position);
+ TensorType a = create_tensor<TensorType>(shape_a, data_type, 1);
+ TensorType b = create_tensor<TensorType>(shape_b, data_type, 1);
+ TensorType c = create_tensor<TensorType>(shape_c, data_type, 1);
+ TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1);
// Create and configure function
FunctionType gemm;
@@ -120,12 +119,12 @@ protected:
}
SimpleTensor<T> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c, const TensorShape &output_shape, float alpha, float beta,
- DataType data_type, int fixed_point_position)
+ DataType data_type)
{
// Create reference
- SimpleTensor<T> a{ shape_a, data_type, 1, fixed_point_position };
- SimpleTensor<T> b{ shape_b, data_type, 1, fixed_point_position };
- SimpleTensor<T> c{ shape_c, data_type, 1, fixed_point_position };
+ SimpleTensor<T> a{ shape_a, data_type, 1 };
+ SimpleTensor<T> b{ shape_b, data_type, 1 };
+ SimpleTensor<T> c{ shape_c, data_type, 1 };
// Fill reference
fill(a, 0);
@@ -137,7 +136,6 @@ protected:
TensorType _target{};
SimpleTensor<T> _reference{};
- int _fractional_bits{};
DataType _data_type{};
};
@@ -148,7 +146,7 @@ public:
template <typename...>
void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, TensorShape output_shape, float alpha, float beta, DataType data_type)
{
- GEMMValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type, 0);
+ GEMMValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type);
}
};
} // namespace validation
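
The GEMM fixture still checks the usual dst = alpha * (a * b) + beta * c, now on tensors created without a fixed-point position. A small row-major sketch of that formula — not the library's reference::gemm, just the arithmetic the test pins down:

    #include <cstdio>
    #include <vector>

    std::vector<float> gemm(const std::vector<float> &a, const std::vector<float> &b,
                            const std::vector<float> &c, int M, int N, int K,
                            float alpha, float beta)
    {
        std::vector<float> dst(M * N, 0.f);
        for(int m = 0; m < M; ++m)
        {
            for(int n = 0; n < N; ++n)
            {
                float acc = 0.f;
                for(int k = 0; k < K; ++k)
                {
                    acc += a[m * K + k] * b[k * N + n]; // plain dot product
                }
                dst[m * N + n] = alpha * acc + beta * c[m * N + n];
            }
        }
        return dst;
    }

    int main()
    {
        const std::vector<float> a{ 1, 2, 3, 4 }, b{ 1, 0, 0, 1 }, c{ 1, 1, 1, 1 };
        const auto dst = gemm(a, b, c, 2, 2, 2, 1.f, 0.5f);
        std::printf("%g %g %g %g\n", dst[0], dst[1], dst[2], dst[3]); // 1.5 2.5 3.5 4.5
    }
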
diff --git a/tests/validation/fixtures/GEMMInterleave4x4Fixture.h b/tests/validation/fixtures/GEMMInterleave4x4Fixture.h
index 1f0a7429f1..9ad730c8e2 100644
--- a/tests/validation/fixtures/GEMMInterleave4x4Fixture.h
+++ b/tests/validation/fixtures/GEMMInterleave4x4Fixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,14 +47,13 @@ class GEMMInterleave4x4ValidationFixedPointFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(size_t x, size_t y, DataType data_type, int fractional_bits)
+ void setup(size_t x, size_t y, DataType data_type)
{
- _fractional_bits = fractional_bits;
- _data_type = data_type;
+ _data_type = data_type;
const TensorShape shape_a(x, y);
const TensorShape shape_b(static_cast<size_t>(x * 4.f), static_cast<size_t>(std::ceil(y / 4.f)));
- _target = compute_target(shape_a, shape_b, data_type, fractional_bits);
- _reference = compute_reference(shape_a, shape_b, data_type, fractional_bits);
+ _target = compute_target(shape_a, shape_b, data_type);
+ _reference = compute_reference(shape_a, shape_b, data_type);
}
protected:
@@ -76,11 +75,11 @@ protected:
}
}
- TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, DataType data_type, int fixed_point_position)
+ TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, DataType data_type)
{
// Create tensors
- TensorType a = create_tensor<TensorType>(shape_a, data_type, 1, fixed_point_position);
- TensorType b = create_tensor<TensorType>(shape_b, data_type, 1, fixed_point_position);
+ TensorType a = create_tensor<TensorType>(shape_a, data_type, 1);
+ TensorType b = create_tensor<TensorType>(shape_b, data_type, 1);
// Create and configure function
FunctionType f;
@@ -105,11 +104,11 @@ protected:
return b;
}
- SimpleTensor<T> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, DataType data_type, int fixed_point_position)
+ SimpleTensor<T> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, DataType data_type)
{
// Create reference
- SimpleTensor<T> a{ shape_a, data_type, 1, fixed_point_position };
- SimpleTensor<T> b{ shape_b, data_type, 1, fixed_point_position };
+ SimpleTensor<T> a{ shape_a, data_type, 1 };
+ SimpleTensor<T> b{ shape_b, data_type, 1 };
// Fill reference
fill(a, 0);
@@ -120,7 +119,6 @@ protected:
TensorType _target{};
SimpleTensor<T> _reference{};
- int _fractional_bits{};
DataType _data_type{};
};
@@ -131,7 +129,7 @@ public:
template <typename...>
void setup(size_t x, size_t y, DataType data_type)
{
- GEMMInterleave4x4ValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(x, y, data_type, 0);
+ GEMMInterleave4x4ValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(x, y, data_type);
}
};
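
The interleave fixture derives shape_b as (x * 4, ceil(y / 4)): four source rows are packed into one interleaved row. A worked example of that shape arithmetic:

    #include <cmath>
    #include <cstdio>

    int main()
    {
        const std::size_t x = 7, y = 10;
        const auto out_w = static_cast<std::size_t>(x * 4.f);
        const auto out_h = static_cast<std::size_t>(std::ceil(y / 4.f));
        std::printf("(%zu, %zu) -> (%zu, %zu)\n", x, y, out_w, out_h); // (7, 10) -> (28, 3)
    }
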
diff --git a/tests/validation/fixtures/GEMMTranspose1xWFixture.h b/tests/validation/fixtures/GEMMTranspose1xWFixture.h
index d83d5e9c06..48fa55e8cc 100644
--- a/tests/validation/fixtures/GEMMTranspose1xWFixture.h
+++ b/tests/validation/fixtures/GEMMTranspose1xWFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,15 +47,14 @@ class GEMMTranspose1xWValidationFixedPointFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(size_t x, size_t y, DataType data_type, int fractional_bits)
+ void setup(size_t x, size_t y, DataType data_type)
{
- _fractional_bits = fractional_bits;
- _data_type = data_type;
+ _data_type = data_type;
const TensorShape shape_a(x, y);
const unsigned int transpose_w = 16 / data_size_from_type(data_type);
const TensorShape shape_b(static_cast<size_t>(y * transpose_w), static_cast<size_t>(std::ceil(x / static_cast<float>(transpose_w))));
- _target = compute_target(shape_a, shape_b, data_type, fractional_bits);
- _reference = compute_reference(shape_a, shape_b, data_type, fractional_bits);
+ _target = compute_target(shape_a, shape_b, data_type);
+ _reference = compute_reference(shape_a, shape_b, data_type);
}
protected:
@@ -77,11 +76,11 @@ protected:
}
}
- TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, DataType data_type, int fixed_point_position)
+ TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, DataType data_type)
{
// Create tensors
- TensorType a = create_tensor<TensorType>(shape_a, data_type, 1, fixed_point_position);
- TensorType b = create_tensor<TensorType>(shape_b, data_type, 1, fixed_point_position);
+ TensorType a = create_tensor<TensorType>(shape_a, data_type, 1);
+ TensorType b = create_tensor<TensorType>(shape_b, data_type, 1);
// Create and configure function
FunctionType f;
@@ -107,10 +106,10 @@ protected:
return b;
}
- SimpleTensor<T> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, DataType data_type, int fixed_point_position)
+ SimpleTensor<T> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, DataType data_type)
{
// Create reference
- SimpleTensor<T> a{ shape_a, data_type, 1, fixed_point_position };
+ SimpleTensor<T> a{ shape_a, data_type, 1 };
// Fill reference
fill(a, 0);
@@ -120,7 +119,6 @@ protected:
TensorType _target{};
SimpleTensor<T> _reference{};
- int _fractional_bits{};
DataType _data_type{};
};
@@ -131,7 +129,7 @@ public:
template <typename...>
void setup(size_t x, size_t y, DataType data_type)
{
- GEMMTranspose1xWValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(x, y, data_type, 0);
+ GEMMTranspose1xWValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(x, y, data_type);
}
};
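
Here the transpose width depends on the element size, transpose_w = 16 / data_size_from_type(data_type), so F32 packs 4 elements per group and F16 packs 8. A worked example assuming a 4-byte F32 element:

    #include <cmath>
    #include <cstdio>

    int main()
    {
        const std::size_t  x = 10, y = 7, elem_size = 4; // F32
        const unsigned int transpose_w = 16 / elem_size;
        const auto out_w = static_cast<std::size_t>(y * transpose_w);
        const auto out_h = static_cast<std::size_t>(std::ceil(x / static_cast<float>(transpose_w)));
        std::printf("W=%u, (%zu, %zu) -> (%zu, %zu)\n", transpose_w, x, y, out_w, out_h); // W=4, (10, 7) -> (28, 3)
    }
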
diff --git a/tests/validation/fixtures/Im2ColFixture.h b/tests/validation/fixtures/Im2ColFixture.h
index 6e532e7803..6abea27102 100644
--- a/tests/validation/fixtures/Im2ColFixture.h
+++ b/tests/validation/fixtures/Im2ColFixture.h
@@ -81,8 +81,8 @@ protected:
TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, DataType data_type)
{
// Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, 0, _quant_info, _data_layout);
- TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, _quant_info);
+ TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, _quant_info, _data_layout);
+ TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, _quant_info);
// Create and configure function
FunctionType im2col_func;
@@ -110,8 +110,8 @@ protected:
void compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, DataType data_type)
{
// Create reference
- SimpleTensor<T> src{ input_shape, data_type, 1, 0, _quant_info, _data_layout };
- _reference = SimpleTensor<T>(output_shape, data_type, 1, 0, _quant_info, DataLayout::NCHW);
+ SimpleTensor<T> src{ input_shape, data_type, 1, _quant_info, _data_layout };
+ _reference = SimpleTensor<T>(output_shape, data_type, 1, _quant_info, DataLayout::NCHW);
// Fill reference
fill(src);
reference::im2col<T>(src, _reference, _kernel_dims, _conv_info, _has_bias);
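
For orientation, a tiny single-channel sketch of the im2col idea this fixture exercises: each output row is one flattened kernel window (stride 1, no padding, no bias column — much simpler than the real reference::im2col):

    #include <cstdio>
    #include <vector>

    std::vector<float> im2col(const std::vector<float> &src, int w, int h, int k)
    {
        const int out_w = w - k + 1, out_h = h - k + 1;
        std::vector<float> dst;
        dst.reserve(static_cast<std::size_t>(out_w) * out_h * k * k);
        for(int y = 0; y < out_h; ++y)
            for(int x = 0; x < out_w; ++x)
                for(int ky = 0; ky < k; ++ky)
                    for(int kx = 0; kx < k; ++kx)
                        dst.push_back(src[(y + ky) * w + (x + kx)]); // one window element
        return dst;
    }

    int main()
    {
        const std::vector<float> src{ 1, 2, 3, 4, 5, 6, 7, 8, 9 }; // 3x3 image
        const auto cols = im2col(src, 3, 3, 2);                    // four 2x2 windows
        std::printf("windows=%zu\n", cols.size() / 4);             // windows=4
    }
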
diff --git a/tests/validation/fixtures/NormalizationLayerFixture.h b/tests/validation/fixtures/NormalizationLayerFixture.h
index e7d83c7735..f4f9c64944 100644
--- a/tests/validation/fixtures/NormalizationLayerFixture.h
+++ b/tests/validation/fixtures/NormalizationLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,40 +43,30 @@ namespace test
namespace validation
{
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class NormalizationValidationFixedPointFixture : public framework::Fixture
+class NormalizationValidationGenericFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape shape, NormType norm_type, int norm_size, float beta, bool is_scaled, DataType data_type, int fractional_bits)
+ void setup(TensorShape shape, NormType norm_type, int norm_size, float beta, bool is_scaled, DataType data_type)
{
- _fractional_bits = fractional_bits;
NormalizationLayerInfo info(norm_type, norm_size, 5, beta, 1.f, is_scaled);
- _target = compute_target(shape, info, data_type, fractional_bits);
- _reference = compute_reference(shape, info, data_type, fractional_bits);
+ _target = compute_target(shape, info, data_type);
+ _reference = compute_reference(shape, info, data_type);
}
protected:
template <typename U>
void fill(U &&tensor)
{
- if(_fractional_bits == 0)
- {
- library->fill_tensor_uniform(tensor, 0);
- }
- else
- {
- const int one_fixed = 1 << _fractional_bits;
- std::uniform_int_distribution<> distribution(-one_fixed, one_fixed);
- library->fill(tensor, distribution, 0);
- }
+ library->fill_tensor_uniform(tensor, 0);
}
- TensorType compute_target(const TensorShape &shape, NormalizationLayerInfo info, DataType data_type, int fixed_point_position = 0)
+ TensorType compute_target(const TensorShape &shape, NormalizationLayerInfo info, DataType data_type)
{
// Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position);
- TensorType dst = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position);
+ TensorType src = create_tensor<TensorType>(shape, data_type, 1);
+ TensorType dst = create_tensor<TensorType>(shape, data_type, 1);
// Create and configure function
FunctionType norm_layer;
@@ -101,10 +91,10 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &shape, NormalizationLayerInfo info, DataType data_type, int fixed_point_position = 0)
+ SimpleTensor<T> compute_reference(const TensorShape &shape, NormalizationLayerInfo info, DataType data_type)
{
// Create reference
- SimpleTensor<T> src{ shape, data_type, 1, fixed_point_position };
+ SimpleTensor<T> src{ shape, data_type, 1 };
// Fill reference
fill(src);
@@ -114,17 +104,16 @@ protected:
TensorType _target{};
SimpleTensor<T> _reference{};
- int _fractional_bits{};
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class NormalizationValidationFixture : public NormalizationValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>
+class NormalizationValidationFixture : public NormalizationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
template <typename...>
void setup(TensorShape shape, NormType norm_type, int norm_size, float beta, bool is_scaled, DataType data_type)
{
- NormalizationValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, norm_type, norm_size, beta, is_scaled, data_type, 0);
+ NormalizationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, norm_type, norm_size, beta, is_scaled, data_type);
}
};
} // namespace validation
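
The renamed NormalizationValidationGenericFixture keeps the structure every fixture in this patch shares: compute_target and compute_reference consume identically seeded fills, so the framework can compare their outputs element-wise. A minimal standalone sketch of that pattern; run_target and run_reference are placeholders, not library calls:

    #include <cstdio>
    #include <random>
    #include <vector>

    std::vector<float> filled(std::size_t n, unsigned seed)
    {
        std::mt19937                     gen(seed);
        std::uniform_real_distribution<> dist(-1.f, 1.f);
        std::vector<float> v(n);
        for(auto &e : v)
        {
            e = static_cast<float>(dist(gen));
        }
        return v;
    }

    std::vector<float> run_target(std::vector<float> src)    { return src; } // function under test
    std::vector<float> run_reference(std::vector<float> src) { return src; } // plain C++ model

    int main()
    {
        const auto target    = run_target(filled(8, 0));
        const auto reference = run_reference(filled(8, 0)); // same seed -> same input
        std::printf("match=%d\n", target == reference);
    }
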
diff --git a/tests/validation/fixtures/PoolingLayerFixture.h b/tests/validation/fixtures/PoolingLayerFixture.h
index 27b033a06c..24539545ca 100644
--- a/tests/validation/fixtures/PoolingLayerFixture.h
+++ b/tests/validation/fixtures/PoolingLayerFixture.h
@@ -47,14 +47,13 @@ class PoolingLayerValidationGenericFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape shape, PoolingLayerInfo pool_info, DataType data_type, DataLayout data_layout, int fractional_bits, QuantizationInfo quantization_info)
+ void setup(TensorShape shape, PoolingLayerInfo pool_info, DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info)
{
- _fractional_bits = fractional_bits;
_quantization_info = quantization_info;
_pool_info = pool_info;
- _target = compute_target(shape, pool_info, data_type, data_layout, fractional_bits, quantization_info);
- _reference = compute_reference(shape, pool_info, data_type, fractional_bits, quantization_info);
+ _target = compute_target(shape, pool_info, data_type, data_layout, quantization_info);
+ _reference = compute_reference(shape, pool_info, data_type, quantization_info);
}
protected:
@@ -72,14 +71,14 @@ protected:
}
else
{
- const int one_fixed = 1 << _fractional_bits;
+ const int one_fixed = 1;
std::uniform_int_distribution<> distribution(-one_fixed, one_fixed);
library->fill(tensor, distribution, 0);
}
}
TensorType compute_target(TensorShape shape, PoolingLayerInfo info,
- DataType data_type, DataLayout data_layout, int fixed_point_position, QuantizationInfo quantization_info)
+ DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info)
{
// Change shape in case of NHWC.
if(data_layout == DataLayout::NHWC)
@@ -88,7 +87,7 @@ protected:
}
// Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position, quantization_info, data_layout);
+ TensorType src = create_tensor<TensorType>(shape, data_type, 1, quantization_info, data_layout);
TensorType dst;
// Create and configure function
@@ -115,10 +114,10 @@ protected:
}
SimpleTensor<T> compute_reference(const TensorShape &shape, PoolingLayerInfo info,
- DataType data_type, int fixed_point_position, QuantizationInfo quantization_info)
+ DataType data_type, QuantizationInfo quantization_info)
{
// Create reference
- SimpleTensor<T> src{ shape, data_type, 1, fixed_point_position, quantization_info };
+ SimpleTensor<T> src{ shape, data_type, 1, quantization_info };
// Fill reference
fill(src);
@@ -128,7 +127,6 @@ protected:
TensorType _target{};
SimpleTensor<T> _reference{};
- int _fractional_bits{};
QuantizationInfo _quantization_info{};
PoolingLayerInfo _pool_info{};
};
@@ -141,7 +139,7 @@ public:
void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, DataLayout data_layout)
{
PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, pad_stride_info, exclude_padding),
- data_type, data_layout, 0, QuantizationInfo());
+ data_type, data_layout, QuantizationInfo());
}
};
@@ -150,10 +148,10 @@ class PoolingLayerValidationFixedPointFixture : public PoolingLayerValidationGen
{
public:
template <typename...>
- void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, int fractional_bits)
+ void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type)
{
PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, pad_stride_info, exclude_padding),
- data_type, DataLayout::NCHW, fractional_bits, QuantizationInfo());
+ data_type, DataLayout::NCHW, QuantizationInfo());
}
};
@@ -166,7 +164,7 @@ public:
QuantizationInfo quantization_info, DataLayout data_layout = DataLayout::NCHW)
{
PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, pad_stride_info, exclude_padding),
- data_type, data_layout, 0, quantization_info);
+ data_type, data_layout, quantization_info);
}
};
@@ -177,7 +175,7 @@ public:
template <typename...>
void setup(TensorShape src_shape, PoolingLayerInfo pool_info, DataType data_type)
{
- PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(src_shape, pool_info, data_type, DataLayout::NCHW, 0, QuantizationInfo());
+ PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(src_shape, pool_info, data_type, DataLayout::NCHW, QuantizationInfo());
}
};
@@ -188,7 +186,7 @@ public:
template <typename...>
void setup(TensorShape shape, PoolingType pool_type, DataType data_type, DataLayout data_layout = DataLayout::NCHW)
{
- PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type), data_type, DataLayout::NCHW, 0, QuantizationInfo());
+ PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type), data_type, DataLayout::NCHW, QuantizationInfo());
}
};
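
Note the quantized branch of fill() survives here, but its range degenerates: with one_fixed cut from 1 << _fractional_bits to 1, quantized tensors are now filled only from {-1, 0, 1}. That reads like a mechanical residue of the removal rather than a deliberate narrowing. A standalone check of the resulting distribution:

    #include <cstdio>
    #include <random>

    int main()
    {
        const int one_fixed = 1; // what "1 << _fractional_bits" collapsed to
        std::mt19937                    gen(0);
        std::uniform_int_distribution<> distribution(-one_fixed, one_fixed);
        int counts[3] = { 0, 0, 0 };
        for(int i = 0; i < 9000; ++i)
        {
            ++counts[distribution(gen) + 1];
        }
        std::printf("-1:%d  0:%d  1:%d\n", counts[0], counts[1], counts[2]);
    }
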
diff --git a/tests/validation/fixtures/ScaleFixture.h b/tests/validation/fixtures/ScaleFixture.h
index 05e1bf5ffe..5413147699 100644
--- a/tests/validation/fixtures/ScaleFixture.h
+++ b/tests/validation/fixtures/ScaleFixture.h
@@ -100,7 +100,7 @@ protected:
}
// Create tensors
- TensorType src = create_tensor<TensorType>(shape, _data_type, 1, 0, QuantizationInfo(), data_layout);
+ TensorType src = create_tensor<TensorType>(shape, _data_type, 1, QuantizationInfo(), data_layout);
const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
@@ -108,7 +108,7 @@ protected:
TensorShape shape_scaled(shape);
shape_scaled.set(idx_width, shape[idx_width] * scale_x);
shape_scaled.set(idx_height, shape[idx_height] * scale_y);
- TensorType dst = create_tensor<TensorType>(shape_scaled, _data_type, 1, 0, QuantizationInfo(), data_layout);
+ TensorType dst = create_tensor<TensorType>(shape_scaled, _data_type, 1, QuantizationInfo(), data_layout);
// Create and configure function
FunctionType scale;
@@ -137,7 +137,7 @@ protected:
InterpolationPolicy policy, BorderMode border_mode, T constant_border_value, SamplingPolicy sampling_policy)
{
// Create reference
- SimpleTensor<T> src{ shape, _data_type, 1, 0, QuantizationInfo() };
+ SimpleTensor<T> src{ shape, _data_type, 1, QuantizationInfo() };
// Fill reference
fill(src);
diff --git a/tests/validation/fixtures/SoftmaxLayerFixture.h b/tests/validation/fixtures/SoftmaxLayerFixture.h
index c2ab2e2ef6..59ce5192ff 100644
--- a/tests/validation/fixtures/SoftmaxLayerFixture.h
+++ b/tests/validation/fixtures/SoftmaxLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,13 +47,12 @@ class SoftmaxValidationGenericFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape shape, DataType data_type, int fractional_bits, QuantizationInfo quantization_info, float beta)
+ void setup(TensorShape shape, DataType data_type, QuantizationInfo quantization_info, float beta)
{
- _fractional_bits = fractional_bits;
_quantization_info = quantization_info;
- _target = compute_target(shape, data_type, fractional_bits, quantization_info, beta);
- _reference = compute_reference(shape, data_type, fractional_bits, quantization_info, beta);
+ _target = compute_target(shape, data_type, quantization_info, beta);
+ _reference = compute_reference(shape, data_type, quantization_info, beta);
}
protected:
@@ -72,18 +71,18 @@ protected:
}
else
{
- const int one_fixed = 1 << _fractional_bits;
+ const int one_fixed = 1;
std::uniform_int_distribution<> distribution(-one_fixed, one_fixed);
library->fill(tensor, distribution, 0);
}
}
- TensorType compute_target(const TensorShape &shape, DataType data_type, int fixed_point_position,
+ TensorType compute_target(const TensorShape &shape, DataType data_type,
QuantizationInfo quantization_info, float beta)
{
// Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position, quantization_info);
- TensorType dst = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position, QuantizationInfo(1.f / 256, 0));
+ TensorType src = create_tensor<TensorType>(shape, data_type, 1, quantization_info);
+ TensorType dst = create_tensor<TensorType>(shape, data_type, 1, QuantizationInfo(1.f / 256, 0));
// Create and configure function
FunctionType smx_layer;
@@ -108,11 +107,11 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type, int fixed_point_position,
+ SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type,
QuantizationInfo quantization_info, float beta)
{
// Create reference
- SimpleTensor<T> src{ shape, data_type, 1, fixed_point_position, quantization_info };
+ SimpleTensor<T> src{ shape, data_type, 1, quantization_info };
// Fill reference
fill(src);
@@ -122,7 +121,6 @@ protected:
TensorType _target{};
SimpleTensor<T> _reference{};
- int _fractional_bits{};
QuantizationInfo _quantization_info{};
};
@@ -135,7 +133,6 @@ public:
{
SoftmaxValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape,
data_type,
- 0,
QuantizationInfo(),
beta);
}
@@ -146,11 +143,10 @@ class SoftmaxValidationFixedPointFixture : public SoftmaxValidationGenericFixtur
{
public:
template <typename...>
- void setup(TensorShape shape, DataType data_type, int fixed_point_position)
+ void setup(TensorShape shape, DataType data_type)
{
SoftmaxValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape,
data_type,
- fixed_point_position,
QuantizationInfo(),
1.0f);
}
@@ -165,7 +161,6 @@ public:
{
SoftmaxValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape,
data_type,
- 0,
quantization_info,
beta);
}
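
The destination keeps QuantizationInfo(1.f / 256, 0) because softmax outputs lie in [0, 1]: with scale 1/256 and zero offset, the uint8 range [0, 255] dequantizes to [0, 255/256]. A sketch of that affine mapping:

    #include <cstdint>
    #include <cstdio>

    // value = (quantized - offset) * scale
    float dequantize(uint8_t q, float scale, int32_t offset)
    {
        return (static_cast<int32_t>(q) - offset) * scale;
    }

    int main()
    {
        const float scale = 1.f / 256;
        std::printf("q=0   -> %f\n", dequantize(0, scale, 0));   // 0.000000
        std::printf("q=128 -> %f\n", dequantize(128, scale, 0)); // 0.500000
        std::printf("q=255 -> %f\n", dequantize(255, scale, 0)); // 0.996094
    }
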
diff --git a/tests/validation/fixtures/WidthConcatenateLayerFixture.h b/tests/validation/fixtures/WidthConcatenateLayerFixture.h
index cf9b12eab6..caad0feee0 100644
--- a/tests/validation/fixtures/WidthConcatenateLayerFixture.h
+++ b/tests/validation/fixtures/WidthConcatenateLayerFixture.h
@@ -92,12 +92,12 @@ protected:
for(const auto &shape : shapes)
{
- srcs.emplace_back(create_tensor<TensorType>(shape, data_type, 1, _fractional_bits));
+ srcs.emplace_back(create_tensor<TensorType>(shape, data_type, 1));
src_ptrs.emplace_back(&srcs.back());
}
TensorShape dst_shape = misc::shape_calculator::calculate_width_concatenate_shape(src_ptrs);
- TensorType dst = create_tensor<TensorType>(dst_shape, data_type, 1, _fractional_bits);
+ TensorType dst = create_tensor<TensorType>(dst_shape, data_type, 1);
// Create and configure function
FunctionType width_concat;
@@ -141,7 +141,7 @@ protected:
int i = 0;
for(const auto &shape : shapes)
{
- srcs.emplace_back(shape, data_type, 1, _fractional_bits);
+ srcs.emplace_back(shape, data_type, 1);
fill(srcs.back(), i++);
}
@@ -150,9 +150,6 @@ protected:
TensorType _target{};
SimpleTensor<T> _reference{};
-
-private:
- int _fractional_bits{ 1 };
};
} // namespace validation
} // namespace test
diff --git a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
index ac168ebe3c..f1660e6e90 100644
--- a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
@@ -201,10 +201,10 @@ protected:
}
// Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
- TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
- TensorType bias = create_tensor<TensorType>(bias_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
- TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
+ TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
+ TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, QuantizationInfo(), data_layout);
+ TensorType bias = create_tensor<TensorType>(bias_shape, data_type, 1, QuantizationInfo(), data_layout);
+ TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), data_layout);
// Create and configure function
FunctionType conv;
@@ -340,8 +340,8 @@ protected:
permute(input_shape, PermutationVector(2U, 0U, 1U));
}
- TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
- TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, QuantizationInfo());
+ TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
+ TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo());
// Create and configure function
FunctionType transf;
@@ -369,7 +369,7 @@ protected:
SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
{
// Create reference
- SimpleTensor<T> src{ input_shape, data_type, 1, 0, QuantizationInfo() };
+ SimpleTensor<T> src{ input_shape, data_type, 1, QuantizationInfo() };
// Fill reference
fill(src, 0, -1.f, 1.f);
@@ -424,8 +424,8 @@ protected:
}
// Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
- TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, QuantizationInfo());
+ TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
+ TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo());
// Create and configure function
FunctionType filter_transform;
@@ -452,7 +452,7 @@ protected:
SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
{
// Create reference
- SimpleTensor<T> src{ input_shape, data_type, 1, 0, QuantizationInfo() };
+ SimpleTensor<T> src{ input_shape, data_type, 1, QuantizationInfo() };
// Fill reference
fill(src, 0, -1.f, 1.f);
@@ -502,7 +502,7 @@ protected:
// Create tensors
TensorType src = create_tensor<TensorType>(input_shape, data_type);
- TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, QuantizationInfo(), winograd_info.output_data_layout);
+ TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), winograd_info.output_data_layout);
// Create and configure function
FunctionType output_transform;
diff --git a/tests/validation/reference/AbsoluteDifference.cpp b/tests/validation/reference/AbsoluteDifference.cpp
index f518e67324..f9fce5b42a 100644
--- a/tests/validation/reference/AbsoluteDifference.cpp
+++ b/tests/validation/reference/AbsoluteDifference.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,7 +24,6 @@
#include "AbsoluteDifference.h"
#include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
#include "tests/validation/Helpers.h"
namespace arm_compute
diff --git a/tests/validation/reference/Accumulate.cpp b/tests/validation/reference/Accumulate.cpp
index 29a2007bbd..7f34be9663 100644
--- a/tests/validation/reference/Accumulate.cpp
+++ b/tests/validation/reference/Accumulate.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,7 +24,6 @@
#include "Accumulate.h"
#include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
#include "tests/validation/Helpers.h"
namespace arm_compute
diff --git a/tests/validation/reference/ActivationLayer.cpp b/tests/validation/reference/ActivationLayer.cpp
index df7f6534bc..9455effd72 100644
--- a/tests/validation/reference/ActivationLayer.cpp
+++ b/tests/validation/reference/ActivationLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,7 +24,6 @@
#include "ActivationLayer.h"
#include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
#include "tests/validation/Helpers.h"
namespace arm_compute
@@ -39,7 +38,7 @@ template <typename T, typename std::enable_if<is_floating_point<T>::value, int>:
SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo info)
{
// Create reference
- SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };
+ SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
// Compute reference
const T a(info.a());
@@ -92,68 +91,6 @@ SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo
return dst;
}
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type>
-SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo info)
-{
- using namespace fixed_point_arithmetic;
-
- // Create reference
- SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };
-
- // Compute reference
- const int fixed_point_position = src.fixed_point_position();
- const fixed_point<T> a(info.a(), fixed_point_position);
- const fixed_point<T> b(info.b(), fixed_point_position);
- const fixed_point<T> const_0(0, fixed_point_position);
- const fixed_point<T> const_1(1, fixed_point_position);
-
- for(int i = 0; i < src.num_elements(); ++i)
- {
- fixed_point<T> x(src[i], fixed_point_position, true);
-
- switch(info.activation())
- {
- case ActivationLayerInfo::ActivationFunction::ABS:
- dst[i] = abs(x).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::LINEAR:
- dst[i] = add(b, mul(a, x)).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::LOGISTIC:
- dst[i] = (const_1 / (const_1 + exp(-x))).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::RELU:
- dst[i] = max(const_0, x).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
- dst[i] = min(a, max(const_0, x)).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
- dst[i] = min(a, max(b, x)).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
- dst[i] = (x > const_0) ? x.raw() : mul(a, x).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
- dst[i] = log(const_1 + exp(x)).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::SQRT:
- dst[i] = (const_1 / inv_sqrt(x)).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::SQUARE:
- dst[i] = mul(x, x).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::TANH:
- dst[i] = mul(a, tanh(mul(b, x))).raw();
- break;
- default:
- ARM_COMPUTE_ERROR("Unsupported activation function");
- }
- }
-
- return dst;
-}
-
template <>
SimpleTensor<uint8_t> activation_layer<uint8_t>(const SimpleTensor<uint8_t> &src, ActivationLayerInfo info)
{
@@ -165,8 +102,6 @@ SimpleTensor<uint8_t> activation_layer<uint8_t>(const SimpleTensor<uint8_t> &src
template SimpleTensor<float> activation_layer(const SimpleTensor<float> &src, ActivationLayerInfo info);
template SimpleTensor<half> activation_layer(const SimpleTensor<half> &src, ActivationLayerInfo info);
-template SimpleTensor<qint8_t> activation_layer(const SimpleTensor<qint8_t> &src, ActivationLayerInfo info);
-template SimpleTensor<qint16_t> activation_layer(const SimpleTensor<qint16_t> &src, ActivationLayerInfo info);
} // namespace reference
} // namespace validation
} // namespace test
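
With the integral overload and its fixed-point switch deleted, only the floating-point path (and the uint8_t specialisation) remain. A standalone sketch of the ReLU family those deleted cases mirrored — illustrative free functions, not the library's activation_layer:

    #include <algorithm>
    #include <cstdio>

    float relu(float x)                              { return std::max(0.f, x); }              // RELU
    float bounded_relu(float x, float a)             { return std::min(a, std::max(0.f, x)); } // BOUNDED_RELU
    float lu_bounded_relu(float x, float a, float b) { return std::min(a, std::max(b, x)); }   // LU_BOUNDED_RELU

    int main()
    {
        std::printf("%g %g %g\n", relu(-1.f),
                    bounded_relu(7.f, 6.f),
                    lu_bounded_relu(-7.f, 6.f, -6.f)); // 0 6 -6
    }
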
diff --git a/tests/validation/reference/ArithmeticAddition.cpp b/tests/validation/reference/ArithmeticAddition.cpp
index f26838dcb8..4569277103 100644
--- a/tests/validation/reference/ArithmeticAddition.cpp
+++ b/tests/validation/reference/ArithmeticAddition.cpp
@@ -24,7 +24,6 @@
#include "ArithmeticAddition.h"
#include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
#include "tests/validation/Helpers.h"
namespace arm_compute
diff --git a/tests/validation/reference/ArithmeticDivision.cpp b/tests/validation/reference/ArithmeticDivision.cpp
index 934e89052f..0102231993 100644
--- a/tests/validation/reference/ArithmeticDivision.cpp
+++ b/tests/validation/reference/ArithmeticDivision.cpp
@@ -24,7 +24,6 @@
#include "ArithmeticDivision.h"
#include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
#include "tests/validation/Helpers.h"
namespace arm_compute
diff --git a/tests/validation/reference/BatchNormalizationLayer.cpp b/tests/validation/reference/BatchNormalizationLayer.cpp
index c8badacc79..3d1a6ed7d7 100644
--- a/tests/validation/reference/BatchNormalizationLayer.cpp
+++ b/tests/validation/reference/BatchNormalizationLayer.cpp
@@ -36,56 +36,11 @@ namespace validation
{
namespace reference
{
-// Batch Normalization Layer for fixed point type
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type *>
-SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
- ActivationLayerInfo act_info, int fixed_point_position)
-{
- ARM_COMPUTE_UNUSED(act_info);
- SimpleTensor<T> result(src.shape(), src.data_type());
-
- const auto cols = static_cast<int>(src.shape()[0]);
- const auto rows = static_cast<int>(src.shape()[1]);
- const auto depth = static_cast<int>(src.shape()[2]);
- const int upper_dims = src.shape().total_size() / (cols * rows * depth);
-
- for(int r = 0; r < upper_dims; ++r)
- {
- for(int i = 0; i < depth; ++i)
- {
- for(int k = 0; k < rows; ++k)
- {
- for(int l = 0; l < cols; ++l)
- {
- const int pos = l + k * cols + i * rows * cols + r * cols * rows * depth;
-
- fixed_point_arithmetic::fixed_point<T> src_qs(src[pos], fixed_point_position, true);
- fixed_point_arithmetic::fixed_point<T> var_qs(var[i], fixed_point_position, true);
- fixed_point_arithmetic::fixed_point<T> mean_qs(mean[i], fixed_point_position, true);
- fixed_point_arithmetic::fixed_point<T> beta_qs(beta[i], fixed_point_position, true);
- fixed_point_arithmetic::fixed_point<T> gamma_qs(gamma[i], fixed_point_position, true);
- fixed_point_arithmetic::fixed_point<T> epsilon_qs(epsilon, fixed_point_position);
-
- auto denominator = fixed_point_arithmetic::inv_sqrt(var_qs + epsilon_qs);
- auto numerator = src_qs - mean_qs;
- auto x_bar = numerator * denominator;
- x_bar = beta_qs + x_bar * gamma_qs;
- result[pos] = x_bar.raw();
- }
- }
- }
- }
-
- return result;
-}
-
// Batch Normalization Layer for floating point type
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type *>
SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
- ActivationLayerInfo act_info, int fixed_point_position)
+ ActivationLayerInfo act_info)
{
- ARM_COMPUTE_UNUSED(fixed_point_position);
-
SimpleTensor<T> result(src.shape(), src.data_type());
const auto cols = static_cast<int>(src.shape()[0]);
@@ -119,14 +74,10 @@ SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const Simp
return result;
}
template SimpleTensor<float> batch_normalization_layer(const SimpleTensor<float> &src, const SimpleTensor<float> &mean, const SimpleTensor<float> &var, const SimpleTensor<float> &beta,
- const SimpleTensor<float> &gamma, float epsilon, ActivationLayerInfo act_info, int fixed_point_position);
-template SimpleTensor<int8_t> batch_normalization_layer(const SimpleTensor<int8_t> &src, const SimpleTensor<int8_t> &mean, const SimpleTensor<int8_t> &var, const SimpleTensor<int8_t> &beta,
- const SimpleTensor<int8_t> &gamma, float epsilon, ActivationLayerInfo act_info, int fixed_point_position);
-template SimpleTensor<int16_t> batch_normalization_layer(const SimpleTensor<int16_t> &src, const SimpleTensor<int16_t> &mean, const SimpleTensor<int16_t> &var, const SimpleTensor<int16_t> &beta,
- const SimpleTensor<int16_t> &gamma, float epsilon, ActivationLayerInfo act_info, int fixed_point_position);
+ const SimpleTensor<float> &gamma, float epsilon, ActivationLayerInfo act_info);
template SimpleTensor<half> batch_normalization_layer(const SimpleTensor<half> &src, const SimpleTensor<half> &mean, const SimpleTensor<half> &var,
const SimpleTensor<half> &beta,
- const SimpleTensor<half> &gamma, float epsilon, ActivationLayerInfo act_info, int fixed_point_position);
+ const SimpleTensor<half> &gamma, float epsilon, ActivationLayerInfo act_info);
} // namespace reference
} // namespace validation
diff --git a/tests/validation/reference/BatchNormalizationLayer.h b/tests/validation/reference/BatchNormalizationLayer.h
index 329909dab4..b45d820412 100644
--- a/tests/validation/reference/BatchNormalizationLayer.h
+++ b/tests/validation/reference/BatchNormalizationLayer.h
@@ -37,13 +37,11 @@ namespace reference
{
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
- ActivationLayerInfo act_info,
- int fixed_point_position);
+ ActivationLayerInfo act_info);
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
- ActivationLayerInfo act_info,
- int fixed_point_position);
+ ActivationLayerInfo act_info);
} // namespace reference
} // namespace validation
} // namespace test
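
The deleted fixed-point body computed the same per-element normalization the float path keeps: x_bar = (x - mean) / sqrt(var + epsilon), followed by out = beta + x_bar * gamma. A standalone float sketch of that formula:

    #include <cmath>
    #include <cstdio>

    float batch_norm(float x, float mean, float var, float beta, float gamma, float epsilon)
    {
        const float x_bar = (x - mean) / std::sqrt(var + epsilon); // normalize
        return beta + x_bar * gamma;                               // scale and shift
    }

    int main()
    {
        std::printf("%f\n", batch_norm(2.f, 1.f, 4.f, 0.5f, 2.f, 1e-5f)); // ~1.499999
    }
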
diff --git a/tests/validation/reference/ChannelShuffle.cpp b/tests/validation/reference/ChannelShuffle.cpp
index c4d8d50e3d..b8aa9203ab 100644
--- a/tests/validation/reference/ChannelShuffle.cpp
+++ b/tests/validation/reference/ChannelShuffle.cpp
@@ -39,7 +39,7 @@ template <typename T>
SimpleTensor<T> channel_shuffle(const SimpleTensor<T> &src, int num_groups)
{
// Create reference
- SimpleTensor<T> dst{ src.shape(), src.data_type(), src.num_channels(), src.fixed_point_position(), src.quantization_info() };
+ SimpleTensor<T> dst{ src.shape(), src.data_type(), src.num_channels(), src.quantization_info() };
const int M = src.shape()[0];
const int N = src.shape()[1];
diff --git a/tests/validation/reference/ConvolutionLayer.cpp b/tests/validation/reference/ConvolutionLayer.cpp
index fe558ba4af..00c839d2df 100644
--- a/tests/validation/reference/ConvolutionLayer.cpp
+++ b/tests/validation/reference/ConvolutionLayer.cpp
@@ -108,7 +108,7 @@ SimpleTensor<T> convolution_layer(const SimpleTensor<T> &src, const SimpleTensor
const Size2D &dilation)
{
// Create reference
- SimpleTensor<T> dst{ output_shape, src.data_type(), 1, src.fixed_point_position(), src.quantization_info() };
+ SimpleTensor<T> dst{ output_shape, src.data_type(), 1, src.quantization_info() };
if(src.data_layout() == DataLayout::NHWC)
{
@@ -128,10 +128,6 @@ template SimpleTensor<float> convolution_layer(const SimpleTensor<float> &src, c
const PadStrideInfo &info, const Size2D &dilation);
template SimpleTensor<half> convolution_layer(const SimpleTensor<half> &src, const SimpleTensor<half> &weights, const SimpleTensor<half> &bias, const TensorShape &output_shape,
const PadStrideInfo &info, const Size2D &dilation);
-template SimpleTensor<qint8_t> convolution_layer(const SimpleTensor<qint8_t> &src, const SimpleTensor<qint8_t> &weights, const SimpleTensor<qint8_t> &bias, const TensorShape &output_shape,
- const PadStrideInfo &info, const Size2D &dilation);
-template SimpleTensor<qint16_t> convolution_layer(const SimpleTensor<qint16_t> &src, const SimpleTensor<qint16_t> &weights, const SimpleTensor<qint16_t> &bias, const TensorShape &output_shape,
- const PadStrideInfo &info, const Size2D &dilation);
template SimpleTensor<uint8_t> convolution_layer(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &bias, const TensorShape &output_shape,
const PadStrideInfo &info, const Size2D &dilation);
} // namespace reference
diff --git a/tests/validation/reference/DeconvolutionLayer.cpp b/tests/validation/reference/DeconvolutionLayer.cpp
index 35437084b8..d073bbf7a1 100644
--- a/tests/validation/reference/DeconvolutionLayer.cpp
+++ b/tests/validation/reference/DeconvolutionLayer.cpp
@@ -46,7 +46,7 @@ SimpleTensor<T> deconvolution_layer(const SimpleTensor<T> &src, const SimpleTens
int out_y = src.shape().y() + (src.shape().y() - 1) * (stride_y - 1) + a.second + 2 * info.pad().second;
scaled_shape.set(0, out_x);
scaled_shape.set(1, out_y);
- SimpleTensor<T> scaled{ scaled_shape, src.data_type(), 1, src.fixed_point_position() };
+ SimpleTensor<T> scaled{ scaled_shape, src.data_type(), 1 };
const int width_in = src.shape().x();
const int height_in = src.shape().y();
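
[Editor's note] The upscaled-shape arithmetic shown above survives unchanged; only the tensor construction loses its fixed-point argument. A hypothetical helper makes the formula concrete:

// Illustrative only: the upscaled dimension used for the internal tensor,
// following out = in + (in - 1) * (stride - 1) + inner_border + 2 * pad.
int deconv_scaled_dim(int in, int stride, int inner_border, int pad)
{
    return in + (in - 1) * (stride - 1) + inner_border + 2 * pad;
}
// e.g. deconv_scaled_dim(4, 2, 1, 1) == 4 + 3 * 1 + 1 + 2 == 10
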
diff --git a/tests/validation/reference/DepthConcatenateLayer.cpp b/tests/validation/reference/DepthConcatenateLayer.cpp
index 9a7248493d..c9a23520c7 100644
--- a/tests/validation/reference/DepthConcatenateLayer.cpp
+++ b/tests/validation/reference/DepthConcatenateLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -95,8 +95,6 @@ SimpleTensor<T> depthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs)
template SimpleTensor<float> depthconcatenate_layer(const std::vector<SimpleTensor<float>> &srcs);
template SimpleTensor<half> depthconcatenate_layer(const std::vector<SimpleTensor<half>> &srcs);
-template SimpleTensor<qint8_t> depthconcatenate_layer(const std::vector<SimpleTensor<qint8_t>> &srcs);
-template SimpleTensor<qint16_t> depthconcatenate_layer(const std::vector<SimpleTensor<qint16_t>> &srcs);
} // namespace reference
} // namespace validation
} // namespace test
diff --git a/tests/validation/reference/DepthConvertLayer.cpp b/tests/validation/reference/DepthConvertLayer.cpp
index dd095b8912..022007720a 100644
--- a/tests/validation/reference/DepthConvertLayer.cpp
+++ b/tests/validation/reference/DepthConvertLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -36,44 +36,6 @@ namespace validation
{
namespace reference
{
-template < typename T1, typename T2, typename std::enable_if < std::is_integral<T1>::value &&std::is_floating_point<T2>::value, int >::type >
-SimpleTensor<T2> depth_convert(const SimpleTensor<T1> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift)
-{
- ARM_COMPUTE_UNUSED(policy);
- ARM_COMPUTE_UNUSED(shift);
-
- using namespace fixed_point_arithmetic;
- SimpleTensor<T2> result(src.shape(), dt_out);
-
- const int fixed_point_position = src.fixed_point_position();
-
- for(int i = 0; i < src.num_elements(); ++i)
- {
- result[i] = static_cast<float>(fixed_point<T1>(src[i], fixed_point_position, true));
- }
-
- return result;
-}
-
-template < typename T1, typename T2, typename std::enable_if < std::is_floating_point<T1>::value &&std::is_integral<T2>::value, int >::type >
-SimpleTensor<T2> depth_convert(const SimpleTensor<T1> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift)
-{
- ARM_COMPUTE_UNUSED(policy);
- ARM_COMPUTE_UNUSED(shift);
-
- using namespace fixed_point_arithmetic;
- SimpleTensor<T2> result(src.shape(), dt_out, 1, src.fixed_point_position());
-
- const int fixed_point_position = result.fixed_point_position();
-
- for(int i = 0; i < src.num_elements(); ++i)
- {
- result[i] = fixed_point<T2>(src[i], fixed_point_position).raw();
- }
-
- return result;
-}
-
template < typename T1, typename T2, typename std::enable_if < std::is_integral<T1>::value &&std::is_integral<T2>::value &&!std::is_same<T1, T2>::value, int >::type >
SimpleTensor<T2> depth_convert(const SimpleTensor<T1> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift)
{
@@ -126,20 +88,6 @@ SimpleTensor<T2> depth_convert(const SimpleTensor<T1> &src, DataType dt_out, Con
return result;
}
-template < typename T1, typename T2, typename std::enable_if < std::is_floating_point<T1>::value &&is_floating_point<T2>::value, int >::type >
-SimpleTensor<T2> depth_convert(const SimpleTensor<T1> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift)
-{
- ARM_COMPUTE_UNUSED(policy);
- ARM_COMPUTE_UNUSED(shift);
-
- SimpleTensor<T2> result(src.shape(), dt_out);
-
- for(int i = 0; i < src.num_elements(); ++i)
- {
- result[i] = static_cast<T2>(src[i]);
- }
-}
-
template SimpleTensor<uint16_t> depth_convert(const SimpleTensor<uint8_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
template SimpleTensor<int16_t> depth_convert(const SimpleTensor<uint8_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
template SimpleTensor<int32_t> depth_convert(const SimpleTensor<uint8_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
@@ -147,10 +95,6 @@ template SimpleTensor<uint8_t> depth_convert(const SimpleTensor<uint16_t> &src,
template SimpleTensor<uint32_t> depth_convert(const SimpleTensor<uint16_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
template SimpleTensor<uint8_t> depth_convert(const SimpleTensor<int16_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
template SimpleTensor<int32_t> depth_convert(const SimpleTensor<int16_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
-template SimpleTensor<float> depth_convert(const SimpleTensor<int8_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
-template SimpleTensor<float> depth_convert(const SimpleTensor<int16_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
-template SimpleTensor<int8_t> depth_convert(const SimpleTensor<float> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
-template SimpleTensor<int16_t> depth_convert(const SimpleTensor<float> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
} // namespace reference
} // namespace validation
} // namespace test
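
[Editor's note] After deleting the fixed-point conversions, depth_convert keeps only the plain integral paths (plus the widening-to-larger-type instantiations listed above). A standalone sketch of one surviving widening conversion; note that treating shift as a left shift applied after the cast is an assumption here, not something the hunk shows:

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical sketch of an int16 -> int32 up-conversion with shift.
std::vector<int32_t> depth_convert_s16_to_s32(const std::vector<int16_t> &src, uint32_t shift)
{
    std::vector<int32_t> dst(src.size());
    for(std::size_t i = 0; i < src.size(); ++i)
    {
        dst[i] = static_cast<int32_t>(src[i]) << shift;
    }
    return dst;
}
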
diff --git a/tests/validation/reference/DepthwiseConvolutionLayer.cpp b/tests/validation/reference/DepthwiseConvolutionLayer.cpp
index 10c617e953..d8f3cbae49 100644
--- a/tests/validation/reference/DepthwiseConvolutionLayer.cpp
+++ b/tests/validation/reference/DepthwiseConvolutionLayer.cpp
@@ -53,7 +53,7 @@ template <typename T, typename TB>
SimpleTensor<T> depthwise_convolution(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &biases, const TensorShape &dst_shape, const PadStrideInfo &conv_info,
unsigned int depth_multiplier)
{
- SimpleTensor<T> dst{ dst_shape, src.data_type(), 1, src.fixed_point_position() };
+ SimpleTensor<T> dst{ dst_shape, src.data_type(), 1 };
// Compute reference
const int filter_width = weights.shape().x();
@@ -122,7 +122,7 @@ template <>
SimpleTensor<uint8_t> depthwise_convolution(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &biases, const TensorShape &dst_shape,
const PadStrideInfo &conv_info, unsigned int depth_multiplier)
{
- SimpleTensor<uint8_t> dst{ dst_shape, src.data_type(), 1, src.fixed_point_position(), src.quantization_info() };
+ SimpleTensor<uint8_t> dst{ dst_shape, src.data_type(), 1, src.quantization_info() };
// Create reference
const int input_offset = -src.quantization_info().offset;
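
[Editor's note] The quantized specialization keeps negating the offsets up front, as the retained context line shows. The affine convention this relies on, real = scale * (quantized - offset), can be sketched standalone; storing the negated offset simply turns the inner subtraction into an addition:

#include <cstdint>

// Illustrative only: affine dequantization of a QASYMM8 value.
float dequantize(uint8_t q, float scale, int offset)
{
    return scale * (static_cast<int>(q) - offset);
}
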
diff --git a/tests/validation/reference/FlattenLayer.cpp b/tests/validation/reference/FlattenLayer.cpp
index 44f4d93178..e140d752a0 100644
--- a/tests/validation/reference/FlattenLayer.cpp
+++ b/tests/validation/reference/FlattenLayer.cpp
@@ -36,7 +36,7 @@ namespace reference
template <typename T>
SimpleTensor<T> flatten_layer(const SimpleTensor<T> &src, const TensorShape &shape_flatten)
{
- SimpleTensor<T> dst(shape_flatten, src.data_type(), 1, src.fixed_point_position());
+ SimpleTensor<T> dst(shape_flatten, src.data_type(), 1);
// Note: Since the reference implementation does not use padding bytes, we can copy directly the content of the source tensor
std::copy(src.data(), src.data() + src.num_elements(), dst.data());
@@ -46,8 +46,6 @@ SimpleTensor<T> flatten_layer(const SimpleTensor<T> &src, const TensorShape &sha
template SimpleTensor<float> flatten_layer(const SimpleTensor<float> &src, const TensorShape &shape_flatten);
template SimpleTensor<half> flatten_layer(const SimpleTensor<half> &src, const TensorShape &shape_flatten);
-template SimpleTensor<qint8_t> flatten_layer(const SimpleTensor<qint8_t> &src, const TensorShape &shape_flatten);
-template SimpleTensor<qint16_t> flatten_layer(const SimpleTensor<qint16_t> &src, const TensorShape &shape_flatten);
} // namespace reference
} // namespace validation
} // namespace test
diff --git a/tests/validation/reference/FullyConnectedLayer.cpp b/tests/validation/reference/FullyConnectedLayer.cpp
index 5384715ace..3ef10eacea 100644
--- a/tests/validation/reference/FullyConnectedLayer.cpp
+++ b/tests/validation/reference/FullyConnectedLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,10 +44,8 @@ namespace
// Vector matrix multiply for floating point
template < typename T, typename TB, typename std::enable_if < is_floating_point<T>::value &&is_floating_point<TB>::value, int >::type = 0 >
void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &dst, int offset_src, int offset_dst, int cols_weights,
- int rows_weights, uint8_t fixed_point_position)
+ int rows_weights)
{
- ARM_COMPUTE_UNUSED(fixed_point_position);
-
const T *src_ptr = src.data() + offset_src;
const T *weights_ptr = weights.data();
const TB *bias_ptr = bias.data();
@@ -60,57 +58,16 @@ void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &w
}
}
-// Vector matrix multiply for fixed point type
-template < typename T, typename TB, typename std::enable_if < std::is_integral<T>::value &&std::is_integral<TB>::value, int >::type = 0 >
-void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &dst, int offset_src, int offset_dst, int cols_weights,
- int rows_weights, uint8_t fixed_point_position)
+// Vector matrix multiply for quantized type
+template < typename T, typename TB, typename std::enable_if < std::is_same<T, uint8_t>::value &&std::is_same<TB, int32_t>::value, int >::type = 0 >
+void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &dst, int offset_src, int offset_dst,
+ int cols_weights, int rows_weights)
{
const T *src_ptr = src.data() + offset_src;
const T *weights_ptr = weights.data();
const TB *bias_ptr = bias.data();
T *dst_ptr = dst.data() + offset_dst;
- using namespace fixed_point_arithmetic;
- using promoted_type = fixed_point_arithmetic::traits::promote_t<T>;
-
- for(int y = 0; y < rows_weights; ++y)
- {
- // Reset accumulator
- fixed_point<promoted_type> acc(0, fixed_point_position);
-
- for(int x = 0; x < cols_weights; ++x)
- {
- const fixed_point<promoted_type> i_value(src_ptr[x], fixed_point_position, true);
- const fixed_point<promoted_type> w_value(weights_ptr[x], fixed_point_position, true);
- acc = acc + i_value * w_value;
- }
-
- // Get the bias
- const fixed_point<T> b(bias_ptr[y], fixed_point_position, true);
-
- // Convert back and accumulate the bias
- fixed_point<T> res(acc);
- res = res + b;
-
- // Store the result
- dst_ptr[y] = res.raw();
-
- weights_ptr += cols_weights;
- }
-}
-
-// Vector matrix multiply for quantized type
-template <>
-void vector_matrix_multiply(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &bias, SimpleTensor<uint8_t> &dst, int offset_src, int offset_dst,
- int cols_weights, int rows_weights, uint8_t fixed_point_position)
-{
- ARM_COMPUTE_UNUSED(fixed_point_position);
-
- const uint8_t *src_ptr = src.data() + offset_src;
- const uint8_t *weights_ptr = weights.data();
- const int32_t *bias_ptr = bias.data();
- uint8_t *dst_ptr = dst.data() + offset_dst;
-
const int input_offset = -src.quantization_info().offset;
const float input_scale = src.quantization_info().scale;
const int weights_offset = -weights.quantization_info().offset;
@@ -141,7 +98,7 @@ void vector_matrix_multiply(const SimpleTensor<uint8_t> &src, const SimpleTensor
acc = utility::clamp<int32_t>(acc, 0, 255);
// Store the result
- dst_ptr[y] = static_cast<uint8_t>(acc);
+ dst_ptr[y] = static_cast<T>(acc);
weights_ptr += cols_weights;
}
@@ -152,7 +109,7 @@ template <typename T, typename TB>
SimpleTensor<T> fully_connected_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, const TensorShape &dst_shape)
{
// Create reference
- SimpleTensor<T> dst{ TensorShape{ dst_shape }, src.data_type(), 1, src.fixed_point_position(), src.quantization_info() };
+ SimpleTensor<T> dst{ TensorShape{ dst_shape }, src.data_type(), 1, src.quantization_info() };
// Sanity checks
const int num_batch_dimensions = std::max(0, static_cast<int>(dst_shape.num_dimensions()) - 1);
@@ -183,8 +140,7 @@ SimpleTensor<T> fully_connected_layer(const SimpleTensor<T> &src, const SimpleTe
offset_in,
offset_out,
cols_weights,
- rows_weights,
- src.fixed_point_position());
+ rows_weights);
}
return dst;
@@ -192,8 +148,6 @@ SimpleTensor<T> fully_connected_layer(const SimpleTensor<T> &src, const SimpleTe
template SimpleTensor<float> fully_connected_layer(const SimpleTensor<float> &src, const SimpleTensor<float> &weights, const SimpleTensor<float> &bias, const TensorShape &dst_shape);
template SimpleTensor<half> fully_connected_layer(const SimpleTensor<half> &src, const SimpleTensor<half> &weights, const SimpleTensor<half> &bias, const TensorShape &dst_shape);
-template SimpleTensor<qint8_t> fully_connected_layer(const SimpleTensor<qint8_t> &src, const SimpleTensor<qint8_t> &weights, const SimpleTensor<qint8_t> &bias, const TensorShape &dst_shape);
-template SimpleTensor<qint16_t> fully_connected_layer(const SimpleTensor<qint16_t> &src, const SimpleTensor<qint16_t> &weights, const SimpleTensor<qint16_t> &bias, const TensorShape &dst_shape);
template SimpleTensor<uint8_t> fully_connected_layer(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &bias, const TensorShape &dst_shape);
} // namespace reference
} // namespace validation
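
[Editor's note] The fixed-point vector-matrix multiply is removed and the uint8_t specialization becomes the templated quantized overload, which is why the final store can use static_cast<T>. A self-contained sketch of the requantization pipeline visible in the surviving context (offsets folded into the int32 accumulation, combined scale applied, result clamped to [0, 255]); parameter names here are illustrative, not the library's:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

// Hypothetical sketch of a QASYMM8 vector-matrix multiply with int32 bias.
std::vector<uint8_t> vm_multiply_qasymm8(const std::vector<uint8_t> &src,      // cols
                                         const std::vector<uint8_t> &weights,  // rows x cols, row-major
                                         const std::vector<int32_t> &bias,     // rows
                                         int cols, int rows,
                                         int in_offset, int w_offset, int out_offset,
                                         float in_scale, float w_scale, float out_scale)
{
    std::vector<uint8_t> dst(rows);
    const float multiplier = in_scale * w_scale / out_scale;
    for(int y = 0; y < rows; ++y)
    {
        int32_t acc = 0;
        for(int x = 0; x < cols; ++x)
        {
            acc += (static_cast<int32_t>(src[x]) - in_offset)
                   * (static_cast<int32_t>(weights[y * cols + x]) - w_offset);
        }
        acc += bias[y];
        // Requantize to the output scale, then clamp to the uint8 range.
        acc = static_cast<int32_t>(std::lround(acc * multiplier)) + out_offset;
        acc = std::min<int32_t>(255, std::max<int32_t>(0, acc));
        dst[y] = static_cast<uint8_t>(acc);
    }
    return dst;
}
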
diff --git a/tests/validation/reference/GEMM.cpp b/tests/validation/reference/GEMM.cpp
index f9dcfcbdd0..7378ada4ab 100644
--- a/tests/validation/reference/GEMM.cpp
+++ b/tests/validation/reference/GEMM.cpp
@@ -38,7 +38,7 @@ template <typename T, typename std::enable_if<is_floating_point<T>::value, int>:
SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta)
{
// Create reference
- SimpleTensor<T> dst{ c.shape(), c.data_type(), 1, c.fixed_point_position() };
+ SimpleTensor<T> dst{ c.shape(), c.data_type(), 1 };
// Compute reference
const int M = a.shape().y();
@@ -91,7 +91,7 @@ SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const S
using namespace fixed_point_arithmetic;
// Create reference
- SimpleTensor<T> dst{ c.shape(), c.data_type(), 1, c.fixed_point_position() };
+ SimpleTensor<T> dst{ c.shape(), c.data_type(), 1 };
// Compute reference
using promoted_type = fixed_point_arithmetic::traits::promote_t<T>;
@@ -156,8 +156,6 @@ SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const S
template SimpleTensor<float> gemm(const SimpleTensor<float> &a, const SimpleTensor<float> &b, const SimpleTensor<float> &c, float alpha, float beta);
template SimpleTensor<half> gemm(const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta);
-template SimpleTensor<qint8_t> gemm(const SimpleTensor<qint8_t> &a, const SimpleTensor<qint8_t> &b, const SimpleTensor<qint8_t> &c, float alpha, float beta);
-template SimpleTensor<qint16_t> gemm(const SimpleTensor<qint16_t> &a, const SimpleTensor<qint16_t> &b, const SimpleTensor<qint16_t> &c, float alpha, float beta);
} // namespace reference
} // namespace validation
} // namespace test
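
[Editor's note] Only the QS8/QS16 instantiations are dropped here; the floating-point and fixed-point GEMM bodies both remain at this point in the series. For reference, the floating-point path reduces to the usual triple loop, sketched standalone with row-major storage assumed:

#include <vector>

// Hypothetical sketch: dst = alpha * (a * b) + beta * c.
std::vector<float> gemm_ref(const std::vector<float> &a, // M x K
                            const std::vector<float> &b, // K x N
                            const std::vector<float> &c, // M x N
                            int M, int N, int K, float alpha, float beta)
{
    std::vector<float> dst(M * N);
    for(int row = 0; row < M; ++row)
    {
        for(int col = 0; col < N; ++col)
        {
            float acc = 0.f;
            for(int k = 0; k < K; ++k)
            {
                acc += a[row * K + k] * b[k * N + col];
            }
            dst[row * N + col] = alpha * acc + beta * c[row * N + col];
        }
    }
    return dst;
}
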
diff --git a/tests/validation/reference/LocallyConnected.cpp b/tests/validation/reference/LocallyConnected.cpp
index 08e3f02761..ecc582b181 100644
--- a/tests/validation/reference/LocallyConnected.cpp
+++ b/tests/validation/reference/LocallyConnected.cpp
@@ -41,7 +41,7 @@ template <typename T, typename TB>
SimpleTensor<T> locally_connected(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, const TensorShape &output_shape, const PadStrideInfo &info)
{
// Create reference
- SimpleTensor<T> dst{ output_shape, src.data_type(), 1, src.fixed_point_position(), src.quantization_info() };
+ SimpleTensor<T> dst{ output_shape, src.data_type(), 1, src.quantization_info() };
// Compute reference
const int width_in = src.shape().x();
diff --git a/tests/validation/reference/NormalizationLayer.cpp b/tests/validation/reference/NormalizationLayer.cpp
index 226af96fe3..85872c8f90 100644
--- a/tests/validation/reference/NormalizationLayer.cpp
+++ b/tests/validation/reference/NormalizationLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -38,7 +38,7 @@ template <typename T, typename std::enable_if<is_floating_point<T>::value, int>:
SimpleTensor<T> normalization_layer(const SimpleTensor<T> &src, NormalizationLayerInfo info)
{
// Create reference
- SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };
+ SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
// Compute reference
const uint32_t norm_size = info.norm_size();
@@ -152,7 +152,7 @@ SimpleTensor<T> normalization_layer(const SimpleTensor<T> &src, NormalizationLay
using namespace fixed_point_arithmetic;
// Create reference
- SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };
+ SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
// Compute reference
const int fixed_point_position = src.fixed_point_position();
@@ -267,8 +267,6 @@ SimpleTensor<T> normalization_layer(const SimpleTensor<T> &src, NormalizationLay
template SimpleTensor<float> normalization_layer(const SimpleTensor<float> &src, NormalizationLayerInfo info);
template SimpleTensor<half> normalization_layer(const SimpleTensor<half> &src, NormalizationLayerInfo info);
-template SimpleTensor<qint8_t> normalization_layer(const SimpleTensor<qint8_t> &src, NormalizationLayerInfo info);
-template SimpleTensor<qint16_t> normalization_layer(const SimpleTensor<qint16_t> &src, NormalizationLayerInfo info);
} // namespace reference
} // namespace validation
} // namespace test
diff --git a/tests/validation/reference/Permute.cpp b/tests/validation/reference/Permute.cpp
index bbb2e8d4d7..29c3c5cda8 100644
--- a/tests/validation/reference/Permute.cpp
+++ b/tests/validation/reference/Permute.cpp
@@ -42,7 +42,7 @@ SimpleTensor<T> permute(const SimpleTensor<T> &src, PermutationVector perm)
permute(dst_shape, perm);
// Create reference
- SimpleTensor<T> dst{ dst_shape, src.data_type(), src.num_channels(), src.fixed_point_position(), src.quantization_info() };
+ SimpleTensor<T> dst{ dst_shape, src.data_type(), src.num_channels(), src.quantization_info() };
// Compute reference
for(int i = 0; i < src.num_elements(); ++i)
diff --git a/tests/validation/reference/PoolingLayer.cpp b/tests/validation/reference/PoolingLayer.cpp
index 69734545c9..e9054b9043 100644
--- a/tests/validation/reference/PoolingLayer.cpp
+++ b/tests/validation/reference/PoolingLayer.cpp
@@ -44,7 +44,7 @@ SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, const PoolingLayerInfo
ARM_COMPUTE_ERROR_ON(info.is_global_pooling() && (src.shape().x() != src.shape().y()));
// Create reference
- SimpleTensor<T> dst{ compute_pool_shape(TensorInfo(src.shape(), 1, src.data_type(), src.fixed_point_position()), info), src.data_type(), 1, src.fixed_point_position() };
+ SimpleTensor<T> dst{ compute_pool_shape(TensorInfo(src.shape(), 1, src.data_type(), src.fixed_point_position()), info), src.data_type(), 1 };
const int pool_size_x = info.is_global_pooling() ? src.shape().x() : info.pool_size().width;
const int pool_size_y = info.is_global_pooling() ? src.shape().y() : info.pool_size().height;
@@ -152,128 +152,6 @@ SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, const PoolingLayerInfo
return dst;
}
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type>
-SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, const PoolingLayerInfo &info)
-{
- ARM_COMPUTE_ERROR_ON(info.is_global_pooling() && (src.shape().x() != src.shape().y()));
-
- const auto w_src = static_cast<int>(src.shape()[0]);
- const auto h_src = static_cast<int>(src.shape()[1]);
- const int upper_dims = src.shape().total_size() / (w_src * h_src);
-
- const int pool_size_x = info.is_global_pooling() ? src.shape().x() : info.pool_size().width;
- const int pool_size_y = info.is_global_pooling() ? src.shape().y() : info.pool_size().height;
- PoolingType type = info.pool_type();
- int pool_stride_x = info.pad_stride_info().stride().first;
- int pool_stride_y = info.pad_stride_info().stride().second;
- int pad_left = info.pad_stride_info().pad_left();
- int pad_top = info.pad_stride_info().pad_top();
- int pad_right = info.pad_stride_info().pad_right();
- int pad_bottom = info.pad_stride_info().pad_bottom();
- bool exclude_padding = info.exclude_padding();
-
- // Create reference
- SimpleTensor<T> dst{ compute_pool_shape(TensorInfo(src.shape(), 1, src.data_type(), src.fixed_point_position()), info), src.data_type(), 1, src.fixed_point_position() };
-
- const auto w_dst = static_cast<int>(dst.shape()[0]);
- const auto h_dst = static_cast<int>(dst.shape()[1]);
-
- if(type == PoolingType::MAX)
- {
- for(int r = 0; r < upper_dims; ++r)
- {
- for(int h = 0; h < h_dst; ++h)
- {
- for(int w = 0; w < w_dst; ++w)
- {
- int wstart = w * pool_stride_x - pad_left;
- int hstart = h * pool_stride_y - pad_top;
- int wend = std::min(wstart + pool_size_x, w_src);
- int hend = std::min(hstart + pool_size_y, h_src);
- wstart = std::max(wstart, 0);
- hstart = std::max(hstart, 0);
-
- T max_val = std::numeric_limits<T>::lowest();
- for(int y = hstart; y < hend; ++y)
- {
- for(int x = wstart; x < wend; ++x)
- {
- const T val = src[r * h_src * w_src + y * w_src + x];
- if(val > max_val)
- {
- max_val = val;
- }
- }
- }
-
- dst[r * h_dst * w_dst + h * w_dst + w] = max_val;
- }
- }
- }
- }
- else // Average or l2 pooling
- {
- for(int r = 0; r < upper_dims; ++r)
- {
- for(int h = 0; h < h_dst; ++h)
- {
- for(int w = 0; w < w_dst; ++w)
- {
- int wstart = w * pool_stride_x - pad_left;
- int hstart = h * pool_stride_y - pad_top;
- int wend = std::min(wstart + pool_size_x, w_src + pad_right);
- int hend = std::min(hstart + pool_size_y, h_src + pad_bottom);
- int pool = (hend - hstart) * (wend - wstart);
- wstart = std::max(wstart, 0);
- hstart = std::max(hstart, 0);
- wend = std::min(wend, w_src);
- hend = std::min(hend, h_src);
- // Exclude padding pixels from the average
- if(exclude_padding)
- {
- pool = (hend - hstart) * (wend - wstart);
- }
-
- using namespace fixed_point_arithmetic;
-
- const int fixed_point_position = src.fixed_point_position();
- const fixed_point<T> const_1(1, fixed_point_position);
- const fixed_point<T> invpool_fp(1.f / static_cast<float>(pool), fixed_point_position);
- fixed_point<T> avg_val(0, fixed_point_position, true);
-
- if(type == PoolingType::AVG)
- {
- for(int y = hstart; y < hend; ++y)
- {
- for(int x = wstart; x < wend; ++x)
- {
- const fixed_point<T> in_fp(src[r * h_src * w_src + y * w_src + x], fixed_point_position, true);
- avg_val = add(avg_val, in_fp);
- }
- }
- dst[r * h_dst * w_dst + h * w_dst + w] = mul(avg_val, invpool_fp).raw();
- }
- else
- {
- for(int y = hstart; y < hend; ++y)
- {
- for(int x = wstart; x < wend; ++x)
- {
- const fixed_point<T> in_fp(src[r * h_src * w_src + y * w_src + x], fixed_point_position, true);
- avg_val = add(avg_val, mul(in_fp, in_fp));
- }
- }
- auto res = div(const_1, (inv_sqrt(mul(avg_val, invpool_fp))));
- dst[r * h_dst * w_dst + h * w_dst + w] = res.raw();
- }
- }
- }
- }
- }
-
- return dst;
-}
-
template <>
SimpleTensor<uint8_t> pooling_layer<uint8_t>(const SimpleTensor<uint8_t> &src, const PoolingLayerInfo &info)
{
@@ -285,8 +163,6 @@ SimpleTensor<uint8_t> pooling_layer<uint8_t>(const SimpleTensor<uint8_t> &src, c
template SimpleTensor<float> pooling_layer(const SimpleTensor<float> &src, const PoolingLayerInfo &info);
template SimpleTensor<half> pooling_layer(const SimpleTensor<half> &src, const PoolingLayerInfo &info);
-template SimpleTensor<qint8_t> pooling_layer(const SimpleTensor<qint8_t> &src, const PoolingLayerInfo &info);
-template SimpleTensor<qint16_t> pooling_layer(const SimpleTensor<qint16_t> &src, const PoolingLayerInfo &info);
} // namespace reference
} // namespace validation
} // namespace test
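
[Editor's note] The whole fixed-point pooling overload is deleted (the surviving constructor still forwards src.fixed_point_position() into TensorInfo at this stage, presumably cleaned up in a follow-up patch). The float path shares the same window bookkeeping, which is the subtle part: the divisor is computed before clamping, so padding pixels count, and it is recomputed after clamping only when exclude_padding is set. A standalone sketch of just that bookkeeping:

#include <algorithm>

// Illustrative only: pooling window limits and averaging divisor for one
// output coordinate (w, h).
struct Window
{
    int wstart, hstart, wend, hend, divisor;
};

Window make_pool_window(int w, int h, int stride_x, int stride_y,
                        int pool_x, int pool_y,
                        int pad_left, int pad_top, int pad_right, int pad_bottom,
                        int w_src, int h_src, bool exclude_padding)
{
    Window win{};
    win.wstart  = w * stride_x - pad_left;
    win.hstart  = h * stride_y - pad_top;
    win.wend    = std::min(win.wstart + pool_x, w_src + pad_right);
    win.hend    = std::min(win.hstart + pool_y, h_src + pad_bottom);
    win.divisor = (win.hend - win.hstart) * (win.wend - win.wstart); // padding included
    win.wstart  = std::max(win.wstart, 0);
    win.hstart  = std::max(win.hstart, 0);
    win.wend    = std::min(win.wend, w_src);
    win.hend    = std::min(win.hend, h_src);
    if(exclude_padding)
    {
        win.divisor = (win.hend - win.hstart) * (win.wend - win.wstart); // in-bounds only
    }
    return win;
}
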
diff --git a/tests/validation/reference/SoftmaxLayer.cpp b/tests/validation/reference/SoftmaxLayer.cpp
index 90b9b1f7e2..ae4bcd8f0e 100644
--- a/tests/validation/reference/SoftmaxLayer.cpp
+++ b/tests/validation/reference/SoftmaxLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -38,7 +38,7 @@ template <typename T, typename std::enable_if<is_floating_point<T>::value, int>:
SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta)
{
// Create reference
- SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };
+ SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
// Compute reference
const int cols = src.shape()[0];
@@ -79,7 +79,7 @@ SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta)
using namespace fixed_point_arithmetic;
// Create reference
- SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };
+ SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
// Compute reference
const int cols = src.shape()[0];
@@ -128,8 +128,6 @@ SimpleTensor<uint8_t> softmax_layer<uint8_t>(const SimpleTensor<uint8_t> &src, f
template SimpleTensor<float> softmax_layer(const SimpleTensor<float> &src, float beta);
template SimpleTensor<half> softmax_layer(const SimpleTensor<half> &src, float beta);
-template SimpleTensor<qint8_t> softmax_layer(const SimpleTensor<qint8_t> &src, float beta);
-template SimpleTensor<qint16_t> softmax_layer(const SimpleTensor<qint16_t> &src, float beta);
} // namespace reference
} // namespace validation
} // namespace test
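
[Editor's note] With the QS8/QS16 instantiations gone, the float path (and the uint8_t specialization built on top of it) carries softmax. Its core, sketched standalone for a single row, is the usual shift by the row maximum for numerical stability, followed by beta-scaled exponentials and normalization:

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// Hypothetical sketch of softmax over one row of the input.
std::vector<float> softmax_row(const std::vector<float> &src, float beta)
{
    const float max_val = *std::max_element(src.begin(), src.end());
    std::vector<float> dst(src.size());
    float sum = 0.f;
    for(std::size_t i = 0; i < src.size(); ++i)
    {
        dst[i] = std::exp((src[i] - max_val) * beta);
        sum += dst[i];
    }
    for(float &v : dst)
    {
        v /= sum;
    }
    return dst;
}
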
diff --git a/tests/validation/reference/WidthConcatenateLayer.cpp b/tests/validation/reference/WidthConcatenateLayer.cpp
index fe79b4a138..5b89934df5 100644
--- a/tests/validation/reference/WidthConcatenateLayer.cpp
+++ b/tests/validation/reference/WidthConcatenateLayer.cpp
@@ -85,8 +85,6 @@ SimpleTensor<T> widthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs)
template SimpleTensor<float> widthconcatenate_layer(const std::vector<SimpleTensor<float>> &srcs);
template SimpleTensor<half> widthconcatenate_layer(const std::vector<SimpleTensor<half>> &srcs);
-template SimpleTensor<qint8_t> widthconcatenate_layer(const std::vector<SimpleTensor<qint8_t>> &srcs);
-template SimpleTensor<qint16_t> widthconcatenate_layer(const std::vector<SimpleTensor<qint16_t>> &srcs);
} // namespace reference
} // namespace validation
} // namespace test
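
[Editor's note] Width concatenation mirrors the depth version earlier in the patch: each source contributes a contiguous block of columns per row, at an increasing column offset. A self-contained 2-D sketch with an illustrative Tensor2D type (srcs assumed non-empty, all sources the same height):

#include <algorithm>
#include <vector>

struct Tensor2D
{
    int width, height;
    std::vector<float> data; // row-major, width * height elements
};

// Hypothetical sketch of concatenation along the width (x) axis.
Tensor2D width_concatenate(const std::vector<Tensor2D> &srcs)
{
    Tensor2D dst{0, srcs.front().height, {}};
    for(const auto &s : srcs)
    {
        dst.width += s.width;
    }
    dst.data.resize(static_cast<std::size_t>(dst.width) * dst.height);
    int x_off = 0;
    for(const auto &s : srcs)
    {
        for(int y = 0; y < s.height; ++y)
        {
            std::copy(s.data.begin() + y * s.width,
                      s.data.begin() + (y + 1) * s.width,
                      dst.data.begin() + y * dst.width + x_off);
        }
        x_off += s.width;
    }
    return dst;
}
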