author    Vidhya Sudhan Loganathan <vidhyasudhan.loganathan@arm.com>    2018-07-02 09:13:49 +0100
committer Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:54:10 +0000
commit    014333d73883c3872e458cedda5ccef586a7ccd4 (patch)
tree      0f28bbc1ab769993af91b40e4584061f6ed6d3fa /tests/validation/NEON
parent    de01468bbfff3a7d8bcbba3bfdf5698fb2e3b267 (diff)
download  ComputeLibrary-014333d73883c3872e458cedda5ccef586a7ccd4.tar.gz
COMPMID-970 : Remove QS8 / QS16 support
Removed Fixed point position arguments from test sources

Change-Id: I8343724723b71611fd501ed34de0866d3fb60e7e
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/136382
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele DiGiorgio <michele.digiorgio@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'tests/validation/NEON')
-rw-r--r--  tests/validation/NEON/ActivationLayer.cpp          10
-rw-r--r--  tests/validation/NEON/ArithmeticAddition.cpp        3
-rw-r--r--  tests/validation/NEON/ArithmeticSubtraction.cpp     3
-rw-r--r--  tests/validation/NEON/BatchNormalizationLayer.cpp   15
-rw-r--r--  tests/validation/NEON/ConvolutionLayer.cpp          14
-rw-r--r--  tests/validation/NEON/DepthConvertLayer.cpp         44
-rw-r--r--  tests/validation/NEON/DilatedConvolutionLayer.cpp   14
-rw-r--r--  tests/validation/NEON/DirectConvolutionLayer.cpp     3
-rw-r--r--  tests/validation/NEON/FullyConnectedLayer.cpp       14
-rw-r--r--  tests/validation/NEON/GEMM.cpp                      14
-rw-r--r--  tests/validation/NEON/NormalizationLayer.cpp         6
-rw-r--r--  tests/validation/NEON/PoolingLayer.cpp               8
-rw-r--r--  tests/validation/NEON/SYSTEM/AlexNet.cpp             4
-rw-r--r--  tests/validation/NEON/Scale.cpp                      4
-rw-r--r--  tests/validation/NEON/SoftmaxLayer.cpp              10
15 files changed, 44 insertions, 122 deletions
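For illustration only (not part of the commit): a minimal before/after sketch of the test-side change this patch applies throughout, reconstructed from the hunks below; the exact create_tensor overloads are assumed from the diff context rather than from the library headers.

    // Before: Configuration tests derived a fixed-point position for the
    // (now removed) QS8/QS16 data types and passed it to create_tensor.
    const int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
    Tensor src = create_tensor<Tensor>(shape, data_type, 1, fixed_point_position);
    Tensor dst = create_tensor<Tensor>(shape, data_type, 1, fixed_point_position);

    // After: the fixed-point argument is dropped; where quantization info or a
    // data layout is still needed, it now follows the channel count directly.
    Tensor src  = create_tensor<Tensor>(shape, data_type, 1);
    Tensor q    = create_tensor<Tensor>(shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
    Tensor nhwc = create_tensor<Tensor>(shape, data_type, 1, QuantizationInfo(), data_layout);

The same pattern repeats in every file listed above; the QS-specific fixtures and datasets (e.g. the *FixedPointFixture aliases and NormalizationDatasetQS) are deleted outright.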
diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp
index 289ca4870e..dee264c6b8 100644
--- a/tests/validation/NEON/ActivationLayer.cpp
+++ b/tests/validation/NEON/ActivationLayer.cpp
@@ -90,12 +90,9 @@ TEST_SUITE(ActivationLayer)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(concat(datasets::SmallShapes(), datasets::LargeShapes()), CNNDataTypes), framework::dataset::make("InPlace", { false, true })),
shape, data_type, in_place)
{
- // Set fixed point position data type allowed
- const int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
// Create tensors
- Tensor src = create_tensor<Tensor>(shape, data_type, 1, fixed_point_position);
- Tensor dst = create_tensor<Tensor>(shape, data_type, 1, fixed_point_position);
+ Tensor src = create_tensor<Tensor>(shape, data_type, 1);
+ Tensor dst = create_tensor<Tensor>(shape, data_type, 1);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -196,9 +193,6 @@ TEST_SUITE_END()
TEST_SUITE_END()
template <typename T>
-using NEActivationLayerFixedPointFixture = ActivationValidationFixedPointFixture<Tensor, Accessor, NEActivationLayer, T>;
-
-template <typename T>
using NEActivationLayerQuantizedFixture = ActivationValidationQuantizedFixture<Tensor, Accessor, NEActivationLayer, T>;
/** Input data sets. */
diff --git a/tests/validation/NEON/ArithmeticAddition.cpp b/tests/validation/NEON/ArithmeticAddition.cpp
index b01e5d929d..3632c3c207 100644
--- a/tests/validation/NEON/ArithmeticAddition.cpp
+++ b/tests/validation/NEON/ArithmeticAddition.cpp
@@ -163,9 +163,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEArithmeticAdditionFixture<int16_t>, framework
}
TEST_SUITE_END()
-template <typename T>
-using NEArithmeticAdditionFixedPointFixture = ArithmeticAdditionValidationFixedPointFixture<Tensor, Accessor, NEArithmeticAddition, T>;
-
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(F16)
diff --git a/tests/validation/NEON/ArithmeticSubtraction.cpp b/tests/validation/NEON/ArithmeticSubtraction.cpp
index fc25465e6d..210ed4578f 100644
--- a/tests/validation/NEON/ArithmeticSubtraction.cpp
+++ b/tests/validation/NEON/ArithmeticSubtraction.cpp
@@ -233,9 +233,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEAriSubU8S16ToS16Fixture, framework::DatasetMo
TEST_SUITE_END()
TEST_SUITE_END()
-template <typename T1, typename T2 = T1, typename T3 = T1>
-using NEArithmeticSubtractionFixedPointFixture = ArithmeticSubtractionValidationFixedPointFixture<Tensor, Accessor, NEArithmeticSubtraction, T1, T2, T3>;
-
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
diff --git a/tests/validation/NEON/BatchNormalizationLayer.cpp b/tests/validation/NEON/BatchNormalizationLayer.cpp
index 3a18a0a93b..ca13d26495 100644
--- a/tests/validation/NEON/BatchNormalizationLayer.cpp
+++ b/tests/validation/NEON/BatchNormalizationLayer.cpp
@@ -68,9 +68,6 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combi
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
shape0, shape1, epsilon, use_beta, use_gamma, dt, data_layout)
{
- // Set fixed point position data type allowed
- const int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
-
TensorShape src_dst_shapes = shape0;
if(data_layout == DataLayout::NHWC)
{
@@ -78,12 +75,12 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combi
}
// Create tensors
- Tensor src = create_tensor<Tensor>(src_dst_shapes, dt, 1, fixed_point_position, QuantizationInfo(), data_layout);
- Tensor dst = create_tensor<Tensor>(src_dst_shapes, dt, 1, fixed_point_position, QuantizationInfo(), data_layout);
- Tensor mean = create_tensor<Tensor>(shape1, dt, 1, fixed_point_position);
- Tensor var = create_tensor<Tensor>(shape1, dt, 1, fixed_point_position);
- Tensor beta = create_tensor<Tensor>(shape1, dt, 1, fixed_point_position);
- Tensor gamma = create_tensor<Tensor>(shape1, dt, 1, fixed_point_position);
+ Tensor src = create_tensor<Tensor>(src_dst_shapes, dt, 1, QuantizationInfo(), data_layout);
+ Tensor dst = create_tensor<Tensor>(src_dst_shapes, dt, 1, QuantizationInfo(), data_layout);
+ Tensor mean = create_tensor<Tensor>(shape1, dt, 1);
+ Tensor var = create_tensor<Tensor>(shape1, dt, 1);
+ Tensor beta = create_tensor<Tensor>(shape1, dt, 1);
+ Tensor gamma = create_tensor<Tensor>(shape1, dt, 1);
// Create and Configure function
NEBatchNormalizationLayer norm;
diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp
index 94b38c2c81..591d1424c8 100644
--- a/tests/validation/NEON/ConvolutionLayer.cpp
+++ b/tests/validation/NEON/ConvolutionLayer.cpp
@@ -154,16 +154,13 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
{ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })),
input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, act_info)
{
- // Set fixed point position data type allowed
- int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
// Create tensors
- Tensor src = create_tensor<Tensor>(input_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
- Tensor weights = create_tensor<Tensor>(weights_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
- Tensor bias = create_tensor<Tensor>(bias_shape, bias_data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
- Tensor dst = create_tensor<Tensor>(output_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
+ Tensor src = create_tensor<Tensor>(input_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+ Tensor weights = create_tensor<Tensor>(weights_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+ Tensor bias = create_tensor<Tensor>(bias_shape, bias_data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+ Tensor dst = create_tensor<Tensor>(output_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -246,9 +243,6 @@ TEST_SUITE_END()
TEST_SUITE_END()
template <typename T>
-using NEGEMMConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;
-
-template <typename T>
using NEGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;
const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
diff --git a/tests/validation/NEON/DepthConvertLayer.cpp b/tests/validation/NEON/DepthConvertLayer.cpp
index 2bd3db7075..78070d004e 100644
--- a/tests/validation/NEON/DepthConvertLayer.cpp
+++ b/tests/validation/NEON/DepthConvertLayer.cpp
@@ -66,19 +66,15 @@ template <typename T>
using NEDepthConvertLayerToU8Fixture = DepthConvertLayerValidationFixture<Tensor, Accessor, NEDepthConvertLayer, T, uint8_t>;
template <typename T>
using NEDepthConvertLayerToU32Fixture = DepthConvertLayerValidationFixture<Tensor, Accessor, NEDepthConvertLayer, T, uint32_t>;
-template <typename T>
-using NEDepthConvertLayerToFP32FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture<Tensor, Accessor, NEDepthConvertLayer, T, float>;
TEST_SUITE(U8_to_U16)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
DepthConvertLayerShiftDataset),
shape, policy, shift)
{
- int fixed_point_position = 0;
-
// Create tensors
- Tensor src = create_tensor<Tensor>(shape, DataType::U8, 1, fixed_point_position);
- Tensor dst = create_tensor<Tensor>(shape, DataType::U16, 1, fixed_point_position);
+ Tensor src = create_tensor<Tensor>(shape, DataType::U8, 1);
+ Tensor dst = create_tensor<Tensor>(shape, DataType::U16, 1);
// Create and Configure function
NEDepthConvertLayer depth_convert;
@@ -116,11 +112,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
DepthConvertLayerShiftDataset),
shape, policy, shift)
{
- int fixed_point_position = 0;
-
// Create tensors
- Tensor src = create_tensor<Tensor>(shape, DataType::U8, 1, fixed_point_position);
- Tensor dst = create_tensor<Tensor>(shape, DataType::S16, 1, fixed_point_position);
+ Tensor src = create_tensor<Tensor>(shape, DataType::U8, 1);
+ Tensor dst = create_tensor<Tensor>(shape, DataType::S16, 1);
// Create and Configure function
NEDepthConvertLayer depth_convert;
@@ -157,11 +151,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
DepthConvertLayerShiftDataset),
shape, policy, shift)
{
- int fixed_point_position = 0;
-
// Create tensors
- Tensor src = create_tensor<Tensor>(shape, DataType::U8, 1, fixed_point_position);
- Tensor dst = create_tensor<Tensor>(shape, DataType::S32, 1, fixed_point_position);
+ Tensor src = create_tensor<Tensor>(shape, DataType::U8, 1);
+ Tensor dst = create_tensor<Tensor>(shape, DataType::S32, 1);
// Create and Configure function
NEDepthConvertLayer depth_convert;
@@ -199,11 +191,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
DepthConvertLayerShiftDataset),
shape, policy, shift)
{
- int fixed_point_position = 0;
-
// Create tensors
- Tensor src = create_tensor<Tensor>(shape, DataType::U16, 1, fixed_point_position);
- Tensor dst = create_tensor<Tensor>(shape, DataType::U8, 1, fixed_point_position);
+ Tensor src = create_tensor<Tensor>(shape, DataType::U16, 1);
+ Tensor dst = create_tensor<Tensor>(shape, DataType::U8, 1);
// Create and Configure function
NEDepthConvertLayer depth_convert;
@@ -240,11 +230,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
DepthConvertLayerShiftDataset),
shape, policy, shift)
{
- int fixed_point_position = 0;
-
// Create tensors
- Tensor src = create_tensor<Tensor>(shape, DataType::U16, 1, fixed_point_position);
- Tensor dst = create_tensor<Tensor>(shape, DataType::U32, 1, fixed_point_position);
+ Tensor src = create_tensor<Tensor>(shape, DataType::U16, 1);
+ Tensor dst = create_tensor<Tensor>(shape, DataType::U32, 1);
// Create and Configure function
NEDepthConvertLayer depth_convert;
@@ -281,11 +269,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
DepthConvertLayerShiftDataset),
shape, policy, shift)
{
- int fixed_point_position = 0;
-
// Create tensors
- Tensor src = create_tensor<Tensor>(shape, DataType::S16, 1, fixed_point_position);
- Tensor dst = create_tensor<Tensor>(shape, DataType::U8, 1, fixed_point_position);
+ Tensor src = create_tensor<Tensor>(shape, DataType::S16, 1);
+ Tensor dst = create_tensor<Tensor>(shape, DataType::U8, 1);
// Create and Configure function
NEDepthConvertLayer depth_convert;
@@ -322,11 +308,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
DepthConvertLayerShiftDataset),
shape, policy, shift)
{
- int fixed_point_position = 0;
-
// Create tensors
- Tensor src = create_tensor<Tensor>(shape, DataType::S16, 1, fixed_point_position);
- Tensor dst = create_tensor<Tensor>(shape, DataType::S32, 1, fixed_point_position);
+ Tensor src = create_tensor<Tensor>(shape, DataType::S16, 1);
+ Tensor dst = create_tensor<Tensor>(shape, DataType::S32, 1);
// Create and Configure function
NEDepthConvertLayer depth_convert;
diff --git a/tests/validation/NEON/DilatedConvolutionLayer.cpp b/tests/validation/NEON/DilatedConvolutionLayer.cpp
index e703c67868..7cfffc0c2b 100644
--- a/tests/validation/NEON/DilatedConvolutionLayer.cpp
+++ b/tests/validation/NEON/DilatedConvolutionLayer.cpp
@@ -106,16 +106,13 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
CNNDataTypes),
input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type)
{
- // Set fixed point position data type allowed
- int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
// Create tensors
- Tensor src = create_tensor<Tensor>(input_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
- Tensor weights = create_tensor<Tensor>(weights_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
- Tensor bias = create_tensor<Tensor>(bias_shape, bias_data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
- Tensor dst = create_tensor<Tensor>(output_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
+ Tensor src = create_tensor<Tensor>(input_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+ Tensor weights = create_tensor<Tensor>(weights_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+ Tensor bias = create_tensor<Tensor>(bias_shape, bias_data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+ Tensor dst = create_tensor<Tensor>(output_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -198,9 +195,6 @@ TEST_SUITE_END()
TEST_SUITE_END()
template <typename T>
-using NEGEMMDilatedConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;
-
-template <typename T>
using NEGEMMDilatedConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;
TEST_SUITE(Quantized)
diff --git a/tests/validation/NEON/DirectConvolutionLayer.cpp b/tests/validation/NEON/DirectConvolutionLayer.cpp
index 4995d881cc..bf5b33c9a2 100644
--- a/tests/validation/NEON/DirectConvolutionLayer.cpp
+++ b/tests/validation/NEON/DirectConvolutionLayer.cpp
@@ -173,9 +173,6 @@ FIXTURE_DATA_TEST_CASE(Run, NEDirectConvolutionLayerFixture<float>, framework::D
TEST_SUITE_END()
TEST_SUITE_END()
-template <typename T>
-using NEDirectConvolutionLayerFixedPointFixture = DirectConvolutionValidationFixedPointFixture<Tensor, Accessor, NEDirectConvolutionLayer, T>;
-
const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
ActivationLayerInfo(),
diff --git a/tests/validation/NEON/FullyConnectedLayer.cpp b/tests/validation/NEON/FullyConnectedLayer.cpp
index 3adcf61dc9..174778b8ef 100644
--- a/tests/validation/NEON/FullyConnectedLayer.cpp
+++ b/tests/validation/NEON/FullyConnectedLayer.cpp
@@ -68,9 +68,6 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
CNNDataTypes),
src_shape, weights_shape, bias_shape, dst_shape, transpose_weights, reshape_weights, data_type)
{
- // Set fixed point position data type allowed
- int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
TensorShape ws(weights_shape);
// Transpose weights if not done in the function
@@ -92,10 +89,10 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
}
// Create tensors
- Tensor src = create_tensor<Tensor>(src_shape, data_type, 1, fixed_point_position);
- Tensor weights = create_tensor<Tensor>(ws, data_type, 1, fixed_point_position);
- Tensor bias = create_tensor<Tensor>(bias_shape, data_type, 1, fixed_point_position);
- Tensor dst = create_tensor<Tensor>(dst_shape, data_type, 1, fixed_point_position);
+ Tensor src = create_tensor<Tensor>(src_shape, data_type, 1);
+ Tensor weights = create_tensor<Tensor>(ws, data_type, 1);
+ Tensor bias = create_tensor<Tensor>(bias_shape, data_type, 1);
+ Tensor dst = create_tensor<Tensor>(dst_shape, data_type, 1);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -192,9 +189,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEFullyConnectedLayerFixture<float>, framework:
TEST_SUITE_END()
TEST_SUITE_END()
-template <typename T>
-using NEFullyConnectedLayerFixedPointFixture = FullyConnectedLayerValidationFixedPointFixture<Tensor, Accessor, NEFullyConnectedLayer, T, true>;
-
TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
diff --git a/tests/validation/NEON/GEMM.cpp b/tests/validation/NEON/GEMM.cpp
index e0f63a8a2d..9c64131a61 100644
--- a/tests/validation/NEON/GEMM.cpp
+++ b/tests/validation/NEON/GEMM.cpp
@@ -98,14 +98,11 @@ TEST_SUITE_END() // INTERLEAVE_4X4
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallGEMMDataset(), datasets::LargeGEMMDataset()), CNNDataTypes),
shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type)
{
- // Set fixed point position data type allowed
- const int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
// Create tensors
- Tensor a = create_tensor<Tensor>(shape_a, data_type, 1, fixed_point_position);
- Tensor b = create_tensor<Tensor>(shape_b, data_type, 1, fixed_point_position);
- Tensor c = create_tensor<Tensor>(shape_c, data_type, 1, fixed_point_position);
- Tensor dst = create_tensor<Tensor>(output_shape, data_type, 1, fixed_point_position);
+ Tensor a = create_tensor<Tensor>(shape_a, data_type, 1);
+ Tensor b = create_tensor<Tensor>(shape_b, data_type, 1);
+ Tensor c = create_tensor<Tensor>(shape_c, data_type, 1);
+ Tensor dst = create_tensor<Tensor>(output_shape, data_type, 1);
ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -153,9 +150,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMFixture<float>, framework::DatasetMode::N
TEST_SUITE_END()
TEST_SUITE_END()
-template <typename T>
-using NEGEMMFixedPointFixture = GEMMValidationFixedPointFixture<Tensor, Accessor, NEGEMM, T>;
-
TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
diff --git a/tests/validation/NEON/NormalizationLayer.cpp b/tests/validation/NEON/NormalizationLayer.cpp
index 8c66611f49..02cca0b452 100644
--- a/tests/validation/NEON/NormalizationLayer.cpp
+++ b/tests/validation/NEON/NormalizationLayer.cpp
@@ -50,9 +50,6 @@ constexpr AbsoluteTolerance<float> tolerance_f16(0.001f);
constexpr AbsoluteTolerance<float> tolerance_f32(0.00001f);
/** Input data set. */
-const auto NormalizationDatasetQS = combine(combine(combine(combine(datasets::TinyShapes(), datasets::NormalizationTypes()), framework::dataset::make("NormalizationSize", 3, 9, 2)),
- framework::dataset::make("Beta", { 0.5f, 1.f, 2.f })),
- framework::dataset::make("IsScaled", { true }));
const auto NormalizationDataset = combine(combine(combine(combine(datasets::SmallShapes(), datasets::NormalizationTypes()), framework::dataset::make("NormalizationSize", 3, 9, 2)),
framework::dataset::make("Beta", { 0.5f, 1.f, 2.f })),
framework::dataset::make("IsScaled", { true }));
@@ -132,9 +129,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NENormalizationLayerFixture<float>, framework::
TEST_SUITE_END()
TEST_SUITE_END()
-template <typename T>
-using NENormalizationLayerFixedPointFixture = NormalizationValidationFixedPointFixture<Tensor, Accessor, NENormalizationLayer, T>;
-
TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
diff --git a/tests/validation/NEON/PoolingLayer.cpp b/tests/validation/NEON/PoolingLayer.cpp
index 8762f1f7cc..bbfca46ca9 100644
--- a/tests/validation/NEON/PoolingLayer.cpp
+++ b/tests/validation/NEON/PoolingLayer.cpp
@@ -50,11 +50,6 @@ const auto PoolingLayerDatasetFP = combine(combine(combine(datasets::PoolingType
framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })),
framework::dataset::make("ExcludePadding", { true, false }));
-/** Input data set for quantized data types */
-const auto PoolingLayerDatasetQS = combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }), framework::dataset::make("PoolingSize", { Size2D(2, 2), Size2D(3, 3) })),
- framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })),
- framework::dataset::make("ExcludePadding", { false }));
-
/** Input data set for asymmetric data type */
const auto PoolingLayerDatasetQASYMM8 = combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }), framework::dataset::make("PoolingSize", { Size2D(2, 2), Size2D(3, 3), Size2D(4, 4), Size2D(9, 9), Size2D(3, 7), Size2D(7, 8) })),
@@ -159,9 +154,6 @@ TEST_SUITE_END() // FP16
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
TEST_SUITE_END() // Float
-template <typename T>
-using NEPoolingLayerFixedPointFixture = PoolingLayerValidationFixedPointFixture<Tensor, Accessor, NEPoolingLayer, T>;
-
TEST_SUITE(Quantized)
template <typename T>
diff --git a/tests/validation/NEON/SYSTEM/AlexNet.cpp b/tests/validation/NEON/SYSTEM/AlexNet.cpp
index 3fa19e4f03..adcfe72eaa 100644
--- a/tests/validation/NEON/SYSTEM/AlexNet.cpp
+++ b/tests/validation/NEON/SYSTEM/AlexNet.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -80,7 +80,7 @@ std::vector<unsigned int> compute_alexnet(DataType dt, unsigned int batches, std
};
NEAlexNetModel network{};
- network.init(dt, 4, batches);
+ network.init(dt, batches);
network.build();
network.allocate();
network.fill(weight_files, bias_files);
diff --git a/tests/validation/NEON/Scale.cpp b/tests/validation/NEON/Scale.cpp
index 8940259f13..5f5cfdd808 100644
--- a/tests/validation/NEON/Scale.cpp
+++ b/tests/validation/NEON/Scale.cpp
@@ -159,8 +159,8 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combi
shape_scaled.set(idx_height, src_shape[idx_height] * scale_y);
// Create tensors
- Tensor src = create_tensor<Tensor>(src_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
- Tensor dst = create_tensor<Tensor>(shape_scaled, data_type, 1, 0, QuantizationInfo(), data_layout);
+ Tensor src = create_tensor<Tensor>(src_shape, data_type, 1, QuantizationInfo(), data_layout);
+ Tensor dst = create_tensor<Tensor>(shape_scaled, data_type, 1, QuantizationInfo(), data_layout);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
diff --git a/tests/validation/NEON/SoftmaxLayer.cpp b/tests/validation/NEON/SoftmaxLayer.cpp
index b6efc8f9b5..8c0d46bc41 100644
--- a/tests/validation/NEON/SoftmaxLayer.cpp
+++ b/tests/validation/NEON/SoftmaxLayer.cpp
@@ -66,12 +66,9 @@ TEST_SUITE(SoftmaxLayer)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(concat(datasets::SoftmaxLayerSmallShapes(), datasets::SoftmaxLayerLargeShapes()), CNNDataTypes), shape, data_type)
{
- // Set fixed point position data type allowed
- const int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
// Create tensors
- Tensor src = create_tensor<Tensor>(shape, data_type, 1, fixed_point_position);
- Tensor dst = create_tensor<Tensor>(shape, data_type, 1, fixed_point_position);
+ Tensor src = create_tensor<Tensor>(shape, data_type, 1);
+ Tensor dst = create_tensor<Tensor>(shape, data_type, 1);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -156,9 +153,6 @@ TEST_SUITE_END()
TEST_SUITE_END()
template <typename T>
-using NESoftmaxLayerFixedPointFixture = SoftmaxValidationFixedPointFixture<Tensor, Accessor, NESoftmaxLayer, T>;
-
-template <typename T>
using NESoftmaxLayerQuantizedFixture = SoftmaxValidationQuantizedFixture<Tensor, Accessor, NESoftmaxLayer, T>;
TEST_SUITE(Quantized)