author     Vidhya Sudhan Loganathan <vidhyasudhan.loganathan@arm.com>   2018-07-02 09:13:49 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>                    2018-11-02 16:54:10 +0000
commit     014333d73883c3872e458cedda5ccef586a7ccd4 (patch)
tree       0f28bbc1ab769993af91b40e4584061f6ed6d3fa /tests/validation/CL
parent     de01468bbfff3a7d8bcbba3bfdf5698fb2e3b267 (diff)
download   ComputeLibrary-014333d73883c3872e458cedda5ccef586a7ccd4.tar.gz
COMPMID-970: Remove QS8 / QS16 support
Removed fixed point position arguments from test sources.

Change-Id: I8343724723b71611fd501ed34de0866d3fb60e7e
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/136382
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele DiGiorgio <michele.digiorgio@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'tests/validation/CL')
-rw-r--r--  tests/validation/CL/ActivationLayer.cpp          |   3
-rw-r--r--  tests/validation/CL/ArithmeticAddition.cpp       |   3
-rw-r--r--  tests/validation/CL/ArithmeticSubtraction.cpp    |   3
-rw-r--r--  tests/validation/CL/BatchNormalizationLayer.cpp  |  15
-rw-r--r--  tests/validation/CL/ConvolutionLayer.cpp         |  14
-rw-r--r--  tests/validation/CL/DepthConvertLayer.cpp        |  44
-rw-r--r--  tests/validation/CL/DilatedConvolutionLayer.cpp  |  14
-rw-r--r--  tests/validation/CL/DirectConvolutionLayer.cpp   |   3
-rw-r--r--  tests/validation/CL/FullyConnectedLayer.cpp      |  17
-rw-r--r--  tests/validation/CL/GEMM.cpp                     |  14
-rw-r--r--  tests/validation/CL/NormalizationLayer.cpp       |   7
-rw-r--r--  tests/validation/CL/PoolingLayer.cpp             |   3
-rw-r--r--  tests/validation/CL/SYSTEM/AlexNet.cpp           |   4
-rw-r--r--  tests/validation/CL/SoftmaxLayer.cpp             |  11
-rw-r--r--  tests/validation/CL/Winograd.cpp                 |  16
15 files changed, 51 insertions, 120 deletions
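
In summary, every hunk below removes the fixed_point_position argument from the create_tensor<CLTensor>() calls, so QuantizationInfo and the data layout now follow the channel count directly, and deletes the fixture aliases that existed only for the QS8/QS16 fixed-point path. A minimal before/after sketch of the call-site change, reusing the tensor names from the BatchNormalizationLayer hunk below:

    // Before: a fixed point position was passed after the number of channels
    CLTensor src = create_tensor<CLTensor>(src_dst_shapes, dt, 1, fixed_point_position, QuantizationInfo(), data_layout);

    // After: the argument is dropped; QuantizationInfo() follows the channel count directly
    CLTensor src = create_tensor<CLTensor>(src_dst_shapes, dt, 1, QuantizationInfo(), data_layout);
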
diff --git a/tests/validation/CL/ActivationLayer.cpp b/tests/validation/CL/ActivationLayer.cpp
index f122f6deeb..4f97d7b6c1 100644
--- a/tests/validation/CL/ActivationLayer.cpp
+++ b/tests/validation/CL/ActivationLayer.cpp
@@ -221,9 +221,6 @@ TEST_SUITE_END()
TEST_SUITE_END()
template <typename T>
-using CLActivationLayerFixedPointFixture = ActivationValidationFixedPointFixture<CLTensor, CLAccessor, CLActivationLayer, T>;
-
-template <typename T>
using CLActivationLayerQuantizedFixture = ActivationValidationQuantizedFixture<CLTensor, CLAccessor, CLActivationLayer, T>;
/** Input data sets. */
diff --git a/tests/validation/CL/ArithmeticAddition.cpp b/tests/validation/CL/ArithmeticAddition.cpp
index 0646c05ad7..256d93f7f5 100644
--- a/tests/validation/CL/ArithmeticAddition.cpp
+++ b/tests/validation/CL/ArithmeticAddition.cpp
@@ -202,9 +202,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLArithmeticAdditionFixture<int16_t>, framework
}
TEST_SUITE_END()
-template <typename T>
-using CLArithmeticAdditionFixedPointFixture = ArithmeticAdditionValidationFixedPointFixture<CLTensor, CLAccessor, CLArithmeticAddition, T>;
-
TEST_SUITE(Float)
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ArithmeticAdditionFP16Dataset),
diff --git a/tests/validation/CL/ArithmeticSubtraction.cpp b/tests/validation/CL/ArithmeticSubtraction.cpp
index 4ba5387a61..b19d963515 100644
--- a/tests/validation/CL/ArithmeticSubtraction.cpp
+++ b/tests/validation/CL/ArithmeticSubtraction.cpp
@@ -231,9 +231,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLAriSubU8S16ToS16Fixture, framework::DatasetMo
TEST_SUITE_END()
TEST_SUITE_END()
-template <typename T1, typename T2 = T1, typename T3 = T1>
-using CLArithmeticSubtractionFixedPointFixture = ArithmeticSubtractionValidationFixedPointFixture<CLTensor, CLAccessor, CLArithmeticSubtraction, T1, T2, T3>;
-
TEST_SUITE(Float)
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ArithmeticSubtractionFP16Dataset),
diff --git a/tests/validation/CL/BatchNormalizationLayer.cpp b/tests/validation/CL/BatchNormalizationLayer.cpp
index de775bf807..0d80ff7eb7 100644
--- a/tests/validation/CL/BatchNormalizationLayer.cpp
+++ b/tests/validation/CL/BatchNormalizationLayer.cpp
@@ -67,9 +67,6 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combi
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
shape0, shape1, epsilon, use_gamma, use_beta, dt, data_layout)
{
- // Set fixed point position data type allowed
- const int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
-
TensorShape src_dst_shapes = shape0;
if(data_layout == DataLayout::NHWC)
{
@@ -77,12 +74,12 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combi
}
// Create tensors
- CLTensor src = create_tensor<CLTensor>(src_dst_shapes, dt, 1, fixed_point_position, QuantizationInfo(), data_layout);
- CLTensor dst = create_tensor<CLTensor>(src_dst_shapes, dt, 1, fixed_point_position, QuantizationInfo(), data_layout);
- CLTensor mean = create_tensor<CLTensor>(shape1, dt, 1, fixed_point_position);
- CLTensor var = create_tensor<CLTensor>(shape1, dt, 1, fixed_point_position);
- CLTensor beta = create_tensor<CLTensor>(shape1, dt, 1, fixed_point_position);
- CLTensor gamma = create_tensor<CLTensor>(shape1, dt, 1, fixed_point_position);
+ CLTensor src = create_tensor<CLTensor>(src_dst_shapes, dt, 1, QuantizationInfo(), data_layout);
+ CLTensor dst = create_tensor<CLTensor>(src_dst_shapes, dt, 1, QuantizationInfo(), data_layout);
+ CLTensor mean = create_tensor<CLTensor>(shape1, dt, 1);
+ CLTensor var = create_tensor<CLTensor>(shape1, dt, 1);
+ CLTensor beta = create_tensor<CLTensor>(shape1, dt, 1);
+ CLTensor gamma = create_tensor<CLTensor>(shape1, dt, 1);
// Create and Configure function
CLBatchNormalizationLayer norm;
diff --git a/tests/validation/CL/ConvolutionLayer.cpp b/tests/validation/CL/ConvolutionLayer.cpp
index 7fd29f4d69..30dd8502ca 100644
--- a/tests/validation/CL/ConvolutionLayer.cpp
+++ b/tests/validation/CL/ConvolutionLayer.cpp
@@ -153,16 +153,13 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
ActivationFunctionsDataset),
input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, act_info)
{
- // Set fixed point position data type allowed
- int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
// Create tensors
- CLTensor src = create_tensor<CLTensor>(input_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
- CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
- CLTensor bias = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
- CLTensor dst = create_tensor<CLTensor>(output_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
+ CLTensor src = create_tensor<CLTensor>(input_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+ CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+ CLTensor bias = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+ CLTensor dst = create_tensor<CLTensor>(output_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -251,9 +248,6 @@ TEST_SUITE_END()
TEST_SUITE_END()
template <typename T>
-using CLGEMMConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
-
-template <typename T>
using CLGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
diff --git a/tests/validation/CL/DepthConvertLayer.cpp b/tests/validation/CL/DepthConvertLayer.cpp
index c6e9f75a59..ed1f54ca6e 100644
--- a/tests/validation/CL/DepthConvertLayer.cpp
+++ b/tests/validation/CL/DepthConvertLayer.cpp
@@ -67,19 +67,15 @@ template <typename T>
using CLDepthConvertLayerToU8Fixture = DepthConvertLayerValidationFixture<CLTensor, CLAccessor, CLDepthConvertLayer, T, uint8_t>;
template <typename T>
using CLDepthConvertLayerToU32Fixture = DepthConvertLayerValidationFixture<CLTensor, CLAccessor, CLDepthConvertLayer, T, uint32_t>;
-template <typename T>
-using CLDepthConvertLayerToFP32FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture<CLTensor, CLAccessor, CLDepthConvertLayer, T, float>;
TEST_SUITE(U8_to_U16)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
DepthConvertLayerShiftDataset),
shape, policy, shift)
{
- int fixed_point_position = 0;
-
// Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, DataType::U8, 1, fixed_point_position);
- CLTensor dst = create_tensor<CLTensor>(shape, DataType::U16, 1, fixed_point_position);
+ CLTensor src = create_tensor<CLTensor>(shape, DataType::U8, 1);
+ CLTensor dst = create_tensor<CLTensor>(shape, DataType::U16, 1);
// Create and Configure function
CLDepthConvertLayer depth_convert;
@@ -117,11 +113,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
DepthConvertLayerShiftDataset),
shape, policy, shift)
{
- int fixed_point_position = 0;
-
// Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, DataType::U8, 1, fixed_point_position);
- CLTensor dst = create_tensor<CLTensor>(shape, DataType::S16, 1, fixed_point_position);
+ CLTensor src = create_tensor<CLTensor>(shape, DataType::U8, 1);
+ CLTensor dst = create_tensor<CLTensor>(shape, DataType::S16, 1);
// Create and Configure function
CLDepthConvertLayer depth_convert;
@@ -158,11 +152,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
DepthConvertLayerShiftDataset),
shape, policy, shift)
{
- int fixed_point_position = 0;
-
// Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, DataType::U8, 1, fixed_point_position);
- CLTensor dst = create_tensor<CLTensor>(shape, DataType::S32, 1, fixed_point_position);
+ CLTensor src = create_tensor<CLTensor>(shape, DataType::U8, 1);
+ CLTensor dst = create_tensor<CLTensor>(shape, DataType::S32, 1);
// Create and Configure function
CLDepthConvertLayer depth_convert;
@@ -200,11 +192,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
DepthConvertLayerShiftDataset),
shape, policy, shift)
{
- int fixed_point_position = 0;
-
// Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, DataType::U16, 1, fixed_point_position);
- CLTensor dst = create_tensor<CLTensor>(shape, DataType::U8, 1, fixed_point_position);
+ CLTensor src = create_tensor<CLTensor>(shape, DataType::U16, 1);
+ CLTensor dst = create_tensor<CLTensor>(shape, DataType::U8, 1);
// Create and Configure function
CLDepthConvertLayer depth_convert;
@@ -241,11 +231,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
DepthConvertLayerShiftDataset),
shape, policy, shift)
{
- int fixed_point_position = 0;
-
// Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, DataType::U16, 1, fixed_point_position);
- CLTensor dst = create_tensor<CLTensor>(shape, DataType::U32, 1, fixed_point_position);
+ CLTensor src = create_tensor<CLTensor>(shape, DataType::U16, 1);
+ CLTensor dst = create_tensor<CLTensor>(shape, DataType::U32, 1);
// Create and Configure function
CLDepthConvertLayer depth_convert;
@@ -282,11 +270,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
DepthConvertLayerShiftDataset),
shape, policy, shift)
{
- int fixed_point_position = 0;
-
// Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, DataType::S16, 1, fixed_point_position);
- CLTensor dst = create_tensor<CLTensor>(shape, DataType::U8, 1, fixed_point_position);
+ CLTensor src = create_tensor<CLTensor>(shape, DataType::S16, 1);
+ CLTensor dst = create_tensor<CLTensor>(shape, DataType::U8, 1);
// Create and Configure function
CLDepthConvertLayer depth_convert;
@@ -323,11 +309,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
DepthConvertLayerShiftDataset),
shape, policy, shift)
{
- int fixed_point_position = 0;
-
// Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, DataType::S16, 1, fixed_point_position);
- CLTensor dst = create_tensor<CLTensor>(shape, DataType::S32, 1, fixed_point_position);
+ CLTensor src = create_tensor<CLTensor>(shape, DataType::S16, 1);
+ CLTensor dst = create_tensor<CLTensor>(shape, DataType::S32, 1);
// Create and Configure function
CLDepthConvertLayer depth_convert;
diff --git a/tests/validation/CL/DilatedConvolutionLayer.cpp b/tests/validation/CL/DilatedConvolutionLayer.cpp
index 4b22390b08..fdd6cc812a 100644
--- a/tests/validation/CL/DilatedConvolutionLayer.cpp
+++ b/tests/validation/CL/DilatedConvolutionLayer.cpp
@@ -114,16 +114,13 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
CNNDataTypes),
input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type)
{
- // Set fixed point position data type allowed
- int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
// Create tensors
- CLTensor src = create_tensor<CLTensor>(input_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
- CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
- CLTensor bias = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
- CLTensor dst = create_tensor<CLTensor>(output_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
+ CLTensor src = create_tensor<CLTensor>(input_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+ CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+ CLTensor bias = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, QuantizationInfo(2.f / 255.f, 127));
+ CLTensor dst = create_tensor<CLTensor>(output_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -204,9 +201,6 @@ TEST_SUITE_END()
TEST_SUITE_END()
template <typename T>
-using CLGEMMDilatedConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
-
-template <typename T>
using CLGEMMDilatedConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
TEST_SUITE(Quantized)
diff --git a/tests/validation/CL/DirectConvolutionLayer.cpp b/tests/validation/CL/DirectConvolutionLayer.cpp
index d8b2d7e155..a796b6e4da 100644
--- a/tests/validation/CL/DirectConvolutionLayer.cpp
+++ b/tests/validation/CL/DirectConvolutionLayer.cpp
@@ -200,9 +200,6 @@ TEST_SUITE_END()
TEST_SUITE_END()
template <typename T>
-using CLDirectConvolutionLayerFixedPointFixture = DirectConvolutionValidationFixedPointFixture<CLTensor, CLAccessor, CLDirectConvolutionLayer, T>;
-
-template <typename T>
using CLDirectConvolutionLayerQuantizedFixture = DirectConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLDirectConvolutionLayer, T>;
template <typename T>
using CLDirectConvolutionValidationWithTensorShapesQuantizedFixture = DirectConvolutionValidationWithTensorShapesQuantizedFixture<CLTensor, CLAccessor, CLDirectConvolutionLayer, T>;
diff --git a/tests/validation/CL/FullyConnectedLayer.cpp b/tests/validation/CL/FullyConnectedLayer.cpp
index 069d8a73ac..9958a88419 100644
--- a/tests/validation/CL/FullyConnectedLayer.cpp
+++ b/tests/validation/CL/FullyConnectedLayer.cpp
@@ -69,10 +69,8 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
CNNDataTypes),
src_shape, weights_shape, bias_shape, dst_shape, transpose_weights, reshape_weights, data_type)
{
- // Set fixed point position data type allowed
- const int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
- const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
- const QuantizationInfo quantization_info = is_data_type_quantized_asymmetric(data_type) ? QuantizationInfo(2.f / 255.f, 127) : QuantizationInfo();
+ const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
+ const QuantizationInfo quantization_info = is_data_type_quantized_asymmetric(data_type) ? QuantizationInfo(2.f / 255.f, 127) : QuantizationInfo();
TensorShape ws(weights_shape);
@@ -85,10 +83,10 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
}
// Create tensors
- CLTensor src = create_tensor<CLTensor>(src_shape, data_type, 1, fixed_point_position, quantization_info);
- CLTensor weights = create_tensor<CLTensor>(ws, data_type, 1, fixed_point_position, quantization_info);
- CLTensor bias = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, fixed_point_position, quantization_info);
- CLTensor dst = create_tensor<CLTensor>(dst_shape, data_type, 1, fixed_point_position, quantization_info);
+ CLTensor src = create_tensor<CLTensor>(src_shape, data_type, 1, quantization_info);
+ CLTensor weights = create_tensor<CLTensor>(ws, data_type, 1, quantization_info);
+ CLTensor bias = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, quantization_info);
+ CLTensor dst = create_tensor<CLTensor>(dst_shape, data_type, 1, quantization_info);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -192,9 +190,6 @@ TEST_SUITE_END()
TEST_SUITE_END()
template <typename T>
-using CLFullyConnectedLayerFixedPointFixture = FullyConnectedLayerValidationFixedPointFixture<CLTensor, CLAccessor, CLFullyConnectedLayer, T, false>;
-
-template <typename T>
using CLFullyConnectedLayerQuantizedFixture = FullyConnectedLayerValidationQuantizedFixture<CLTensor, CLAccessor, CLFullyConnectedLayer, T, false>;
TEST_SUITE(Quantized)
diff --git a/tests/validation/CL/GEMM.cpp b/tests/validation/CL/GEMM.cpp
index d066281843..639182030e 100644
--- a/tests/validation/CL/GEMM.cpp
+++ b/tests/validation/CL/GEMM.cpp
@@ -86,14 +86,11 @@ TEST_SUITE_END() // INTERLEAVE_4X4
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallGEMMDataset(), datasets::LargeGEMMDataset()), CNNDataTypes),
shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type)
{
- // Set fixed point position data type allowed
- const int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
// Create tensors
- CLTensor a = create_tensor<CLTensor>(shape_a, data_type, 1, fixed_point_position);
- CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, fixed_point_position);
- CLTensor c = create_tensor<CLTensor>(shape_c, data_type, 1, fixed_point_position);
- CLTensor dst = create_tensor<CLTensor>(output_shape, data_type, 1, fixed_point_position);
+ CLTensor a = create_tensor<CLTensor>(shape_a, data_type, 1);
+ CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1);
+ CLTensor c = create_tensor<CLTensor>(shape_c, data_type, 1);
+ CLTensor dst = create_tensor<CLTensor>(output_shape, data_type, 1);
ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -152,9 +149,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMFixture<float>, framework::DatasetMode::N
TEST_SUITE_END()
TEST_SUITE_END()
-template <typename T>
-using CLGEMMFixedPointFixture = GEMMValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMM, T>;
-
TEST_SUITE(OUTPUT_3D)
TEST_SUITE(Float)
TEST_SUITE(FP32)
diff --git a/tests/validation/CL/NormalizationLayer.cpp b/tests/validation/CL/NormalizationLayer.cpp
index f6a8e7a9fb..a2dbaff272 100644
--- a/tests/validation/CL/NormalizationLayer.cpp
+++ b/tests/validation/CL/NormalizationLayer.cpp
@@ -52,10 +52,6 @@ const auto NormalizationDataset = combine(combine(combine(combine(datasets::Smal
framework::dataset::make("NormalizationSize", 3, 9, 2)),
framework::dataset::make("Beta", { 0.5f, 1.f, 2.f })),
framework::dataset::make("IsScaled", { true }));
-const auto NormalizationDatasetQS = combine(combine(combine(combine(datasets::TinyShapes(), datasets::NormalizationTypes()),
- framework::dataset::make("NormalizationSize", 3, 9, 2)),
- framework::dataset::make("Beta", { 0.5f, 1.f, 2.f })),
- framework::dataset::make("IsScaled", { true }));
const auto NormalizationDatasetFP16 = combine(combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("NormType", { NormType::IN_MAP_1D, NormType::CROSS_MAP })),
framework::dataset::make("NormalizationSize", 3, 9, 2)),
framework::dataset::make("Beta", { 0.5f, 1.f, 2.f })),
@@ -135,9 +131,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLNormalizationLayerFixture<float>, framework::
TEST_SUITE_END()
TEST_SUITE_END()
-template <typename T>
-using CLNormalizationLayerFixedPointFixture = NormalizationValidationFixedPointFixture<CLTensor, CLAccessor, CLNormalizationLayer, T>;
-
TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
diff --git a/tests/validation/CL/PoolingLayer.cpp b/tests/validation/CL/PoolingLayer.cpp
index b28a5ebca9..0b8a11fe5d 100644
--- a/tests/validation/CL/PoolingLayer.cpp
+++ b/tests/validation/CL/PoolingLayer.cpp
@@ -148,9 +148,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLPoolingLayerFixture<half>, framework::Dataset
TEST_SUITE_END() // FP16
TEST_SUITE_END() // Float
-template <typename T>
-using CLPoolingLayerFixedPointFixture = PoolingLayerValidationFixedPointFixture<CLTensor, CLAccessor, CLPoolingLayer, T>;
-
TEST_SUITE(Quantized)
template <typename T>
diff --git a/tests/validation/CL/SYSTEM/AlexNet.cpp b/tests/validation/CL/SYSTEM/AlexNet.cpp
index 75f8d19651..9be6f2cf53 100644
--- a/tests/validation/CL/SYSTEM/AlexNet.cpp
+++ b/tests/validation/CL/SYSTEM/AlexNet.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -79,7 +79,7 @@ std::vector<unsigned int> compute_alexnet(DataType dt, unsigned int batches, std
"cnn_data/alexnet_model/fc8_b.npy"
};
CLAlexNetModel network{};
- network.init(dt, 4, batches);
+ network.init(dt, batches);
network.build();
network.allocate();
network.fill(weight_files, bias_files);
diff --git a/tests/validation/CL/SoftmaxLayer.cpp b/tests/validation/CL/SoftmaxLayer.cpp
index b47f84f8cd..66ca0b8ca7 100644
--- a/tests/validation/CL/SoftmaxLayer.cpp
+++ b/tests/validation/CL/SoftmaxLayer.cpp
@@ -64,13 +64,11 @@ TEST_SUITE(SoftmaxLayer)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(concat(datasets::SoftmaxLayerSmallShapes(), datasets::SoftmaxLayerLargeShapes()), CNNDataTypes), shape, data_type)
{
- // Set fixed point position and quantization info if is allowed
- const int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
- const QuantizationInfo quantization_info = is_data_type_quantized_asymmetric(data_type) ? QuantizationInfo(1.f / 255.f, 0) : QuantizationInfo();
+ const QuantizationInfo quantization_info = is_data_type_quantized_asymmetric(data_type) ? QuantizationInfo(1.f / 255.f, 0) : QuantizationInfo();
// Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, data_type, 1, fixed_point_position, quantization_info);
- CLTensor dst = create_tensor<CLTensor>(shape, data_type, 1, fixed_point_position, QuantizationInfo(1.f / 256.f, 0));
+ CLTensor src = create_tensor<CLTensor>(shape, data_type, 1, quantization_info);
+ CLTensor dst = create_tensor<CLTensor>(shape, data_type, 1, QuantizationInfo(1.f / 256.f, 0));
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -167,9 +165,6 @@ TEST_SUITE_END()
TEST_SUITE_END()
template <typename T>
-using CLSoftmaxLayerFixedPointFixture = SoftmaxValidationFixedPointFixture<CLTensor, CLAccessor, CLSoftmaxLayer, T>;
-
-template <typename T>
using CLSoftmaxLayerQuantizedFixture = SoftmaxValidationQuantizedFixture<CLTensor, CLAccessor, CLSoftmaxLayer, T>;
TEST_SUITE(Quantized)
diff --git a/tests/validation/CL/Winograd.cpp b/tests/validation/CL/Winograd.cpp
index f68ec8c286..501afaccf9 100644
--- a/tests/validation/CL/Winograd.cpp
+++ b/tests/validation/CL/Winograd.cpp
@@ -169,7 +169,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
TensorShape shape_out = compute_winograd_input_transform_shape(tensor_info_in, winograd_info);
// Create tensors
- CLTensor in = create_tensor<CLTensor>(shape_in, data_type, 1, 0, QuantizationInfo(), data_layout);
+ CLTensor in = create_tensor<CLTensor>(shape_in, data_type, 1, QuantizationInfo(), data_layout);
CLTensor out = create_tensor<CLTensor>(shape_out, data_type);
ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -216,7 +216,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
TensorShape shape_out = compute_winograd_input_transform_shape(tensor_info_in, winograd_info);
// Create tensors
- CLTensor in = create_tensor<CLTensor>(shape_in_nhwc, data_type, 1, 0, QuantizationInfo(), data_layout);
+ CLTensor in = create_tensor<CLTensor>(shape_in_nhwc, data_type, 1, QuantizationInfo(), data_layout);
CLTensor out = create_tensor<CLTensor>(shape_out, data_type);
ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -296,8 +296,8 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL,
TensorShape shape_b = compute_winograd_filter_transform_shape(TensorInfo(shape_a, 1, data_type), winograd_info);
// Create tensors
- CLTensor a = create_tensor<CLTensor>(shape_a, data_type, 1, 0, QuantizationInfo(), data_layout);
- CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, 0, QuantizationInfo(), data_layout);
+ CLTensor a = create_tensor<CLTensor>(shape_a, data_type, 1, QuantizationInfo(), data_layout);
+ CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, QuantizationInfo(), data_layout);
ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -348,8 +348,8 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL,
TensorShape shape_b = compute_winograd_filter_transform_shape(tensor_info_in, winograd_info);
// Create tensors
- CLTensor a = create_tensor<CLTensor>(shape_in_nhwc, data_type, 1, 0, QuantizationInfo(), data_layout);
- CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, 0, QuantizationInfo(), data_layout);
+ CLTensor a = create_tensor<CLTensor>(shape_in_nhwc, data_type, 1, QuantizationInfo(), data_layout);
+ CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, QuantizationInfo(), data_layout);
ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -444,7 +444,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
// Create tensors
CLTensor a = create_tensor<CLTensor>(shape_a, data_type);
- CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, 0, QuantizationInfo(), winograd_info.output_data_layout);
+ CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, QuantizationInfo(), winograd_info.output_data_layout);
ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -481,7 +481,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
// Create tensors
CLTensor a = create_tensor<CLTensor>(shape_a, data_type);
- CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, 0, QuantizationInfo(), winograd_info.output_data_layout);
+ CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, QuantizationInfo(), winograd_info.output_data_layout);
ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);