author    Vidhya Sudhan Loganathan <vidhyasudhan.loganathan@arm.com>  2018-06-18 14:40:56 +0100
committer Anthony Barbier <anthony.barbier@arm.com>                   2018-11-02 16:53:09 +0000
commit    0fc25454b6ce499b7f89792f91b81a61a42d3182 (patch)
tree      0c988cbbef3f5f1967cc2f77672d05a702191349
parent    f5f0ecca39c905bc8c1f90e7469eed2221ca662f (diff)
download  ComputeLibrary-0fc25454b6ce499b7f89792f91b81a61a42d3182.tar.gz
COMPMID-970 : Remove QS8 / QS16 support
Remove QS8 and QS16 validation and benchmark tests.

Change-Id: I566f1474c1fafcb3903115ec2d3a003d73e4c93b
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/133762
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Pablo Tello <pablo.tello@arm.com>
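For context: QS8 and QS16 were the library's symmetric fixed-point formats, where a raw 8- or 16-bit integer plus a "fixed point position" (the number of fractional bits) represents the real value raw * 2^-position. A minimal sketch of that mapping — hypothetical helper names, not arm_compute API:

    // Sketch only: the value a QS8/QS16 tensor element represented.
    // to_float_qs and to_qs8 are hypothetical names, not library functions.
    #include <cmath>
    #include <cstdint>

    inline float to_float_qs(int raw, int fixed_point_position)
    {
        // raw / 2^fixed_point_position
        return std::ldexp(static_cast<float>(raw), -fixed_point_position);
    }

    inline int8_t to_qs8(float value, int fixed_point_position)
    {
        // Round to the nearest multiple of 2^-fixed_point_position,
        // saturating to the int8_t range.
        const float scaled = std::round(std::ldexp(value, fixed_point_position));
        return static_cast<int8_t>(std::fmax(-128.0f, std::fmin(127.0f, scaled)));
    }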
-rw-r--r--  tests/AssetsLibrary.h | 4
-rw-r--r--  tests/Utils.h | 2
-rw-r--r--  tests/benchmark/CL/ActivationLayer.cpp | 4
-rw-r--r--  tests/benchmark/NEON/ActivationLayer.cpp | 8
-rw-r--r--  tests/validation/CL/ActivationLayer.cpp | 65
-rw-r--r--  tests/validation/CL/ArithmeticAddition.cpp | 50
-rw-r--r--  tests/validation/CL/ArithmeticSubtraction.cpp | 52
-rw-r--r--  tests/validation/CL/BatchNormalizationLayer.cpp | 58
-rw-r--r--  tests/validation/CL/ConvolutionLayer.cpp | 55
-rw-r--r--  tests/validation/CL/DeconvolutionLayer.cpp | 4
-rw-r--r--  tests/validation/CL/DepthConcatenateLayer.cpp | 36
-rw-r--r--  tests/validation/CL/DepthConvertLayer.cpp | 126
-rw-r--r--  tests/validation/CL/DilatedConvolutionLayer.cpp | 57
-rw-r--r--  tests/validation/CL/DirectConvolutionLayer.cpp | 26
-rw-r--r--  tests/validation/CL/FixedPoint/FixedPointTarget.h | 131
-rw-r--r--  tests/validation/CL/FixedPoint/FixedPoint_QS8.cpp | 96
-rw-r--r--  tests/validation/CL/Flatten.cpp | 32
-rw-r--r--  tests/validation/CL/FullyConnectedLayer.cpp | 60
-rw-r--r--  tests/validation/CL/GEMM.cpp | 95
-rw-r--r--  tests/validation/CL/Im2Col.cpp | 6
-rw-r--r--  tests/validation/CL/NormalizationLayer.cpp | 47
-rw-r--r--  tests/validation/CL/PixelWiseMultiplication.cpp | 37
-rw-r--r--  tests/validation/CL/PoolingLayer.cpp | 51
-rw-r--r--  tests/validation/CL/SoftmaxLayer.cpp | 51
-rw-r--r--  tests/validation/CL/WidthConcatenateLayer.cpp | 36
-rw-r--r--  tests/validation/Helpers.h | 20
-rw-r--r--  tests/validation/NEON/ActivationLayer.cpp | 57
-rw-r--r--  tests/validation/NEON/ArithmeticAddition.cpp | 50
-rw-r--r--  tests/validation/NEON/ArithmeticSubtraction.cpp | 54
-rw-r--r--  tests/validation/NEON/BatchNormalizationLayer.cpp | 60
-rw-r--r--  tests/validation/NEON/Col2Im.cpp | 4
-rw-r--r--  tests/validation/NEON/ConvolutionLayer.cpp | 49
-rw-r--r--  tests/validation/NEON/DeconvolutionLayer.cpp | 4
-rw-r--r--  tests/validation/NEON/DepthConcatenateLayer.cpp | 36
-rw-r--r--  tests/validation/NEON/DepthConvertLayer.cpp | 143
-rw-r--r--  tests/validation/NEON/DilatedConvolutionLayer.cpp | 53
-rw-r--r--  tests/validation/NEON/DirectConvolutionLayer.cpp | 46
-rw-r--r--  tests/validation/NEON/FixedPoint/FixedPoint.cpp | 135
-rw-r--r--  tests/validation/NEON/FixedPoint/FixedPointTarget.h | 225
-rw-r--r--  tests/validation/NEON/FixedPointPixelWiseMultiplication.cpp | 147
-rw-r--r--  tests/validation/NEON/Flatten.cpp | 32
-rw-r--r--  tests/validation/NEON/FullyConnectedLayer.cpp | 60
-rw-r--r--  tests/validation/NEON/GEMM.cpp | 95
-rw-r--r--  tests/validation/NEON/GEMMLowp.cpp | 2
-rw-r--r--  tests/validation/NEON/Im2Col.cpp | 6
-rw-r--r--  tests/validation/NEON/NormalizationLayer.cpp | 46
-rw-r--r--  tests/validation/NEON/PixelWiseMultiplication.cpp | 20
-rw-r--r--  tests/validation/NEON/PoolingLayer.cpp | 46
-rw-r--r--  tests/validation/NEON/SoftmaxLayer.cpp | 48
-rw-r--r--  tests/validation/UNIT/FixedPoint.cpp | 211
-rw-r--r--  tests/validation/UNIT/TensorInfo.cpp | 6
-rw-r--r--  tests/validation/Validation.cpp | 4
-rw-r--r--  tests/validation/fixtures/FixedPointFixture.h | 123
-rw-r--r--  tests/validation/fixtures/FixedPointPixelWiseMultiplicationFixture.h | 119
-rw-r--r--  tests/validation/reference/FixedPointPixelWiseMultiplication.cpp | 85
-rw-r--r--  tests/validation/reference/FixedPointPixelWiseMultiplication.h | 43
-rw-r--r--  tests/validation/reference/PixelWiseMultiplication.cpp | 4
-rw-r--r--  utils/GraphUtils.cpp | 2
-rw-r--r--  utils/TypePrinter.h | 6

59 files changed, 58 insertions(+), 3172 deletions(-)
diff --git a/tests/AssetsLibrary.h b/tests/AssetsLibrary.h
index 1fba3d4b46..fd2bc99b0f 100644
--- a/tests/AssetsLibrary.h
+++ b/tests/AssetsLibrary.h
@@ -564,7 +564,6 @@ void AssetsLibrary::fill_tensor_uniform(T &&tensor, std::random_device::result_t
break;
}
case DataType::S8:
- case DataType::QS8:
{
std::uniform_int_distribution<int8_t> distribution_s8(std::numeric_limits<int8_t>::lowest(), std::numeric_limits<int8_t>::max());
fill(tensor, distribution_s8, seed_offset);
@@ -577,7 +576,6 @@ void AssetsLibrary::fill_tensor_uniform(T &&tensor, std::random_device::result_t
break;
}
case DataType::S16:
- case DataType::QS16:
{
std::uniform_int_distribution<int16_t> distribution_s16(std::numeric_limits<int16_t>::lowest(), std::numeric_limits<int16_t>::max());
fill(tensor, distribution_s16, seed_offset);
@@ -653,7 +651,6 @@ void AssetsLibrary::fill_tensor_uniform(T &&tensor, std::random_device::result_t
break;
}
case DataType::S8:
- case DataType::QS8:
{
ARM_COMPUTE_ERROR_ON(!(std::is_same<int8_t, D>::value));
std::uniform_int_distribution<int8_t> distribution_s8(low, high);
@@ -668,7 +665,6 @@ void AssetsLibrary::fill_tensor_uniform(T &&tensor, std::random_device::result_t
break;
}
case DataType::S16:
- case DataType::QS16:
{
ARM_COMPUTE_ERROR_ON(!(std::is_same<int16_t, D>::value));
std::uniform_int_distribution<int16_t> distribution_s16(low, high);
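After the removals above, S8 and S16 keep their existing fill branches; QS8 and QS16 had simply shared them. A self-contained sketch of the surviving pattern (an assumed simplification of AssetsLibrary::fill_tensor_uniform, not the class itself):

    // Fill a signed 8-bit buffer from a uniform distribution over the full
    // int8_t range, mirroring the S8 branch kept above. The standard does not
    // allow int8_t as a uniform_int_distribution parameter, so this sketch
    // draws int and narrows.
    #include <cstdint>
    #include <limits>
    #include <random>
    #include <vector>

    std::vector<int8_t> fill_s8(std::size_t n, unsigned seed)
    {
        std::mt19937 gen(seed);
        std::uniform_int_distribution<int> dist(std::numeric_limits<int8_t>::lowest(),
                                                std::numeric_limits<int8_t>::max());
        std::vector<int8_t> out(n);
        for(auto &v : out)
        {
            v = static_cast<int8_t>(dist(gen));
        }
        return out;
    }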
diff --git a/tests/Utils.h b/tests/Utils.h
index 278af414db..332c916498 100644
--- a/tests/Utils.h
+++ b/tests/Utils.h
@@ -356,14 +356,12 @@ void store_value_with_data_type(void *ptr, T value, DataType data_type)
*reinterpret_cast<uint8_t *>(ptr) = value;
break;
case DataType::S8:
- case DataType::QS8:
*reinterpret_cast<int8_t *>(ptr) = value;
break;
case DataType::U16:
*reinterpret_cast<uint16_t *>(ptr) = value;
break;
case DataType::S16:
- case DataType::QS16:
*reinterpret_cast<int16_t *>(ptr) = value;
break;
case DataType::U32:
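The QS8/QS16 labels removed here were storage-identical to S8/S16 — the fixed point position changed only the interpretation of the bits, not the layout — which is why each pair shared one store branch. A minimal standalone sketch of the dispatch, under that assumption:

    // Assumed simplification of store_value_with_data_type in tests/Utils.h:
    // write a value through a pointer reinterpreted to the element type.
    #include <cstdint>

    enum class DataType { U8, S8, U16, S16 };

    template <typename T>
    void store_value(void *ptr, T value, DataType dt)
    {
        switch(dt)
        {
            case DataType::U8:  *reinterpret_cast<uint8_t *>(ptr)  = value; break;
            case DataType::S8:  *reinterpret_cast<int8_t *>(ptr)   = value; break;
            case DataType::U16: *reinterpret_cast<uint16_t *>(ptr) = value; break;
            case DataType::S16: *reinterpret_cast<int16_t *>(ptr)  = value; break;
        }
    }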
diff --git a/tests/benchmark/CL/ActivationLayer.cpp b/tests/benchmark/CL/ActivationLayer.cpp
index ff612e3a3f..69db5a1974 100644
--- a/tests/benchmark/CL/ActivationLayer.cpp
+++ b/tests/benchmark/CL/ActivationLayer.cpp
@@ -48,8 +48,8 @@ namespace benchmark
{
namespace
{
-const auto data_types = framework::dataset::make("DataType", { DataType::F16, DataType::F32, DataType::QS8, DataType::QS16 });
-const auto data_types_mobilenet = framework::dataset::make("DataType", { DataType::F16, DataType::F32, DataType::QS8, DataType::QS16, DataType::QASYMM8 });
+const auto data_types = framework::dataset::make("DataType", { DataType::F16, DataType::F32 });
+const auto data_types_mobilenet = framework::dataset::make("DataType", { DataType::F16, DataType::F32, DataType::QASYMM8 });
} // namespace
using CLActivationLayerFixture = ActivationLayerFixture<CLTensor, CLActivationLayer, CLAccessor>;
diff --git a/tests/benchmark/NEON/ActivationLayer.cpp b/tests/benchmark/NEON/ActivationLayer.cpp
index e1664d3e59..1c4ea214b7 100644
--- a/tests/benchmark/NEON/ActivationLayer.cpp
+++ b/tests/benchmark/NEON/ActivationLayer.cpp
@@ -49,11 +49,11 @@ namespace benchmark
namespace
{
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-const auto data_types = framework::dataset::make("DataType", { DataType::F16, DataType::F32, DataType::QS8, DataType::QS16 });
-const auto data_types_mobilenet = framework::dataset::make("DataType", { DataType::F16, DataType::F32, DataType::QS8, DataType::QS16, DataType::QASYMM8 });
+const auto data_types = framework::dataset::make("DataType", { DataType::F16, DataType::F32 });
+const auto data_types_mobilenet = framework::dataset::make("DataType", { DataType::F16, DataType::F32, DataType::QASYMM8 });
#else /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-const auto data_types = framework::dataset::make("DataType", { DataType::F32, DataType::QS8, DataType::QS16 });
-const auto data_types_mobilenet = framework::dataset::make("DataType", { DataType::F32, DataType::QS8, DataType::QS16, DataType::QASYMM8 });
+const auto data_types = framework::dataset::make("DataType", { DataType::F32 });
+const auto data_types_mobilenet = framework::dataset::make("DataType", { DataType::F32, DataType::QASYMM8 });
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
} // namespace
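The benchmark datasets above choose their type lists at compile time: F16 enters only when the target supports FP16 vector arithmetic. A sketch of the same guard pattern in isolation (the DataType enum here is a stand-in, not the library's):

    // Compile-time selection of benchmark data types, mirroring the
    // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC guard above (sketch only).
    #include <vector>

    enum class DataType { F16, F32, QASYMM8 };

    static const std::vector<DataType> data_types =
    {
    #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
        DataType::F16,
    #endif
        DataType::F32
    };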
diff --git a/tests/validation/CL/ActivationLayer.cpp b/tests/validation/CL/ActivationLayer.cpp
index 5fd0855ded..f122f6deeb 100644
--- a/tests/validation/CL/ActivationLayer.cpp
+++ b/tests/validation/CL/ActivationLayer.cpp
@@ -99,9 +99,7 @@ AbsoluteTolerance<float> tolerance(ActivationLayerInfo::ActivationFunction activ
const auto CNNDataTypes = framework::dataset::make("DataType",
{
DataType::F16,
- DataType::F32,
- DataType::QS8,
- DataType::QS16,
+ DataType::F32
});
/** Input data sets. */
@@ -114,12 +112,9 @@ TEST_SUITE(ActivationLayer)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(concat(datasets::SmallShapes(), datasets::LargeShapes()), CNNDataTypes), framework::dataset::make("InPlace", { false, true })),
shape, data_type, in_place)
{
- // Set fixed point position data type allowed
- const int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
// Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, data_type, 1, fixed_point_position);
- CLTensor dst = create_tensor<CLTensor>(shape, data_type, 1, fixed_point_position);
+ CLTensor src = create_tensor<CLTensor>(shape, data_type, 1);
+ CLTensor dst = create_tensor<CLTensor>(shape, data_type, 1);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -165,9 +160,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), // Unsupported activation
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
- TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
}),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
@@ -175,9 +167,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8),
TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
- TensorInfo(),
})),
framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
@@ -185,11 +174,8 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
- ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
- ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
- ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
})),
- framework::dataset::make("Expected", { false, false, true, true, false, false, false, true, true })),
+ framework::dataset::make("Expected", { false, false, true, true, false, false })),
input_info, output_info, act_info, expected)
{
ARM_COMPUTE_EXPECT(bool(CLActivationLayer::validate(&input_info.clone()->set_is_resizable(false), (output_info.total_size() == 0) ? nullptr : &output_info.clone()->set_is_resizable(false), act_info)) == expected, framework::LogLevel::ERRORS);
@@ -237,49 +223,6 @@ TEST_SUITE_END()
template <typename T>
using CLActivationLayerFixedPointFixture = ActivationValidationFixedPointFixture<CLTensor, CLAccessor, CLActivationLayer, T>;
-TEST_SUITE(FixedPoint)
-TEST_SUITE(QS8)
-// We test for fixed point precision [3,5] because [1,2] and [6,7] ranges cause
-// overflowing issues in most of the transcendentals functions.
-FIXTURE_DATA_TEST_CASE(RunTiny, CLActivationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), ActivationDataset),
- framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 3, 6)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), ActivationDataset),
- framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 3, 6)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 14
-FIXTURE_DATA_TEST_CASE(RunTiny, CLActivationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), ActivationDataset),
- framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), ActivationDataset),
- framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
-}
-TEST_SUITE_END()
-TEST_SUITE_END()
-
template <typename T>
using CLActivationLayerQuantizedFixture = ActivationValidationQuantizedFixture<CLTensor, CLAccessor, CLActivationLayer, T>;
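The deleted comment above — testing precision [3,5] because [1,2] and [6,7] overflow the transcendental functions — follows directly from the QS8 range: with N fractional bits there are 7 - N integer bits, so values live in [-2^(7-N), 2^(7-N) - 2^-N]. A small program tabulating that trade-off between range and resolution:

    // Representable range of QS8 for each fixed point position (sketch).
    #include <cmath>
    #include <cstdio>

    int main()
    {
        for(int n = 1; n <= 7; ++n)
        {
            const double step = std::ldexp(1.0, -n);      // resolution 2^-n
            const double lo   = -std::ldexp(1.0, 7 - n);  // -2^(7-n)
            const double hi   = std::ldexp(1.0, 7 - n) - step;
            std::printf("QS8, %d fractional bits: [%g, %g], step %g\n", n, lo, hi, step);
        }
        return 0;
    }

Few fractional bits give a wide range but coarse steps; many give fine steps but a range too small for exp/log outputs, so only the middle positions were testable.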
diff --git a/tests/validation/CL/ArithmeticAddition.cpp b/tests/validation/CL/ArithmeticAddition.cpp
index e9a892d744..9323ed2934 100644
--- a/tests/validation/CL/ArithmeticAddition.cpp
+++ b/tests/validation/CL/ArithmeticAddition.cpp
@@ -48,10 +48,6 @@ const auto ArithmeticAdditionU8Dataset = combine(combine(framework::dataset::mak
DataType::U8));
const auto ArithmeticAdditionS16Dataset = combine(combine(framework::dataset::make("DataType", { DataType::U8, DataType::S16 }), framework::dataset::make("DataType", DataType::S16)),
framework::dataset::make("DataType", DataType::S16));
-const auto ArithmeticAdditionQS8Dataset = combine(combine(framework::dataset::make("DataType", DataType::QS8), framework::dataset::make("DataType", DataType::QS8)),
- framework::dataset::make("DataType", DataType::QS8));
-const auto ArithmeticAdditionQS16Dataset = combine(combine(framework::dataset::make("DataType", DataType::QS16), framework::dataset::make("DataType", DataType::QS16)),
- framework::dataset::make("DataType", DataType::QS16));
const auto ArithmeticAdditionFP16Dataset = combine(combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F16)),
framework::dataset::make("DataType", DataType::F16));
const auto ArithmeticAdditionFP32Dataset = combine(combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::F32)),
@@ -69,26 +65,20 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), // Window shrink
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
}),
framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16),
TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
})),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
})),
- framework::dataset::make("Expected", { true, true, false, false, false, false, true })),
+ framework::dataset::make("Expected", { true, true, false, false, false})),
input1_info, input2_info, output_info, expected)
{
ARM_COMPUTE_EXPECT(bool(CLArithmeticAddition::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), ConvertPolicy::WRAP)) == expected, framework::LogLevel::ERRORS);
@@ -174,44 +164,6 @@ TEST_SUITE_END()
template <typename T>
using CLArithmeticAdditionFixedPointFixture = ArithmeticAdditionValidationFixedPointFixture<CLTensor, CLAccessor, CLArithmeticAddition, T>;
-TEST_SUITE(Quantized)
-TEST_SUITE(QS8)
-FIXTURE_DATA_TEST_CASE(RunTiny, CLArithmeticAdditionFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), ArithmeticAdditionQS8Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- framework::dataset::make("FractionalBits", 1, 7)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-
-FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionQS8Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- framework::dataset::make("FractionalBits", 1, 7)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-FIXTURE_DATA_TEST_CASE(RunTiny, CLArithmeticAdditionFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), ArithmeticAdditionQS16Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- framework::dataset::make("FractionalBits", 1, 15)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-
-FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionQS16Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- framework::dataset::make("FractionalBits", 1, 15)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-TEST_SUITE_END()
-TEST_SUITE_END()
-
TEST_SUITE(Float)
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ArithmeticAdditionFP16Dataset),
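Throughout these Validate tests, combine takes the cartesian product of datasets while zip pairs them element-wise, so each Expected list needs exactly one entry per zipped TensorInfo row — which is why trimming two QS8 rows above also shrinks the Expected list from seven entries to five. A generic sketch of the two operations over plain vectors (assumed semantics, not the framework's actual types):

    // Assumed semantics of the test framework's zip/combine, over std::vector.
    #include <cstddef>
    #include <utility>
    #include <vector>

    template <typename A, typename B>
    std::vector<std::pair<A, B>> zip(const std::vector<A> &a, const std::vector<B> &b)
    {
        std::vector<std::pair<A, B>> out;
        for(std::size_t i = 0; i < a.size() && i < b.size(); ++i)
        {
            out.emplace_back(a[i], b[i]); // element-wise pairing
        }
        return out;
    }

    template <typename A, typename B>
    std::vector<std::pair<A, B>> combine(const std::vector<A> &a, const std::vector<B> &b)
    {
        std::vector<std::pair<A, B>> out;
        for(const auto &x : a)
        {
            for(const auto &y : b)
            {
                out.emplace_back(x, y); // cartesian product
            }
        }
        return out;
    }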
diff --git a/tests/validation/CL/ArithmeticSubtraction.cpp b/tests/validation/CL/ArithmeticSubtraction.cpp
index 43c94a7b9b..4ba5387a61 100644
--- a/tests/validation/CL/ArithmeticSubtraction.cpp
+++ b/tests/validation/CL/ArithmeticSubtraction.cpp
@@ -55,10 +55,6 @@ const auto ArithmeticSubtractionS16U8S16Dataset = combine(combine(framework::dat
framework::dataset::make("DataType", DataType::S16));
const auto ArithmeticSubtractionU8S16S16Dataset = combine(combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S16)),
framework::dataset::make("DataType", DataType::S16));
-const auto ArithmeticSubtractionQS8Dataset = combine(combine(framework::dataset::make("DataType", DataType::QS8), framework::dataset::make("DataType", DataType::QS8)),
- framework::dataset::make("DataType", DataType::QS8));
-const auto ArithmeticSubtractionQS16Dataset = combine(combine(framework::dataset::make("DataType", DataType::QS16), framework::dataset::make("DataType", DataType::QS16)),
- framework::dataset::make("DataType", DataType::QS16));
const auto ArithmeticSubtractionFP16Dataset = combine(combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F16)),
framework::dataset::make("DataType", DataType::F16));
const auto ArithmeticSubtractionFP32Dataset = combine(combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::F32)),
@@ -76,24 +72,18 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), // Window shrink
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
}),
framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16),
TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
})),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
})),
framework::dataset::make("Expected", { true, true, false, false, false, false, true })),
input1_info, input2_info, output_info, expected)
@@ -244,48 +234,6 @@ TEST_SUITE_END()
template <typename T1, typename T2 = T1, typename T3 = T1>
using CLArithmeticSubtractionFixedPointFixture = ArithmeticSubtractionValidationFixedPointFixture<CLTensor, CLAccessor, CLArithmeticSubtraction, T1, T2, T3>;
-TEST_SUITE(Quantized)
-TEST_SUITE(QS8)
-FIXTURE_DATA_TEST_CASE(RunTiny, CLArithmeticSubtractionFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(),
- ArithmeticSubtractionQS8Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- framework::dataset::make("FractionalBits", 1, 7)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-
-FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(),
- ArithmeticSubtractionQS8Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- framework::dataset::make("FractionalBits", 1, 7)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-FIXTURE_DATA_TEST_CASE(RunTiny, CLArithmeticSubtractionFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(),
- ArithmeticSubtractionQS16Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- framework::dataset::make("FractionalBits", 1, 15)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-
-FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(),
- ArithmeticSubtractionQS16Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- framework::dataset::make("FractionalBits", 1, 15)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-TEST_SUITE_END()
-TEST_SUITE_END()
-
TEST_SUITE(Float)
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ArithmeticSubtractionFP16Dataset),
diff --git a/tests/validation/CL/BatchNormalizationLayer.cpp b/tests/validation/CL/BatchNormalizationLayer.cpp
index f6dc6b377c..de775bf807 100644
--- a/tests/validation/CL/BatchNormalizationLayer.cpp
+++ b/tests/validation/CL/BatchNormalizationLayer.cpp
@@ -46,8 +46,6 @@ namespace
{
constexpr AbsoluteTolerance<float> tolerance_f32(0.00001f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
constexpr AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
-constexpr AbsoluteTolerance<float> tolerance_qs8(3.0f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::QS8 */
-constexpr AbsoluteTolerance<float> tolerance_qs16(6.0f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::QS16 */
const auto act_infos = framework::dataset::make("ActivationInfo",
{
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
@@ -65,7 +63,7 @@ using CLBatchNormalizationLayerFixture = BatchNormalizationLayerValidationFixtur
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
combine(framework::dataset::make("UseBeta", { false, true }),
framework::dataset::make("UseGamma", { false, true }))),
- framework::dataset::make("DataType", { DataType::QS8, DataType::QS16, DataType::F16, DataType::F32 })),
+ framework::dataset::make("DataType", { DataType::F16, DataType::F32 })),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
shape0, shape1, epsilon, use_gamma, use_beta, dt, data_layout)
{
@@ -105,50 +103,34 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching data types
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching data types
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Invalid mean/var/beta/gamma shape
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point position
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Fused activation with fixed point not supported
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Unsupported fused activation
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Fused activation's a < b
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
}),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F16),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
- TensorInfo(),
})),
framework::dataset::make("MVBGInfo",{ TensorInfo(TensorShape(2U), 1, DataType::F32),
TensorInfo(TensorShape(2U), 1, DataType::F32),
TensorInfo(TensorShape(2U), 1, DataType::F16),
TensorInfo(TensorShape(2U), 1, DataType::F32),
TensorInfo(TensorShape(5U), 1, DataType::F32),
- TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
- TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
TensorInfo(TensorShape(2U), 1, DataType::F32),
TensorInfo(TensorShape(2U), 1, DataType::F32),
- TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
- TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
})),
framework::dataset::make("ActivationLayerInfo",{ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f),
- ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f, 2.f),
- ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f, 2.f),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 2.f, 6.f),
- ActivationLayerInfo(),
- ActivationLayerInfo(),
})),
- framework::dataset::make("Expected", { true, false, false, false, false, false, false, false, false, true, true})),
+ framework::dataset::make("Expected", { true, false, false, false, false, false, false})),
input_info, output_info, mvbg_info, act_info, expected)
{
const auto &mean_info = mvbg_info;
@@ -189,42 +171,6 @@ FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<half>, framework
TEST_SUITE_END()
TEST_SUITE_END()
-TEST_SUITE(Quantized)
-template <typename T>
-using CLBatchNormalizationLayerFixedPointFixture = BatchNormalizationLayerValidationFixedPointFixture<CLTensor, CLAccessor, CLBatchNormalizationLayer, T>;
-
-TEST_SUITE(QS8)
-FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
- framework::dataset::make("UseBeta", false)),
- framework::dataset::make("UseGamma", false)),
- framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
- framework::dataset::make("DataType", DataType::QS8)),
- framework::dataset::make("DataLayout", DataLayout::NCHW)),
- framework::dataset::make("FractionalBits", 1, 6)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_qs8, 0);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
- framework::dataset::make("UseBeta", false)),
- framework::dataset::make("UseGamma", false)),
- framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
- framework::dataset::make("DataType", DataType::QS16)),
- framework::dataset::make("DataLayout", DataLayout::NCHW)),
- framework::dataset::make("FractionalBits", 1, 14)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_qs16, 0);
-}
-TEST_SUITE_END()
-
-TEST_SUITE_END()
-
TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
diff --git a/tests/validation/CL/ConvolutionLayer.cpp b/tests/validation/CL/ConvolutionLayer.cpp
index 0c40953524..242c252015 100644
--- a/tests/validation/CL/ConvolutionLayer.cpp
+++ b/tests/validation/CL/ConvolutionLayer.cpp
@@ -48,7 +48,6 @@ namespace
constexpr AbsoluteTolerance<float> absolute_tolerance_float(0.0001f); /**< Absolute Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
RelativeTolerance<float> tolerance_f32(0.05f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
RelativeTolerance<half_float::half> tolerance_f16(half_float::half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
-constexpr AbsoluteTolerance<float> tolerance_fixed(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
constexpr AbsoluteTolerance<float> tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
constexpr float tolerance_num = 0.07f; /**< Tolerance number */
@@ -57,8 +56,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType",
{
DataType::F16,
DataType::F32,
- DataType::QS8,
- DataType::QS16,
DataType::QASYMM8,
});
const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
@@ -256,58 +253,6 @@ TEST_SUITE_END()
template <typename T>
using CLGEMMConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
-TEST_SUITE(FixedPoint)
-TEST_SUITE(QS8)
-// We test for fixed point precision [4,6]
-FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::TinyConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 4, 7)),
- ActivationFunctionsDataset))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_fixed);
-}
-
-FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 4, 7)),
- ActivationFunctionsDataset))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_fixed);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-// Testing for fixed point position [1,14)
-FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::TinyConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)),
- ActivationFunctionsDataset))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_fixed);
-}
-
-FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)),
- ActivationFunctionsDataset))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_fixed);
-}
-TEST_SUITE_END()
-TEST_SUITE_END()
-
template <typename T>
using CLGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
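The tolerances declared above come in two flavours: AbsoluteTolerance bounds |target - reference| directly, while RelativeTolerance scales the allowed error by the reference's magnitude. A minimal sketch of the two checks (assumed behaviour; the framework's actual classes add more handling):

    // Assumed comparison semantics behind AbsoluteTolerance / RelativeTolerance.
    #include <cmath>

    inline bool within_absolute(float target, float reference, float tol)
    {
        return std::fabs(target - reference) <= tol;
    }

    inline bool within_relative(float target, float reference, float tol)
    {
        // Scale the allowed error by the reference magnitude; fall back to an
        // absolute check near zero to avoid dividing by tiny values.
        const float norm = std::fmax(std::fabs(reference), 1e-9f);
        return std::fabs(target - reference) <= tol * norm;
    }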
diff --git a/tests/validation/CL/DeconvolutionLayer.cpp b/tests/validation/CL/DeconvolutionLayer.cpp
index 758cf3698a..269bf1587b 100644
--- a/tests/validation/CL/DeconvolutionLayer.cpp
+++ b/tests/validation/CL/DeconvolutionLayer.cpp
@@ -105,14 +105,14 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, (combine(datasets::Sm
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid weights shape
- TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 4), // Non supported data type
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8, 4), // Non supported data type
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 11), // Invalid bias shape
TensorInfo(TensorShape(13U, 11U, 4U, 3U), 1, DataType::F32, 0), // Window shrink
TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32, 0),
}),
framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::F16, 0),
TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0),
- TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::QS8, 5),
+ TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::QASYMM8, 5),
TensorInfo(TensorShape(3U, 2U, 2U, 2U), 1, DataType::F32, 11),
TensorInfo(TensorShape(3U, 3U, 4U), 1, DataType::F32, 0),
TensorInfo(TensorShape(1U, 1U, 2U, 4U), 1, DataType::F32, 0),
diff --git a/tests/validation/CL/DepthConcatenateLayer.cpp b/tests/validation/CL/DepthConcatenateLayer.cpp
index 725af88a17..f5a5c230af 100644
--- a/tests/validation/CL/DepthConcatenateLayer.cpp
+++ b/tests/validation/CL/DepthConcatenateLayer.cpp
@@ -98,42 +98,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConcatenateLayerFixture<float>, framewor
TEST_SUITE_END()
TEST_SUITE_END()
-TEST_SUITE(Quantized)
-TEST_SUITE(QS8)
-FIXTURE_DATA_TEST_CASE(RunTiny, CLDepthConcatenateLayerFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Tiny2DShapes(),
- framework::dataset::make("DataType",
- DataType::QS8)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConcatenateLayerFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateLayerShapes(),
- framework::dataset::make("DataType",
- DataType::QS8)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-FIXTURE_DATA_TEST_CASE(RunTiny, CLDepthConcatenateLayerFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Tiny2DShapes(),
- framework::dataset::make("DataType",
- DataType::QS16)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConcatenateLayerFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateLayerShapes(),
- framework::dataset::make("DataType",
- DataType::QS16)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-TEST_SUITE_END()
-TEST_SUITE_END()
-
TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
diff --git a/tests/validation/CL/DepthConvertLayer.cpp b/tests/validation/CL/DepthConvertLayer.cpp
index 603f4a07d6..c6e9f75a59 100644
--- a/tests/validation/CL/DepthConvertLayer.cpp
+++ b/tests/validation/CL/DepthConvertLayer.cpp
@@ -51,10 +51,6 @@ const auto DepthConvertLayerU16toU8Dataset = combine(framework::data
const auto DepthConvertLayerU16toU32Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U32));
const auto DepthConvertLayerS16toU8Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::U8));
const auto DepthConvertLayerS16toS32Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::S32));
-const auto DepthConvertLayerQS8toFP32Dataset = combine(framework::dataset::make("DataType", DataType::QS8), framework::dataset::make("DataType", DataType::F32));
-const auto DepthConvertLayerQS16toFP32Dataset = combine(framework::dataset::make("DataType", DataType::QS16), framework::dataset::make("DataType", DataType::F32));
-const auto DepthConvertLayerFP32toQS8Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::QS8));
-const auto DepthConvertLayerFP32toQS16Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::QS16));
const auto DepthConvertLayerShiftDataset = framework::dataset::make("Shift", 0, 7);
const auto DepthConvertLayerFixedPointQuantizedDataset = framework::dataset::make("FractionalBits", 1, 7);
} // namespace
@@ -73,10 +69,6 @@ template <typename T>
using CLDepthConvertLayerToU32Fixture = DepthConvertLayerValidationFixture<CLTensor, CLAccessor, CLDepthConvertLayer, T, uint32_t>;
template <typename T>
using CLDepthConvertLayerToFP32FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture<CLTensor, CLAccessor, CLDepthConvertLayer, T, float>;
-template <typename T>
-using CLDepthConvertLayerToQS8FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture<CLTensor, CLAccessor, CLDepthConvertLayer, T, int8_t>;
-template <typename T>
-using CLDepthConvertLayerToQS16FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture<CLTensor, CLAccessor, CLDepthConvertLayer, T, int16_t>;
TEST_SUITE(U8_to_U16)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
@@ -367,124 +359,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConvertLayerToS32Fixture<int16_t>, frame
}
TEST_SUITE_END()
-TEST_SUITE(Quantized_to_FP32)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("DataType", { DataType::QS8, DataType::QS16 })),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerFixedPointQuantizedDataset),
- shape, dt, policy, fixed_point_position)
-{
- int shift = 0;
-
- // Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, dt, 1, fixed_point_position);
- CLTensor dst = create_tensor<CLTensor>(shape, DataType::F32, 1, fixed_point_position);
-
- // Create and Configure function
- CLDepthConvertLayer depth_convert;
- depth_convert.configure(&src, &dst, policy, shift);
-
- // Validate valid region
- const ValidRegion valid_region = shape_to_valid_region(shape);
- validate(dst.info()->valid_region(), valid_region);
-
- // Validate padding
- const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
- validate(src.info()->padding(), padding);
- validate(dst.info()->padding(), padding);
-}
-FIXTURE_DATA_TEST_CASE(RunTinyQS8, CLDepthConvertLayerToFP32FixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(),
- DepthConvertLayerQS8toFP32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerFixedPointQuantizedDataset))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-FIXTURE_DATA_TEST_CASE(RunTinyQS16, CLDepthConvertLayerToFP32FixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(),
- DepthConvertLayerQS16toFP32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerFixedPointQuantizedDataset))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-FIXTURE_DATA_TEST_CASE(RunSmallQS8, CLDepthConvertLayerToFP32FixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(),
- DepthConvertLayerQS8toFP32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerFixedPointQuantizedDataset))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-FIXTURE_DATA_TEST_CASE(RunSmallQS16, CLDepthConvertLayerToFP32FixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(),
- DepthConvertLayerQS16toFP32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerFixedPointQuantizedDataset))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(FP32_to_Quantized)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("DataType", { DataType::QS8, DataType::QS16 })),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerFixedPointQuantizedDataset),
- shape, dt, policy, fixed_point_position)
-{
- int shift = 0;
-
- // Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, DataType::F32, 1, fixed_point_position);
- CLTensor dst = create_tensor<CLTensor>(shape, dt, 1, fixed_point_position);
-
- // Create and Configure function
- CLDepthConvertLayer depth_convert;
- depth_convert.configure(&src, &dst, policy, shift);
-
- // Validate valid region
- const ValidRegion valid_region = shape_to_valid_region(shape);
- validate(dst.info()->valid_region(), valid_region);
-
- // Validate padding
- const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
- validate(src.info()->padding(), padding);
- validate(dst.info()->padding(), padding);
-}
-FIXTURE_DATA_TEST_CASE(RunTinyQS8, CLDepthConvertLayerToQS8FixedPointFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(),
- DepthConvertLayerFP32toQS8Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerFixedPointQuantizedDataset))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-FIXTURE_DATA_TEST_CASE(RunTinyQS16, CLDepthConvertLayerToQS16FixedPointFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(),
- DepthConvertLayerFP32toQS16Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerFixedPointQuantizedDataset))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-FIXTURE_DATA_TEST_CASE(RunSmallQS8, CLDepthConvertLayerToQS8FixedPointFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(),
- DepthConvertLayerFP32toQS8Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerFixedPointQuantizedDataset))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-FIXTURE_DATA_TEST_CASE(RunSmallQS16, CLDepthConvertLayerToQS16FixedPointFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(),
- DepthConvertLayerFP32toQS16Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerFixedPointQuantizedDataset))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-TEST_SUITE_END()
-
TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
diff --git a/tests/validation/CL/DilatedConvolutionLayer.cpp b/tests/validation/CL/DilatedConvolutionLayer.cpp
index 25931c0f4c..784e2001c1 100644
--- a/tests/validation/CL/DilatedConvolutionLayer.cpp
+++ b/tests/validation/CL/DilatedConvolutionLayer.cpp
@@ -45,7 +45,6 @@ namespace
{
RelativeTolerance<float> tolerance_f32(0.05f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
RelativeTolerance<half_float::half> tolerance_f16(half_float::half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
-constexpr AbsoluteTolerance<float> tolerance_fixed(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
constexpr AbsoluteTolerance<float> tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
constexpr float tolerance_num = 0.07f; /**< Tolerance number */
@@ -54,8 +53,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType",
{
DataType::F16,
DataType::F32,
- DataType::QS8,
- DataType::QS16,
DataType::QASYMM8,
});
} // namespace
@@ -209,60 +206,6 @@ TEST_SUITE_END()
template <typename T>
using CLGEMMDilatedConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
-TEST_SUITE(FixedPoint)
-TEST_SUITE(QS8)
-// We test for fixed point precision [4,6]
-FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMDilatedConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(combine(datasets::TinyDilatedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 4, 7)),
- framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_fixed);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMDilatedConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 4, 7)),
- framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_fixed);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-// Testing for fixed point position [1,14)
-FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMDilatedConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(combine(datasets::TinyDilatedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)),
- framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_fixed);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMDilatedConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)),
- framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_fixed);
-}
-TEST_SUITE_END()
-TEST_SUITE_END()
-
template <typename T>
using CLGEMMDilatedConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
diff --git a/tests/validation/CL/DirectConvolutionLayer.cpp b/tests/validation/CL/DirectConvolutionLayer.cpp
index 19d914e98e..d8b2d7e155 100644
--- a/tests/validation/CL/DirectConvolutionLayer.cpp
+++ b/tests/validation/CL/DirectConvolutionLayer.cpp
@@ -48,8 +48,6 @@ RelativeTolerance<half> tolerance_fp16(half(0.2)); /**< Tolerance for floating
RelativeTolerance<float> tolerance_fp32(0.02f); /**< Tolerance for floating point tests */
constexpr float tolerance_num = 0.07f; /**< Tolerance number */
-constexpr AbsoluteTolerance<int8_t> tolerance_qs8(0); /**< Tolerance for fixed point tests */
-constexpr AbsoluteTolerance<int16_t> tolerance_qs16(0); /**< Tolerance for fixed point tests */
constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1); /**< Tolerance for quantized tests */
/** Direct convolution data set. */
@@ -204,30 +202,6 @@ TEST_SUITE_END()
template <typename T>
using CLDirectConvolutionLayerFixedPointFixture = DirectConvolutionValidationFixedPointFixture<CLTensor, CLAccessor, CLDirectConvolutionLayer, T>;
-TEST_SUITE(FixedPoint)
-TEST_SUITE(QS8)
-FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(data_fixed_point, framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 2, 7)),
- ActivationFunctionsDataset))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_qs8);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(combine(data_fixed_point, framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FractionalBits", 2, 15)),
- ActivationFunctionsDataset))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_qs16);
-}
-TEST_SUITE_END()
-TEST_SUITE_END()
-
template <typename T>
using CLDirectConvolutionLayerQuantizedFixture = DirectConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLDirectConvolutionLayer, T>;
template <typename T>
diff --git a/tests/validation/CL/FixedPoint/FixedPointTarget.h b/tests/validation/CL/FixedPoint/FixedPointTarget.h
deleted file mode 100644
index 920bd374d3..0000000000
--- a/tests/validation/CL/FixedPoint/FixedPointTarget.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_FIXED_POINT_CL_TARGET
-#define ARM_COMPUTE_TEST_FIXED_POINT_CL_TARGET
-
-#include "arm_compute/runtime/CL/CLScheduler.h"
-
-#include "tests/Globals.h"
-#include "tests/Types.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-namespace
-{
-template <typename TensorType, typename AccessorType, typename T>
-void compute_target_impl(const TensorShape &shape, DataType dt, FixedPointOp op, int fixed_point_position, TensorType &src, TensorType &dst)
-{
- std::string fixed_point_operation_kernel;
-#ifndef EMBEDDED_KERNELS
- std::cout << "EMBEDDED_KERNELS NOT DEFINED" << std::endl;
-
- fixed_point_operation_kernel += "#include \"fixed_point.h\"\n";
-#endif /* EMBEDDED_KERNELS */
- fixed_point_operation_kernel +=
- "__kernel void fixed_point_operation_qs8( \n"
- " __global char* src, \n"
- " __global char* dst) \n"
- "{ \n"
- " char16 in = vload16(0, src + get_global_id(0) * 16); \n"
- " if(FIXED_POINT_OP == 0) \n"
- " { \n"
- " vstore16(EXP_OP_EXPAND(in, DATA_TYPE, 16, FIXED_POINT_POS), 0, dst + get_global_id(0) * 16); \n"
- " } \n"
- " else if(FIXED_POINT_OP == 1) \n"
- " { \n"
- " vstore16(INVSQRT_OP_EXPAND(in, DATA_TYPE, 16, FIXED_POINT_POS), 0, dst + get_global_id(0) * 16); \n"
- " } \n"
- " else \n"
- " { \n"
- " vstore16(LOG_OP_EXPAND(in, DATA_TYPE, 16, FIXED_POINT_POS), 0, dst + get_global_id(0) * 16); \n"
- " } \n"
- "} \n"
- "\n";
-
- // Set build options
- std::string build_opts = "-DFIXED_POINT_POS=" + support::cpp11::to_string(fixed_point_position);
- build_opts += " -DDATA_TYPE=qs8";
-
- // Fill tensors.
- int min = 0;
- int max = 0;
- switch(op)
- {
- case(FixedPointOp::EXP):
- min = -(1 << (fixed_point_position - 1));
- max = (1 << (fixed_point_position - 1));
- build_opts += " -DFIXED_POINT_OP=0";
- break;
- case(FixedPointOp::INV_SQRT):
- min = 1;
- max = (dt == DataType::QS8) ? 0x7F : 0x7FFF;
- build_opts += " -DFIXED_POINT_OP=1";
- break;
- case(FixedPointOp::LOG):
- min = (1 << (fixed_point_position - 1));
- max = (dt == DataType::QS8) ? 0x3F : 0x3FFF;
- build_opts += " -DFIXED_POINT_OP=2";
- break;
- default:
- ARM_COMPUTE_ERROR("Fixed point operation not supported");
- break;
- }
-
- std::uniform_int_distribution<> distribution(min, max);
- library->fill(AccessorType(src), distribution, 0);
-
- std::vector<std::string> sources;
-
-#ifndef EMBEDDED_KERNELS
- build_opts += " -I" + CLKernelLibrary::get().get_kernel_path();
-#else /* EMBEDDED_KERNELS */
- sources.push_back(CLKernelLibrary::get().get_program_source("fixed_point.h"));
-#endif /* EMBEDDED_KERNELS */
-
- sources.push_back(fixed_point_operation_kernel);
-
- // Create program
- ::cl::Program program(CLScheduler::get().context(), sources);
-
- // Build program
- program.build(build_opts.c_str());
-
- ::cl::Kernel kernel(program, "fixed_point_operation_qs8", nullptr);
-
- unsigned int idx = 0;
- kernel.setArg(idx++, src.cl_buffer());
- kernel.setArg(idx++, dst.cl_buffer());
-
- ::cl::NDRange gws(shape[0] / 16, 1, 1);
- CLScheduler::get().queue().enqueueNDRangeKernel(kernel, 0, gws);
-}
-} // namespace
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_FIXED_POINT_CL_TARGET */
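
For context on the ranges chosen in the deleted helper above: under the usual QS8 encoding a raw value r at fixed point position p represents r / 2^p, so the EXP bounds of +/-(1 << (p - 1)) always map to the real interval [-0.5, 0.5] regardless of p. A minimal standalone sketch (a hypothetical helper, not part of the library) makes this visible:

#include <cstdio>

int main()
{
    for(int p = 1; p < 6; ++p)
    {
        const int raw_min = -(1 << (p - 1)); // EXP lower bound used above
        const int raw_max = (1 << (p - 1));  // EXP upper bound used above
        std::printf("p=%d: raw [%d, %d] -> real [%.2f, %.2f]\n", p, raw_min, raw_max,
                    raw_min / static_cast<double>(1 << p),
                    raw_max / static_cast<double>(1 << p));
    }
    return 0; // every iteration prints real [-0.50, 0.50]
}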
diff --git a/tests/validation/CL/FixedPoint/FixedPoint_QS8.cpp b/tests/validation/CL/FixedPoint/FixedPoint_QS8.cpp
deleted file mode 100644
index 186fc4c673..0000000000
--- a/tests/validation/CL/FixedPoint/FixedPoint_QS8.cpp
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "FixedPointTarget.h"
-
-#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/CL/CLTensor.h"
-#include "arm_compute/runtime/CL/CLTensorAllocator.h"
-#include "tests/CL/CLAccessor.h"
-#include "tests/PaddingCalculator.h"
-#include "tests/datasets/ShapeDatasets.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Macros.h"
-#include "tests/framework/datasets/Datasets.h"
-#include "tests/validation/Validation.h"
-#include "tests/validation/fixtures/FixedPointFixture.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-namespace
-{
-constexpr AbsoluteTolerance<float> tolerance_exp(1.0f); /**< Tolerance value for comparing reference's output against implementation's output (exponential)*/
-constexpr AbsoluteTolerance<float> tolerance_invsqrt(4.0f); /**< Tolerance value for comparing reference's output against implementation's output (inverse square-root) */
-constexpr AbsoluteTolerance<float> tolerance_log(5.0f); /**< Tolerance value for comparing reference's output against implementation's output (logarithm) */
-} // namespace
-
-TEST_SUITE(CL)
-TEST_SUITE(FixedPoint)
-TEST_SUITE(QS8)
-
-template <typename T>
-using CLFixedPointFixture = FixedPointValidationFixture<CLTensor, CLAccessor, T>;
-
-TEST_SUITE(Exp)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::Small1DShapes(), framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FixedPointOp", FixedPointOp::EXP)),
- framework::dataset::make("FractionalBits", 1, 6)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_exp);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(Log)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::Small1DShapes(), framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FixedPointOp", FixedPointOp::LOG)),
- framework::dataset::make("FractionalBits", 3, 6)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_log);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(Invsqrt)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::Small1DShapes(), framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FixedPointOp", FixedPointOp::INV_SQRT)),
- framework::dataset::make("FractionalBits", 1, 6)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_invsqrt);
-}
-TEST_SUITE_END()
-
-TEST_SUITE_END()
-TEST_SUITE_END()
-TEST_SUITE_END()
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
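
The validate(CLAccessor(_target), _reference, tolerance) calls in the deleted suites compare device output element-wise against a CPU reference. Conceptually (a simplified sketch of the assumed semantics, not the framework's actual implementation) an AbsoluteTolerance check reduces to:

#include <cmath>

bool within_absolute_tolerance(float target, float reference, float tolerance)
{
    // e.g. tolerance_exp above allowed an absolute difference of 1.0 per element
    return std::fabs(target - reference) <= tolerance;
}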
diff --git a/tests/validation/CL/Flatten.cpp b/tests/validation/CL/Flatten.cpp
index 04fbef6d79..ceaf123f46 100644
--- a/tests/validation/CL/Flatten.cpp
+++ b/tests/validation/CL/Flatten.cpp
@@ -78,38 +78,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLFlattenLayerFixture<half>, framework::Dataset
TEST_SUITE_END()
TEST_SUITE_END()
-TEST_SUITE(Quantized)
-TEST_SUITE(QS8)
-FIXTURE_DATA_TEST_CASE(RunTiny, CLFlattenLayerFixture<int8_t>, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::Tiny3DShapes(), datasets::Tiny4DShapes()),
- framework::dataset::make("DataType", DataType::QS8)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLFlattenLayerFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(framework::dataset::concat(datasets::Small3DShapes(), datasets::Small4DShapes()),
- framework::dataset::make("DataType", DataType::QS8)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-FIXTURE_DATA_TEST_CASE(RunTiny, CLFlattenLayerFixture<int16_t>, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::Tiny3DShapes(), datasets::Tiny4DShapes()),
- framework::dataset::make("DataType", DataType::QS16)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLFlattenLayerFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(framework::dataset::concat(datasets::Small3DShapes(), datasets::Small4DShapes()),
- framework::dataset::make("DataType", DataType::QS16)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-TEST_SUITE_END()
-TEST_SUITE_END()
-
TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
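
For readers unfamiliar with the layer: the Flatten tests above only exercise a reshape. A sketch of the shape arithmetic (assumed semantics; the element buffer itself is never moved):

#include <array>
#include <cstddef>

// Collapse a W x H x C x N shape to (W*H*C) x N; flattening is a pure reshape.
std::array<std::size_t, 2> flattened_shape(std::size_t w, std::size_t h, std::size_t c, std::size_t n)
{
    return { w * h * c, n };
}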
diff --git a/tests/validation/CL/FullyConnectedLayer.cpp b/tests/validation/CL/FullyConnectedLayer.cpp
index 7db9cf5b70..069d8a73ac 100644
--- a/tests/validation/CL/FullyConnectedLayer.cpp
+++ b/tests/validation/CL/FullyConnectedLayer.cpp
@@ -47,8 +47,6 @@ RelativeTolerance<float> tolerance_f32(0.05f);
RelativeTolerance<half_float::half> tolerance_f16(half(0.2));
constexpr float tolerance_num = 0.07f; /**< Tolerance number */
-/** Tolerance for fixed point operations */
-constexpr AbsoluteTolerance<float> tolerance_fixed_point(1.f);
/** Tolerance for quantized asymmetric operations */
constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1);
@@ -57,8 +55,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType",
{
DataType::F16,
DataType::F32,
- DataType::QS8,
- DataType::QS16,
DataType::QASYMM8,
});
@@ -119,36 +115,32 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
framework::dataset::make("InputInfo", { TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::F32), // Mismatching data types
- TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::QS8, 2), // Mismatching fixed point position
TensorInfo(TensorShape(8U, 4U, 6U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(8U, 4U, 6U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::F32), // Invalid weights dimensions
TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::F32), // Wrongly reshaped weights
}),
framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(315U, 271U), 1, DataType::F16),
- TensorInfo(TensorShape(315U, 271U), 1, DataType::QS8, 3),
TensorInfo(TensorShape(192U, 192U), 1, DataType::F32),
TensorInfo(TensorShape(192U, 192U), 1, DataType::F32),
TensorInfo(TensorShape(217U, 231U), 1, DataType::F32),
TensorInfo(TensorShape(217U, 315U), 1, DataType::F32),
})),
framework::dataset::make("BiasInfo",{ TensorInfo(TensorShape(271U), 1, DataType::F32),
- TensorInfo(TensorShape(271U), 1, DataType::QS8, 2),
TensorInfo(TensorShape(192U), 1, DataType::F32),
TensorInfo(TensorShape(192U), 1, DataType::F32),
TensorInfo(TensorShape(271U), 1, DataType::F32),
TensorInfo(TensorShape(271U), 1, DataType::F32),
})),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(271U, 3U), 1, DataType::F32),
- TensorInfo(TensorShape(271U, 3U), 1, DataType::QS8, 3),
TensorInfo(TensorShape(192U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(192U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(271U, 3U), 1, DataType::F32),
TensorInfo(TensorShape(271U, 3U), 1, DataType::F32),
})),
- framework::dataset::make("TransposeWeights",{ true, true, true, false, true, true })),
- framework::dataset::make("ReshapedWeights",{ false, false, false, false, false, false})),
- framework::dataset::make("Expected", { false, false, true, true, false, false })),
+ framework::dataset::make("TransposeWeights",{ true, true, false, true, true })),
+ framework::dataset::make("ReshapedWeights",{ false, false, false, false, false})),
+ framework::dataset::make("Expected", { false, true, true, false, false })),
input_info, weights_info, bias_info, output_info, transpose_weights, reshaped_weights, expected)
{
Status status = CLFullyConnectedLayer::validate(&input_info.clone()->set_is_resizable(false),
@@ -202,52 +194,6 @@ TEST_SUITE_END()
template <typename T>
using CLFullyConnectedLayerFixedPointFixture = FullyConnectedLayerValidationFixedPointFixture<CLTensor, CLAccessor, CLFullyConnectedLayer, T, false>;
-TEST_SUITE(FixedPoint)
-TEST_SUITE(QS8)
-// Testing for fixed point position [1,6) as reciprocal limits the maximum fixed point position to 5
-FIXTURE_DATA_TEST_CASE(RunTiny, CLFullyConnectedLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyFullyConnectedLayerDataset(),
- FullyConnectedParameters),
- framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 1, 6)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_fixed_point);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLFullyConnectedLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallFullyConnectedLayerDataset(),
- FullyConnectedParameters),
- framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 1, 6)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_fixed_point);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 14
-FIXTURE_DATA_TEST_CASE(RunTiny, CLFullyConnectedLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyFullyConnectedLayerDataset(),
- FullyConnectedParameters),
- framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_fixed_point);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLFullyConnectedLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallFullyConnectedLayerDataset(),
- FullyConnectedParameters),
- framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_fixed_point);
-}
-TEST_SUITE_END()
-TEST_SUITE_END()
-
template <typename T>
using CLFullyConnectedLayerQuantizedFixture = FullyConnectedLayerValidationQuantizedFixture<CLTensor, CLAccessor, CLFullyConnectedLayer, T, false>;
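
All of the fully connected fixtures above validate the same underlying arithmetic. A minimal reference sketch (row-major weights and batch size one assumed; names are hypothetical, and the TransposeWeights parameter simply determines whether W arrives pre-transposed):

#include <cstddef>
#include <vector>

std::vector<float> fully_connected(const std::vector<float> &w, // out_dim x in_dim, row-major
                                   const std::vector<float> &x, // in_dim
                                   const std::vector<float> &b, // out_dim
                                   std::size_t out_dim, std::size_t in_dim)
{
    std::vector<float> y(out_dim);
    for(std::size_t o = 0; o < out_dim; ++o)
    {
        float acc = b[o];
        for(std::size_t i = 0; i < in_dim; ++i)
        {
            acc += w[o * in_dim + i] * x[i]; // y = W * x + b
        }
        y[o] = acc;
    }
    return y;
}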
diff --git a/tests/validation/CL/GEMM.cpp b/tests/validation/CL/GEMM.cpp
index 4e6cf826fa..d066281843 100644
--- a/tests/validation/CL/GEMM.cpp
+++ b/tests/validation/CL/GEMM.cpp
@@ -53,7 +53,6 @@ RelativeTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for compari
constexpr float abs_tolerance_f32(
0.0001f); /**< Absolute tolerance value for comparing reference's output against implementation's output for floating point data types in case using relative tolerance fails because of small values */
RelativeTolerance<half_float::half> tolerance_f16(half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for floating point data types */
-constexpr AbsoluteTolerance<float> tolerance_q(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
constexpr float tolerance_num = 0.02f; /**< Tolerance number */
const auto data_interleave = framework::dataset::make("M", 8, 14) * framework::dataset::make("N", 7, 14);
@@ -62,8 +61,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType",
{
DataType::F16,
DataType::F32,
- DataType::QS8,
- DataType::QS16,
});
} // namespace
@@ -84,31 +81,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMInterleave4x4Fixture, framework::DatasetM
}
TEST_SUITE_END() // FP32
-TEST_SUITE(Quantized)
-TEST_SUITE(QS8)
-using CLGEMMInterleave4x4Fixture = GEMMInterleave4x4ValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMMInterleave4x4, int8_t>;
-FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMInterleave4x4Fixture, framework::DatasetMode::PRECOMMIT, data_interleave *
- framework::dataset::make("DataType", DataType::QS8)
- * framework::dataset::make("FractionalBits", 1, 7))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-using CLGEMMInterleave4x4Fixture = GEMMInterleave4x4ValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMMInterleave4x4, int16_t>;
-FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMInterleave4x4Fixture, framework::DatasetMode::PRECOMMIT, data_interleave *
- framework::dataset::make("DataType", DataType::QS16)
- * framework::dataset::make("FractionalBits", 1, 14))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-TEST_SUITE_END()
-
-TEST_SUITE_END()
-
TEST_SUITE_END() // INTERLEAVE_4X4
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallGEMMDataset(), datasets::LargeGEMMDataset()), CNNDataTypes),
@@ -149,33 +121,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMTranspose1xWFixture, framework::DatasetMo
}
TEST_SUITE_END() // FP32
-TEST_SUITE(Quantized)
-TEST_SUITE(QS8)
-using CLGEMMTranspose1xW = CLSynthetizeFunctionWithZeroConstantBorder<CLGEMMTranspose1xWKernel, 16>;
-using CLGEMMTranspose1xWFixture = GEMMTranspose1xWValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMMTranspose1xW, int8_t>;
-FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMTranspose1xWFixture, framework::DatasetMode::PRECOMMIT, data_transpose *
- framework::dataset::make("DataType", DataType::QS8)
- * framework::dataset::make("FractionalBits", 1, 7))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-using CLGEMMTranspose1xW = CLSynthetizeFunctionWithZeroConstantBorder<CLGEMMTranspose1xWKernel, 8>;
-using CLGEMMTranspose1xWFixture = GEMMTranspose1xWValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMMTranspose1xW, int16_t>;
-FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMTranspose1xWFixture, framework::DatasetMode::PRECOMMIT, data_transpose *
- framework::dataset::make("DataType", DataType::QS16)
- * framework::dataset::make("FractionalBits", 1, 14))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-TEST_SUITE_END()
-
-TEST_SUITE_END()
-
TEST_SUITE_END() //TRANSPOSE_1XW
TEST_SUITE(Float)
@@ -210,46 +155,6 @@ TEST_SUITE_END()
template <typename T>
using CLGEMMFixedPointFixture = GEMMValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMM, T>;
-TEST_SUITE(Quantized)
-TEST_SUITE(QS8)
-FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::TinyGEMMDataset(),
- framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 1, 7)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_q);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SmallGEMMDataset(),
- framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 1, 7)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_q);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::TinyGEMMDataset(),
- framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_q);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SmallGEMMDataset(),
- framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_q);
-}
-TEST_SUITE_END()
-TEST_SUITE_END()
-
TEST_SUITE(OUTPUT_3D)
TEST_SUITE(Float)
TEST_SUITE(FP32)
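
The INTERLEAVE_4X4 suites above reshuffle the input so GEMM can read four rows at once. A hedged reference sketch (assumed layout: each block of four rows is emitted column by column; the real kernel additionally pads ragged edges, which this sketch skips):

#include <cstddef>
#include <vector>

std::vector<float> interleave4x4(const std::vector<float> &in, std::size_t rows, std::size_t cols)
{
    std::vector<float> out;
    out.reserve(rows * cols);
    for(std::size_t r = 0; r < rows; r += 4)
    {
        for(std::size_t c = 0; c < cols; ++c)
        {
            // Four consecutive output values come from the same column of
            // four consecutive input rows.
            for(std::size_t i = 0; i < 4 && r + i < rows; ++i)
            {
                out.push_back(in[(r + i) * cols + c]);
            }
        }
    }
    return out;
}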
diff --git a/tests/validation/CL/Im2Col.cpp b/tests/validation/CL/Im2Col.cpp
index bfe0665fa9..ac36267c99 100644
--- a/tests/validation/CL/Im2Col.cpp
+++ b/tests/validation/CL/Im2Col.cpp
@@ -58,20 +58,18 @@ using CLIm2Col = CLSynthetizeFunction<CLIm2ColKernel>;
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
framework::dataset::make("InputInfo", { TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::U8), // Unsupported data type
TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::F32), // Mismatching data type
- TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point
TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::QASYMM8), // Bias not supported with QASYMM8
TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::QASYMM8), // Mismatching shapes
TensorInfo(TensorShape(10U, 12U, 2U, 2U), 1, DataType::QASYMM8),
}),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::F16),
TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::F16),
- TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::QS8, 3),
TensorInfo(TensorShape(3U, 3U, 10U, 2U), 1, DataType::QASYMM8),
TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::QASYMM8),
TensorInfo(TensorShape(18U, 80U, 1U, 2U), 1, DataType::QASYMM8),
})),
- framework::dataset::make("HasBias", { true, true, true, true, false, false })),
- framework::dataset::make("Expected", { false, false, false, false, true, true })),
+ framework::dataset::make("HasBias", { true, true, true, false, false })),
+ framework::dataset::make("Expected", { false, false, false, true, true })),
input_info, output_info, has_bias, expected)
{
diff --git a/tests/validation/CL/NormalizationLayer.cpp b/tests/validation/CL/NormalizationLayer.cpp
index a3db6e60ba..f6a8e7a9fb 100644
--- a/tests/validation/CL/NormalizationLayer.cpp
+++ b/tests/validation/CL/NormalizationLayer.cpp
@@ -47,10 +47,6 @@ namespace
RelativeTolerance<half> tolerance_f16(half(0.2));
RelativeTolerance<float> tolerance_f32(0.05f);
-/** Tolerance for fixed point operations */
-constexpr AbsoluteTolerance<int8_t> tolerance_qs8(2);
-constexpr AbsoluteTolerance<int16_t> tolerance_qs16(4);
-
/** Input data set. */
const auto NormalizationDataset = combine(combine(combine(combine(datasets::SmallShapes(), datasets::NormalizationTypes()),
framework::dataset::make("NormalizationSize", 3, 9, 2)),
@@ -83,7 +79,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching shapes
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Even normalization
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Non implemented IN_MAP_2D
- TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 4), // Mismatching fixed point position
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Window shrink
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 0),
}),
@@ -91,7 +86,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32, 0),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),
- TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 0),
})),
@@ -100,10 +94,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
NormalizationLayerInfo(NormType::IN_MAP_1D, 4),
NormalizationLayerInfo(NormType::IN_MAP_2D, 5),
NormalizationLayerInfo(NormType::IN_MAP_1D, 5),
- NormalizationLayerInfo(NormType::IN_MAP_1D, 5),
NormalizationLayerInfo(NormType::CROSS_MAP, 5),
})),
- framework::dataset::make("Expected", { false, false, false, false, false, false, true })),
+ framework::dataset::make("Expected", { false, false, false, false, false, true })),
input_info, output_info, norm_info, expected)
{
ARM_COMPUTE_EXPECT(bool(CLNormalizationLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), norm_info)) == expected, framework::LogLevel::ERRORS);
@@ -145,44 +138,6 @@ TEST_SUITE_END()
template <typename T>
using CLNormalizationLayerFixedPointFixture = NormalizationValidationFixedPointFixture<CLTensor, CLAccessor, CLNormalizationLayer, T>;
-TEST_SUITE(Quantized)
-TEST_SUITE(QS8)
-// Testing for fixed point position [1,6) as reciprocal limits the maximum fixed point position to 5
-FIXTURE_DATA_TEST_CASE(RunTiny, CLNormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(NormalizationDatasetQS, framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 1, 6)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_qs8);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLNormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(NormalizationDataset, framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 1, 6)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_qs8);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 14
-FIXTURE_DATA_TEST_CASE(RunTiny, CLNormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(NormalizationDatasetQS, framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_qs16);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLNormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(NormalizationDataset, framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_qs16);
-}
-TEST_SUITE_END()
-TEST_SUITE_END()
-
TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
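
The NormalizationLayerInfo parameters above (NormType, size) describe local response normalization. A hedged sketch of the cross-map case in its textbook LRN form (assumed, rather than taken from the library's implementation):

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// Scale one element by a power of the accumulated squares of its channel neighbours.
float lrn_cross_map(const std::vector<float> &x, std::size_t c, std::size_t num_channels,
                    std::size_t norm_size, float alpha, float beta, float kappa)
{
    const std::size_t half = norm_size / 2;
    const std::size_t lo   = c > half ? c - half : 0;
    const std::size_t hi   = std::min(num_channels - 1, c + half);
    float sum = 0.f;
    for(std::size_t j = lo; j <= hi; ++j)
    {
        sum += x[j] * x[j];
    }
    return x[c] / std::pow(kappa + alpha / static_cast<float>(norm_size) * sum, beta);
}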
diff --git a/tests/validation/CL/PixelWiseMultiplication.cpp b/tests/validation/CL/PixelWiseMultiplication.cpp
index 6a71175f51..99b084f6bc 100644
--- a/tests/validation/CL/PixelWiseMultiplication.cpp
+++ b/tests/validation/CL/PixelWiseMultiplication.cpp
@@ -28,7 +28,6 @@
#include "tests/datasets/ShapeDatasets.h"
#include "tests/framework/Macros.h"
#include "tests/validation/Validation.h"
-#include "tests/validation/fixtures/FixedPointPixelWiseMultiplicationFixture.h"
#include "tests/validation/fixtures/PixelWiseMultiplicationFixture.h"
namespace arm_compute
@@ -39,7 +38,6 @@ namespace validation
{
namespace
{
-const float scale_unity = 1.f;
const float scale_255 = 1.f / 255.f;
// *INDENT-OFF*
@@ -81,12 +79,6 @@ using CLPixelWiseMultiplicationToF16Fixture = PixelWiseMultiplicationValidationF
template <typename T>
using CLPixelWiseMultiplicationToF32Fixture = PixelWiseMultiplicationValidationFixture<CLTensor, CLAccessor, CLPixelWiseMultiplication, T, float>;
template <typename T>
-using CLPixelWiseMultiplicationToQS8Fixture = PixelWiseMultiplicationValidationFixture<CLTensor, CLAccessor, CLPixelWiseMultiplication, T, qint8_t>;
-template <typename T>
-using CLPixelWiseMultiplicationToQS16Fixture = PixelWiseMultiplicationValidationFixture<CLTensor, CLAccessor, CLPixelWiseMultiplication, T, qint16_t>;
-template <typename T>
-using CLFixedPointPixelWiseMultiplicationFixture = FixedPointPixelWiseMultiplicationValidationFixture<CLTensor, CLAccessor, CLPixelWiseMultiplication, T>;
-template <typename T>
using CLPixelWiseMultiplicationBroadcastFixture = PixelWiseMultiplicationBroadcastValidationFixture<CLTensor, CLAccessor, CLPixelWiseMultiplication, T, float>;
TEST_SUITE(CL)
@@ -101,9 +93,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid scale
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching data type
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Invalid scale
}),
framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
@@ -111,9 +100,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16),
TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS16, 2),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
})),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
@@ -121,12 +107,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
})),
- framework::dataset::make("Scale",{ 2.f, 2.f, 2.f, -1.f, 1.f, 1.f, 1.f, 1.f, 3.f})),
- framework::dataset::make("Expected", { true, true, false, false, false, false, false, false, false })),
+ framework::dataset::make("Scale",{ 2.f, 2.f, 2.f, -1.f, 1.f, 1.f})),
+ framework::dataset::make("Expected", { true, true, false, false, false, false})),
input1_info, input2_info, output_info, scale, expected)
{
bool has_error = bool(CLPixelWiseMultiplication::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), scale, ConvertPolicy::WRAP, RoundingPolicy::TO_ZERO));
@@ -155,22 +138,6 @@ TEST_SUITE_END() // PixelWiseMultiplication
TEST_SUITE(FixedPointPixelWiseMultiplication)
-TEST_SUITE(QS8)
-
-TEST_SUITE(ScaleUnity)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunTiny, Fixture<qint8_t>, PRECOMMIT, TinyShapes(), QS8, QS8, scale_unity, TO_ZERO, 1, 7)
-TEST_SUITE_END() // ScaleUnity
-
-TEST_SUITE_END() // QS8
-
-TEST_SUITE(QS16)
-
-TEST_SUITE(ScaleUnity)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunTiny, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, scale_unity, TO_ZERO, 1, 15)
-TEST_SUITE_END() // ScaleUnity
-
-TEST_SUITE_END() // QS16
-
TEST_SUITE(Broadcast)
PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunSmall, BroadcastFixture<float>, PRECOMMIT, SmallShapesBroadcast(), F32, F32, scale_255, TO_NEAREST_UP, VALIDATE(float, 1.f))
TEST_SUITE_END() // Broadcast
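
The Scale and RoundingPolicy parameters above control the element-wise product out = in1 * in2 * scale and how its intermediate is rounded on integer paths. A sketch of the two policies named in the tests (assumed semantics):

#include <cmath>
#include <cstdint>

int32_t round_to_zero(float v)       { return static_cast<int32_t>(v); }                    // TO_ZERO: truncate toward zero
int32_t round_to_nearest_up(float v) { return static_cast<int32_t>(std::floor(v + 0.5f)); } // TO_NEAREST_UP: halves round up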
diff --git a/tests/validation/CL/PoolingLayer.cpp b/tests/validation/CL/PoolingLayer.cpp
index 7bd090cb77..b28a5ebca9 100644
--- a/tests/validation/CL/PoolingLayer.cpp
+++ b/tests/validation/CL/PoolingLayer.cpp
@@ -49,11 +49,6 @@ const auto PoolingLayerDatasetFP = combine(combine(combine(datasets::PoolingType
framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })),
framework::dataset::make("ExcludePadding", { true, false }));
-/** Input data set for fixed-point data types */
-const auto PoolingLayerDatasetQS = combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }), framework::dataset::make("PoolingSize", { Size2D(2, 2), Size2D(3, 3) })),
- framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })),
- framework::dataset::make("ExcludePadding", { true, false }));
-
/** Input data set for asymmetric data type */
const auto PoolingLayerDatasetQASYMM8 = combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }), framework::dataset::make("PoolingSize", { Size2D(2, 2), Size2D(3, 3), Size2D(5, 7), Size2D(8, 9) })),
framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })),
@@ -61,8 +56,6 @@ const auto PoolingLayerDatasetQASYMM8 = combine(combine(combine(framework::datas
constexpr AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for 32-bit floating-point type */
constexpr AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for 16-bit floating-point type */
-constexpr AbsoluteTolerance<float> tolerance_qs16(6); /**< Tolerance value for comparing reference's output against implementation's output for 16-bit fixed-point type */
-constexpr AbsoluteTolerance<float> tolerance_qs8(3); /**< Tolerance value for comparing reference's output against implementation's output for 8-bit fixed-point type */
constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1); /**< Tolerance value for comparing reference's output against implementation's output for 8-bit asymmetric type */
} // namespace
@@ -74,8 +67,6 @@ TEST_SUITE(PoolingLayer)
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Window shrink
- TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 4), // Mismatching fixed point position
- TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS16, 11),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid pad/size combination
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid pad/size combination
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8, 0), // Invalid parameters
@@ -85,8 +76,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
}),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16, 0),
TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0),
- TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::QS8, 5),
- TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::QS16, 11),
TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32, 0),
TensorInfo(TensorShape(25U, 16U, 2U), 1, DataType::F32, 0),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8, 0),
@@ -96,8 +85,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
})),
framework::dataset::make("PoolInfo", { PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)),
PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)),
- PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)),
- PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)),
PoolingLayerInfo(PoolingType::AVG, 2, PadStrideInfo(1, 1, 2, 0)),
PoolingLayerInfo(PoolingType::AVG, 2, PadStrideInfo(1, 1, 0, 2)),
PoolingLayerInfo(PoolingType::L2, 3, PadStrideInfo(1, 1, 0, 0)),
@@ -105,7 +92,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
PoolingLayerInfo(PoolingType::MAX),
PoolingLayerInfo(PoolingType::AVG),
})),
- framework::dataset::make("Expected", { false, false, false, true, false, false, false, true, false, true })),
+ framework::dataset::make("Expected", { false, false, false, false, false, true, false, true })),
input_info, output_info, pool_info, expected)
{
ARM_COMPUTE_EXPECT(bool(CLPoolingLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), pool_info)) == expected, framework::LogLevel::ERRORS);
@@ -164,42 +151,6 @@ TEST_SUITE_END() // Float
template <typename T>
using CLPoolingLayerFixedPointFixture = PoolingLayerValidationFixedPointFixture<CLTensor, CLAccessor, CLPoolingLayer, T>;
-TEST_SUITE(FixedPoint)
-TEST_SUITE(QS8)
-FIXTURE_DATA_TEST_CASE(RunTiny, CLPoolingLayerFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapes(), combine(PoolingLayerDatasetQS,
- framework::dataset::make("DataType", DataType::QS8))),
- framework::dataset::make("FractionalBits", 1, 4)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_qs8);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SmallShapes(), combine(PoolingLayerDatasetQS,
- framework::dataset::make("DataType", DataType::QS8))),
- framework::dataset::make("FractionalBits", 1, 4)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_qs8);
-}
-TEST_SUITE_END() // QS8
-
-TEST_SUITE(QS16)
-FIXTURE_DATA_TEST_CASE(RunTiny, CLPoolingLayerFixedPointFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapes(), combine(PoolingLayerDatasetQS,
- framework::dataset::make("DataType", DataType::QS16))),
- framework::dataset::make("FractionalBits", 1, 12)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_qs16);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SmallShapes(), combine(PoolingLayerDatasetQS,
- framework::dataset::make("DataType", DataType::QS16))),
- framework::dataset::make("FractionalBits", 1, 12)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_qs16);
-}
-TEST_SUITE_END() // QS16
-TEST_SUITE_END() // FixedPoint
-
TEST_SUITE(Quantized)
template <typename T>
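
The ExcludePadding flag in the pooling datasets above decides the divisor used by average pooling. A hedged sketch of the distinction (assumed semantics):

// Average over one pooling window; padded elements contribute zero to the sum.
float average_pool_window(const float *valid, int count_valid, int window_size, bool exclude_padding)
{
    float sum = 0.f;
    for(int i = 0; i < count_valid; ++i)
    {
        sum += valid[i];
    }
    // ExcludePadding divides by the valid count only; otherwise the full window size.
    return sum / static_cast<float>(exclude_padding ? count_valid : window_size);
}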
diff --git a/tests/validation/CL/SoftmaxLayer.cpp b/tests/validation/CL/SoftmaxLayer.cpp
index f9204f9424..114529edfd 100644
--- a/tests/validation/CL/SoftmaxLayer.cpp
+++ b/tests/validation/CL/SoftmaxLayer.cpp
@@ -47,9 +47,6 @@ namespace
RelativeTolerance<half> tolerance_f16(half(0.2));
RelativeTolerance<float> tolerance_f32(0.001f);
-/** Tolerance for fixed point operations */
-constexpr AbsoluteTolerance<int16_t> tolerance_fixed_point(2);
-
/** Tolerance for quantized operations */
constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1);
@@ -59,8 +56,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType",
DataType::QASYMM8,
DataType::F16,
DataType::F32,
- DataType::QS8,
- DataType::QS16,
});
} // namespace
@@ -106,27 +101,23 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(concat(datase
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data types
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
- TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8, // Invalid output quantization info
QuantizationInfo(1.f/256, 12)),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Window shrink
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8,
QuantizationInfo(1.f/256, 12)),
}),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16),
TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8,
QuantizationInfo(1.f/256, 12)),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8,
QuantizationInfo(1.f/256, 0)),
})),
- framework::dataset::make("Expected", { false, false, false, false, false, true, true, true })),
+ framework::dataset::make("Expected", { false, false, false, false, true, true })),
input_info, output_info, expected)
{
ARM_COMPUTE_EXPECT(bool(CLSoftmaxLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false))) == expected, framework::LogLevel::ERRORS);
@@ -176,46 +167,6 @@ TEST_SUITE_END()
template <typename T>
using CLSoftmaxLayerFixedPointFixture = SoftmaxValidationFixedPointFixture<CLTensor, CLAccessor, CLSoftmaxLayer, T>;
-TEST_SUITE(FixedPoint)
-TEST_SUITE(QS8)
-// Testing for fixed point position [1,6) as reciprocal limits the maximum fixed point position to 5
-FIXTURE_DATA_TEST_CASE(RunTiny, CLSoftmaxLayerFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(datasets::SoftmaxLayerTinyShapes(), framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 1, 6)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_fixed_point);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLSoftmaxLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SoftmaxLayerSmallShapes(), framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 1, 6)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_fixed_point);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 14
-FIXTURE_DATA_TEST_CASE(RunTiny, CLSoftmaxLayerFixedPointFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(datasets::SoftmaxLayerTinyShapes(),
- framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_fixed_point);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLSoftmaxLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SoftmaxLayerSmallShapes(),
- framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_fixed_point);
-}
-TEST_SUITE_END()
-TEST_SUITE_END()
-
template <typename T>
using CLSoftmaxLayerQuantizedFixture = SoftmaxValidationQuantizedFixture<CLTensor, CLAccessor, CLSoftmaxLayer, T>;
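
The softmax fixtures above all validate the same transform; a minimal reference sketch in its numerically stable form (subtracting the maximum first, which is how such implementations commonly avoid overflow in exp):

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

std::vector<float> softmax(const std::vector<float> &x)
{
    const float m = *std::max_element(x.begin(), x.end());
    std::vector<float> y(x.size());
    float sum = 0.f;
    for(std::size_t i = 0; i < x.size(); ++i)
    {
        y[i] = std::exp(x[i] - m); // shift keeps exponents non-positive
        sum += y[i];
    }
    for(float &v : y)
    {
        v /= sum; // normalize so the outputs sum to one
    }
    return y;
}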
diff --git a/tests/validation/CL/WidthConcatenateLayer.cpp b/tests/validation/CL/WidthConcatenateLayer.cpp
index 0ff95df957..36a5e6fcfb 100644
--- a/tests/validation/CL/WidthConcatenateLayer.cpp
+++ b/tests/validation/CL/WidthConcatenateLayer.cpp
@@ -133,42 +133,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLWidthConcatenateLayerFixture<float>, framewor
TEST_SUITE_END()
TEST_SUITE_END()
-TEST_SUITE(Quantized)
-TEST_SUITE(QS8)
-FIXTURE_DATA_TEST_CASE(RunTiny, CLWidthConcatenateLayerFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Tiny2DShapes(),
- framework::dataset::make("DataType",
- DataType::QS8)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLWidthConcatenateLayerFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::WidthConcatenateLayerShapes(),
- framework::dataset::make("DataType",
- DataType::QS8)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-FIXTURE_DATA_TEST_CASE(RunTiny, CLWidthConcatenateLayerFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Tiny2DShapes(),
- framework::dataset::make("DataType",
- DataType::QS16)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLWidthConcatenateLayerFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(datasets::WidthConcatenateLayerShapes(),
- framework::dataset::make("DataType",
- DataType::QS16)))
-{
- // Validate output
- validate(CLAccessor(_target), _reference);
-}
-TEST_SUITE_END()
-TEST_SUITE_END()
-
TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
diff --git a/tests/validation/Helpers.h b/tests/validation/Helpers.h
index 76b3e2fbdc..49432d693e 100644
--- a/tests/validation/Helpers.h
+++ b/tests/validation/Helpers.h
@@ -104,26 +104,6 @@ std::pair<T, T> get_activation_layer_test_bounds(ActivationLayerInfo::Activation
break;
}
break;
- case DataType::QS8:
- case DataType::QS16:
- switch(activation)
- {
- case ActivationLayerInfo::ActivationFunction::LOGISTIC:
- case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
- case ActivationLayerInfo::ActivationFunction::TANH:
- // Reduce range as exponent overflows
- bounds = std::make_pair(-(1 << fixed_point_position), 1 << fixed_point_position);
- break;
- case ActivationLayerInfo::ActivationFunction::SQRT:
- // Reduce range as sqrt should take a non-negative number
- // Can't be zero either as inv_sqrt is used in NEON.
- bounds = std::make_pair(1, std::numeric_limits<T>::max());
- break;
- default:
- bounds = std::make_pair(std::numeric_limits<T>::lowest(), std::numeric_limits<T>::max());
- break;
- }
- break;
default:
ARM_COMPUTE_ERROR("Unsupported data type");
}
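
The branch removed above clamped transcendental-activation inputs to raw +/-(1 << p). Under the assumed QS encoding real = raw / 2^p this is the real interval [-1, 1] independent of p, which keeps exp()-based activations away from overflow. A worked sketch (hypothetical helper):

#include <utility>

std::pair<double, double> removed_bounds_as_real(int fixed_point_position)
{
    const double scale = static_cast<double>(1 << fixed_point_position);
    return { -(1 << fixed_point_position) / scale,
             (1 << fixed_point_position) / scale }; // always (-1.0, 1.0)
}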
diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp
index 868683259c..289ca4870e 100644
--- a/tests/validation/NEON/ActivationLayer.cpp
+++ b/tests/validation/NEON/ActivationLayer.cpp
@@ -60,10 +60,6 @@ AbsoluteTolerance<float> tolerance(DataType data_type, ActivationLayerInfo::Acti
case ActivationLayerInfo::ActivationFunction::TANH:
switch(data_type)
{
- case DataType::QS8:
- return AbsoluteTolerance<float>(5.f);
- case DataType::QS16:
- return AbsoluteTolerance<float>(11.f);
case DataType::F16:
return AbsoluteTolerance<float>(0.01f);
default:
@@ -82,8 +78,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType",
DataType::F16,
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
DataType::F32,
- DataType::QS8,
- DataType::QS16,
});
/** Input data sets. */
@@ -143,22 +137,16 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data types
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
- TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
}),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3),
- TensorInfo(),
})),
framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
- ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
- ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
})),
- framework::dataset::make("Expected", { false, true, false, false, true })),
+ framework::dataset::make("Expected", { false, true, false})),
input_info, output_info, act_info, expected)
{
bool is_valid = bool(NEActivationLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), act_info));
@@ -210,49 +198,6 @@ TEST_SUITE_END()
template <typename T>
using NEActivationLayerFixedPointFixture = ActivationValidationFixedPointFixture<Tensor, Accessor, NEActivationLayer, T>;
-TEST_SUITE(FixedPoint)
-TEST_SUITE(QS8)
-// We test for fixed point precision [3,5] because [1,2] and [6,7] ranges cause
-// overflowing issues in most of the transcendental functions.
-FIXTURE_DATA_TEST_CASE(RunTiny, NEActivationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), ActivationDataset),
- framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 3, 6)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance(_data_type, _function));
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), ActivationDataset),
- framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 3, 6)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance(_data_type, _function));
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 14
-FIXTURE_DATA_TEST_CASE(RunTiny, NEActivationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), ActivationDataset),
- framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance(_data_type, _function));
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), ActivationDataset),
- framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance(_data_type, _function));
-}
-TEST_SUITE_END()
-TEST_SUITE_END()
-
template <typename T>
using NEActivationLayerQuantizedFixture = ActivationValidationQuantizedFixture<Tensor, Accessor, NEActivationLayer, T>;
diff --git a/tests/validation/NEON/ArithmeticAddition.cpp b/tests/validation/NEON/ArithmeticAddition.cpp
index 98eeaa2f61..b01e5d929d 100644
--- a/tests/validation/NEON/ArithmeticAddition.cpp
+++ b/tests/validation/NEON/ArithmeticAddition.cpp
@@ -48,10 +48,6 @@ const auto ArithmeticAdditionU8Dataset = combine(combine(framework::dataset::mak
DataType::U8));
const auto ArithmeticAdditionS16Dataset = combine(combine(framework::dataset::make("DataType", { DataType::U8, DataType::S16 }), framework::dataset::make("DataType", DataType::S16)),
framework::dataset::make("DataType", DataType::S16));
-const auto ArithmeticAdditionQS8Dataset = combine(combine(framework::dataset::make("DataType", DataType::QS8), framework::dataset::make("DataType", DataType::QS8)),
- framework::dataset::make("DataType", DataType::QS8));
-const auto ArithmeticAdditionQS16Dataset = combine(combine(framework::dataset::make("DataType", DataType::QS16), framework::dataset::make("DataType", DataType::QS16)),
- framework::dataset::make("DataType", DataType::QS16));
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
const auto ArithmeticAdditionFP16Dataset = combine(combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F16)),
framework::dataset::make("DataType", DataType::F16));
@@ -74,26 +70,20 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), // Window shrink
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
}),
framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16),
TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
})),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
})),
- framework::dataset::make("Expected", { true, true, false, false, false, false, true })),
+ framework::dataset::make("Expected", { true, true, false, false, false})),
input1_info, input2_info, output_info, expected)
{
ARM_COMPUTE_EXPECT(bool(NEArithmeticAddition::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), ConvertPolicy::WRAP)) == expected, framework::LogLevel::ERRORS);
@@ -176,44 +166,6 @@ TEST_SUITE_END()
template <typename T>
using NEArithmeticAdditionFixedPointFixture = ArithmeticAdditionValidationFixedPointFixture<Tensor, Accessor, NEArithmeticAddition, T>;
-TEST_SUITE(Quantized)
-TEST_SUITE(QS8)
-FIXTURE_DATA_TEST_CASE(RunTiny, NEArithmeticAdditionFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), ArithmeticAdditionQS8Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- framework::dataset::make("FractionalBits", 1, 7)))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-
-FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionQS8Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- framework::dataset::make("FractionalBits", 1, 7)))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-FIXTURE_DATA_TEST_CASE(RunTiny, NEArithmeticAdditionFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), ArithmeticAdditionQS16Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- framework::dataset::make("FractionalBits", 1, 15)))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-
-FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionQS16Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- framework::dataset::make("FractionalBits", 1, 15)))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-TEST_SUITE_END()
-TEST_SUITE_END()
-
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(F16)
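
The ConvertPolicy values exercised above (SATURATE and WRAP) differ only in how out-of-range sums are folded back into the output type. A hedged int8 sketch (assumed semantics):

#include <algorithm>
#include <cstdint>

int8_t add_saturate(int8_t a, int8_t b) // SATURATE: clamp to [-128, 127]
{
    const int32_t sum = static_cast<int32_t>(a) + static_cast<int32_t>(b);
    return static_cast<int8_t>(std::min<int32_t>(std::max<int32_t>(sum, INT8_MIN), INT8_MAX));
}

int8_t add_wrap(int8_t a, int8_t b) // WRAP: keep the low eight bits (two's complement)
{
    return static_cast<int8_t>(static_cast<uint8_t>(a) + static_cast<uint8_t>(b));
}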
diff --git a/tests/validation/NEON/ArithmeticSubtraction.cpp b/tests/validation/NEON/ArithmeticSubtraction.cpp
index 01a107c17d..fc25465e6d 100644
--- a/tests/validation/NEON/ArithmeticSubtraction.cpp
+++ b/tests/validation/NEON/ArithmeticSubtraction.cpp
@@ -55,10 +55,6 @@ const auto ArithmeticSubtractionS16U8S16Dataset = combine(combine(framework::dat
framework::dataset::make("DataType", DataType::S16));
const auto ArithmeticSubtractionU8S16S16Dataset = combine(combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S16)),
framework::dataset::make("DataType", DataType::S16));
-const auto ArithmeticSubtractionQS8Dataset = combine(combine(framework::dataset::make("DataType", DataType::QS8), framework::dataset::make("DataType", DataType::QS8)),
- framework::dataset::make("DataType", DataType::QS8));
-const auto ArithmeticSubtractionQS16Dataset = combine(combine(framework::dataset::make("DataType", DataType::QS16), framework::dataset::make("DataType", DataType::QS16)),
- framework::dataset::make("DataType", DataType::QS16));
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
const auto ArithmeticSubtractionFP16Dataset = combine(combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F16)),
framework::dataset::make("DataType", DataType::F16));
@@ -78,26 +74,20 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), // Window shrink
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
}),
framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16),
TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
})),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
})),
- framework::dataset::make("Expected", { true, true, false, false, false, false, true })),
+ framework::dataset::make("Expected", { true, true, false, false, false})),
input1_info, input2_info, output_info, expected)
{
ARM_COMPUTE_EXPECT(bool(NEArithmeticSubtraction::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), ConvertPolicy::WRAP)) == expected, framework::LogLevel::ERRORS);
@@ -246,48 +236,6 @@ TEST_SUITE_END()
template <typename T1, typename T2 = T1, typename T3 = T1>
using NEArithmeticSubtractionFixedPointFixture = ArithmeticSubtractionValidationFixedPointFixture<Tensor, Accessor, NEArithmeticSubtraction, T1, T2, T3>;
-TEST_SUITE(Quantized)
-TEST_SUITE(QS8)
-FIXTURE_DATA_TEST_CASE(RunTiny, NEArithmeticSubtractionFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(),
- ArithmeticSubtractionQS8Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- framework::dataset::make("FractionalBits", 1, 7)))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-
-FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticSubtractionFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(),
- ArithmeticSubtractionQS8Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- framework::dataset::make("FractionalBits", 1, 7)))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-FIXTURE_DATA_TEST_CASE(RunTiny, NEArithmeticSubtractionFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(),
- ArithmeticSubtractionQS16Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- framework::dataset::make("FractionalBits", 1, 15)))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-
-FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticSubtractionFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(),
- ArithmeticSubtractionQS16Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- framework::dataset::make("FractionalBits", 1, 15)))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-TEST_SUITE_END()
-TEST_SUITE_END()
-
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
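
The Validate cases above zip parallel per-case lists, so dropping the two QS8 TensorInfo rows from each list must be matched by dropping two booleans from "Expected"; the lists have to stay the same length. A hypothetical standalone illustration of that invariant (not the framework's zip implementation):

    #include <array>
    #include <cstddef>
    #include <cstdio>

    // Parallel per-case tables, as in the zipped Validate datasets:
    // after deleting the two QS8 cases, every table holds five entries.
    constexpr std::array<const char *, 5> case_names = {
        "valid U8", "valid S16 out", "window shrink",
        "type mismatch", "shape mismatch"
    };
    constexpr std::array<bool, 5> expected = { true, true, false, false, false };

    static_assert(case_names.size() == expected.size(),
                  "zipped tables must stay the same length");

    int main()
    {
        for (std::size_t i = 0; i < expected.size(); ++i)
            std::printf("%-15s -> %s\n", case_names[i], expected[i] ? "pass" : "fail");
    }
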
diff --git a/tests/validation/NEON/BatchNormalizationLayer.cpp b/tests/validation/NEON/BatchNormalizationLayer.cpp
index 53fd0163ff..3a18a0a93b 100644
--- a/tests/validation/NEON/BatchNormalizationLayer.cpp
+++ b/tests/validation/NEON/BatchNormalizationLayer.cpp
@@ -48,9 +48,7 @@ constexpr AbsoluteTolerance<float> tolerance_f32(0.00001f); /**< Tolerance value
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
constexpr AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-constexpr AbsoluteTolerance<float> tolerance_qs8(3.0f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::QS8 */
-constexpr AbsoluteTolerance<float> tolerance_qs16(6.0f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::QS16 */
-const auto act_infos = framework::dataset::make("ActivationInfo",
+const auto act_infos = framework::dataset::make("ActivationInfo",
{
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f),
@@ -66,7 +64,7 @@ using NEBatchNormalizationLayerFixture = BatchNormalizationLayerValidationFixtur
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
combine(framework::dataset::make("UseBeta", { false, true }), framework::dataset::make("UseGamma", { false, true }))),
- framework::dataset::make("DataType", { DataType::QS8, DataType::QS16, DataType::F32 })),
+ framework::dataset::make("DataType", { DataType::F32 })),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
shape0, shape1, epsilon, use_beta, use_gamma, dt, data_layout)
{
@@ -106,46 +104,30 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching data types
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching data types
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Invalid mean/var/beta/gamma shape
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point position
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Fused activation with fixed point not supported
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Fused activation's a < b
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
}),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F16),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
- TensorInfo(),
})),
framework::dataset::make("MVBGInfo",{ TensorInfo(TensorShape(2U), 1, DataType::F32),
TensorInfo(TensorShape(2U), 1, DataType::F32),
TensorInfo(TensorShape(2U), 1, DataType::F16),
TensorInfo(TensorShape(2U), 1, DataType::F32),
TensorInfo(TensorShape(5U), 1, DataType::F32),
- TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
- TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
TensorInfo(TensorShape(2U), 1, DataType::F32),
- TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
- TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
})),
framework::dataset::make("ActivationLayerInfo",{ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f),
- ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f, 2.f),
- ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f, 2.f),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 2.f, 6.f),
- ActivationLayerInfo(),
- ActivationLayerInfo(),
})),
- framework::dataset::make("Expected", { true, false, false, false, false, false, false, false, true, true})),
+ framework::dataset::make("Expected", { true, false, false, false, false, false })),
input_info, output_info, mvbg_info, act_info, expected)
{
const auto &mean_info = mvbg_info;
@@ -191,42 +173,6 @@ TEST_SUITE_END()
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
TEST_SUITE_END()
-TEST_SUITE(Quantized)
-template <typename T>
-using NEBatchNormalizationLayerFixedPointFixture = BatchNormalizationLayerValidationFixedPointFixture<Tensor, Accessor, NEBatchNormalizationLayer, T>;
-
-TEST_SUITE(QS8)
-FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
- framework::dataset::make("UseBeta", false)),
- framework::dataset::make("UseGamma", false)),
- framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
- framework::dataset::make("DataType", DataType::QS8)),
- framework::dataset::make("DataLayout", DataLayout::NCHW)),
- framework::dataset::make("FractionalBits", 1, 6)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_qs8, 0);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
- framework::dataset::make("UseBeta", false)),
- framework::dataset::make("UseGamma", false)),
- framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
- framework::dataset::make("DataType", DataType::QS16)),
- framework::dataset::make("DataLayout", DataLayout::NCHW)),
- framework::dataset::make("FractionalBits", 1, 14)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_qs16, 0);
-}
-TEST_SUITE_END()
-
-TEST_SUITE_END()
-
TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
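
The deleted tolerance_qs8(3.0f) and tolerance_qs16(6.0f) constants were expressed in raw quantization steps: an absolute difference of k in the stored integer corresponds to a real-valued error of k * 2^-f. A short standalone check of that conversion (illustrative arithmetic only):

    #include <cmath>
    #include <cstdio>

    // Real-valued error implied by a tolerance of 'k' raw steps in Qm.f.
    float lsb_tolerance_to_real(float k, int f)
    {
        return k * std::ldexp(1.0f, -f); // k * 2^-f
    }

    int main()
    {
        // tolerance_qs8(3.0f) at 6 fractional bits: 3 * 2^-6 = 0.046875
        std::printf("%f\n", lsb_tolerance_to_real(3.0f, 6));
        // tolerance_qs16(6.0f) at 14 fractional bits: ~0.000366
        std::printf("%f\n", lsb_tolerance_to_real(6.0f, 14));
    }
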
diff --git a/tests/validation/NEON/Col2Im.cpp b/tests/validation/NEON/Col2Im.cpp
index 9f2415d628..e4a52f25dd 100644
--- a/tests/validation/NEON/Col2Im.cpp
+++ b/tests/validation/NEON/Col2Im.cpp
@@ -43,19 +43,17 @@ TEST_SUITE(Col2Im)
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
framework::dataset::make("InputInfo", { TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::S64), // Unsupported data type
TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::F32), // Mismatching data type
- TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point
TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::F32), // Invalid output shape
TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::F32),
}),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::F16),
TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::F16),
- TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::QS8, 3),
TensorInfo(TensorShape(3U, 3U, 10U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::F32),
})),
framework::dataset::make("ConvolvedWidth", { 3, 3, 3, 3, 3 })),
framework::dataset::make("ConvolvedHeight", { 4, 4, 4, 4, 4 })),
- framework::dataset::make("Expected", { false, false, false, false, true })),
+ framework::dataset::make("Expected", { false, false, false, true })),
input_info, output_info, convolved_width, convolved_height, expected)
{
bool status = bool(NECol2Im::validate(&input_info, &output_info, Size2D(convolved_width, convolved_height)));
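
These Validate tests depend on validate() returning a status object whose boolean conversion is true exactly when the configuration is accepted, which is why each case can be compared as bool(...) == expected. A toy standalone equivalent of the pattern (illustrative, not the library's Status definition):

    #include <cassert>
    #include <string>
    #include <utility>

    // Minimal stand-in for a Status-like result type.
    class Status
    {
    public:
        Status() = default;  // default-constructed means "OK"
        explicit Status(std::string err) : _err(std::move(err)) {}
        explicit operator bool() const { return _err.empty(); }

    private:
        std::string _err;
    };

    // A validate()-style check: reject mismatched element counts.
    Status validate_col2im(int in_elems, int out_elems)
    {
        if (in_elems != out_elems)
            return Status{ "output shape does not match input" };
        return Status{};
    }

    int main()
    {
        assert(bool(validate_col2im(120, 120)) == true);  // valid case
        assert(bool(validate_col2im(120, 90)) == false);  // invalid shape
    }
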
diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp
index 2c17487107..747d8d2f62 100644
--- a/tests/validation/NEON/ConvolutionLayer.cpp
+++ b/tests/validation/NEON/ConvolutionLayer.cpp
@@ -52,7 +52,6 @@ const AbsoluteTolerance<float> abs_tolerance_f32(0.002f); /**< Absolute toleranc
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
const AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-const AbsoluteTolerance<float> tolerance_q(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
constexpr AbsoluteTolerance<float> tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
/** CNN data types */
@@ -62,8 +61,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType",
DataType::F16,
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
DataType::F32,
- DataType::QS8,
- DataType::QS16,
DataType::QASYMM8,
});
const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
@@ -251,52 +248,6 @@ TEST_SUITE_END()
template <typename T>
using NEGEMMConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;
-TEST_SUITE(FixedPoint)
-TEST_SUITE(QS8)
-// We test for fixed point precision [4,6]
-FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::TinyConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType", DataType::QS8)),
- framework::dataset::make("FractionalBits", 4, 7)),
- ActivationFunctionsDataset))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_q);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType", DataType::QS8)),
- framework::dataset::make("FractionalBits", 4, 7)),
- ActivationFunctionsDataset))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_q);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-// Testing for fixed point position [1,14)
-FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::TinyConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType", DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)),
- ActivationFunctionsDataset))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_q);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType", DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)),
- ActivationFunctionsDataset))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_q);
-}
-TEST_SUITE_END()
-TEST_SUITE_END()
-
template <typename T>
using NEGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;
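
The deleted comment "// We test for fixed point precision [4,6]" next to make("FractionalBits", 4, 7) is consistent because the two-argument integer form evidently generates a half-open range [start, end). A standalone sketch of that behaviour (hypothetical helper, not the framework's code):

    #include <cstdio>
    #include <vector>

    // Emulate make("Name", start, end): values start, start+1, ..., end-1.
    std::vector<int> make_range(int start, int end)
    {
        std::vector<int> v;
        for (int i = start; i < end; ++i)
            v.push_back(i);
        return v;
    }

    int main()
    {
        for (int fb : make_range(4, 7))  // prints 4 5 6
            std::printf("%d ", fb);
        std::printf("\n");
    }
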
diff --git a/tests/validation/NEON/DeconvolutionLayer.cpp b/tests/validation/NEON/DeconvolutionLayer.cpp
index 3bb6d6f8fc..87d413f202 100644
--- a/tests/validation/NEON/DeconvolutionLayer.cpp
+++ b/tests/validation/NEON/DeconvolutionLayer.cpp
@@ -102,14 +102,14 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, (combine(datasets::Sm
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid weights shape
- TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 4), // Non supported data type
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16, 4), // Unsupported data type
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 11), // Invalid bias shape
TensorInfo(TensorShape(13U, 11U, 4U, 3U), 1, DataType::F32, 0), // Window shrink
TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32, 0),
}),
framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::F16, 0),
TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0),
- TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::QS8, 5),
+ TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::F16, 5),
TensorInfo(TensorShape(3U, 2U, 2U, 2U), 1, DataType::F32, 11),
TensorInfo(TensorShape(3U, 3U, 4U), 1, DataType::F32, 0),
TensorInfo(TensorShape(1U, 1U, 2U, 4U), 1, DataType::F32, 0),
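
With QS8 removed from the library, the deconvolution negative test needs a different data type that the NEON deconvolution rejects, hence the switch to F16 in this case. The shape of such a support check, sketched standalone (the type set below is illustrative, not the library's actual support matrix):

    #include <cassert>
    #include <initializer_list>

    enum class DataType { U8, S16, F16, F32 };

    // Hypothetical support check: this deconvolution accepts only F32.
    bool deconv_supports(DataType dt)
    {
        for (DataType ok : { DataType::F32 })
            if (dt == ok)
                return true;
        return false;
    }

    int main()
    {
        assert(deconv_supports(DataType::F32));   // positive case
        assert(!deconv_supports(DataType::F16));  // the new negative case
    }
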
diff --git a/tests/validation/NEON/DepthConcatenateLayer.cpp b/tests/validation/NEON/DepthConcatenateLayer.cpp
index c3bb8a93b7..0e0b674e41 100644
--- a/tests/validation/NEON/DepthConcatenateLayer.cpp
+++ b/tests/validation/NEON/DepthConcatenateLayer.cpp
@@ -100,42 +100,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConcatenateLayerFixture<float>, framewor
TEST_SUITE_END()
TEST_SUITE_END()
-TEST_SUITE(Quantized)
-TEST_SUITE(QS8)
-FIXTURE_DATA_TEST_CASE(RunTiny, NEDepthConcatenateLayerFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Tiny2DShapes(),
- framework::dataset::make("DataType",
- DataType::QS8)))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConcatenateLayerFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateLayerShapes(),
- framework::dataset::make("DataType",
- DataType::QS8)))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-FIXTURE_DATA_TEST_CASE(RunTiny, NEDepthConcatenateLayerFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Tiny2DShapes(),
- framework::dataset::make("DataType",
- DataType::QS16)))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConcatenateLayerFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateLayerShapes(),
- framework::dataset::make("DataType",
- DataType::QS16)))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-TEST_SUITE_END()
-TEST_SUITE_END()
-
TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
diff --git a/tests/validation/NEON/DepthConvertLayer.cpp b/tests/validation/NEON/DepthConvertLayer.cpp
index ea63a5f52b..2bd3db7075 100644
--- a/tests/validation/NEON/DepthConvertLayer.cpp
+++ b/tests/validation/NEON/DepthConvertLayer.cpp
@@ -44,19 +44,14 @@ namespace validation
namespace
{
/** Input data sets **/
-const auto DepthConvertLayerU8toU16Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::U16));
-const auto DepthConvertLayerU8toS16Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S16));
-const auto DepthConvertLayerU8toS32Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S32));
-const auto DepthConvertLayerU16toU8Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U8));
-const auto DepthConvertLayerU16toU32Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U32));
-const auto DepthConvertLayerS16toU8Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::U8));
-const auto DepthConvertLayerS16toS32Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::S32));
-const auto DepthConvertLayerQS8toFP32Dataset = combine(framework::dataset::make("DataType", DataType::QS8), framework::dataset::make("DataType", DataType::F32));
-const auto DepthConvertLayerQS16toFP32Dataset = combine(framework::dataset::make("DataType", DataType::QS16), framework::dataset::make("DataType", DataType::F32));
-const auto DepthConvertLayerFP32toQS8Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::QS8));
-const auto DepthConvertLayerFP32toQS16Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::QS16));
-const auto DepthConvertLayerShiftDataset = framework::dataset::make("Shift", 0, 7);
-const auto DepthConvertLayerFixedPointQuantizedDataset = framework::dataset::make("FractionalBits", 1, 7);
+const auto DepthConvertLayerU8toU16Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::U16));
+const auto DepthConvertLayerU8toS16Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S16));
+const auto DepthConvertLayerU8toS32Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S32));
+const auto DepthConvertLayerU16toU8Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U8));
+const auto DepthConvertLayerU16toU32Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U32));
+const auto DepthConvertLayerS16toU8Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::U8));
+const auto DepthConvertLayerS16toS32Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::S32));
+const auto DepthConvertLayerShiftDataset = framework::dataset::make("Shift", 0, 7);
} // namespace
TEST_SUITE(NEON)
@@ -73,10 +68,6 @@ template <typename T>
using NEDepthConvertLayerToU32Fixture = DepthConvertLayerValidationFixture<Tensor, Accessor, NEDepthConvertLayer, T, uint32_t>;
template <typename T>
using NEDepthConvertLayerToFP32FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture<Tensor, Accessor, NEDepthConvertLayer, T, float>;
-template <typename T>
-using NEDepthConvertLayerToQS8FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture<Tensor, Accessor, NEDepthConvertLayer, T, int8_t>;
-template <typename T>
-using NEDepthConvertLayerToQS16FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture<Tensor, Accessor, NEDepthConvertLayer, T, int16_t>;
TEST_SUITE(U8_to_U16)
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
@@ -367,124 +358,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToS32Fixture<int16_t>, frame
}
TEST_SUITE_END()
-TEST_SUITE(Quantized_to_FP32)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("DataType", { DataType::QS8, DataType::QS16 })),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerFixedPointQuantizedDataset),
- shape, dt, policy, fixed_point_position)
-{
- int shift = 0;
-
- // Create tensors
- Tensor src = create_tensor<Tensor>(shape, dt, 1, fixed_point_position);
- Tensor dst = create_tensor<Tensor>(shape, DataType::F32, 1, fixed_point_position);
-
- // Create and Configure function
- NEDepthConvertLayer depth_convert;
- depth_convert.configure(&src, &dst, policy, shift);
-
- // Validate valid region
- const ValidRegion valid_region = shape_to_valid_region(shape);
- validate(dst.info()->valid_region(), valid_region);
-
- // Validate padding
- const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
- validate(src.info()->padding(), padding);
- validate(dst.info()->padding(), padding);
-}
-FIXTURE_DATA_TEST_CASE(RunTinyQS8, NEDepthConvertLayerToFP32FixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(),
- DepthConvertLayerQS8toFP32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerFixedPointQuantizedDataset))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-FIXTURE_DATA_TEST_CASE(RunTinyQS16, NEDepthConvertLayerToFP32FixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(),
- DepthConvertLayerQS16toFP32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerFixedPointQuantizedDataset))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-FIXTURE_DATA_TEST_CASE(RunSmallQS8, NEDepthConvertLayerToFP32FixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(),
- DepthConvertLayerQS8toFP32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerFixedPointQuantizedDataset))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-FIXTURE_DATA_TEST_CASE(RunSmallQS16, NEDepthConvertLayerToFP32FixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(),
- DepthConvertLayerQS16toFP32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerFixedPointQuantizedDataset))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(FP32_to_Quantized)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("DataType", { DataType::QS8, DataType::QS16 })),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerFixedPointQuantizedDataset),
- shape, dt, policy, fixed_point_position)
-{
- int shift = 0;
-
- // Create tensors
- Tensor src = create_tensor<Tensor>(shape, DataType::F32, 1, fixed_point_position);
- Tensor dst = create_tensor<Tensor>(shape, dt, 1, fixed_point_position);
-
- // Create and Configure function
- NEDepthConvertLayer depth_convert;
- depth_convert.configure(&src, &dst, policy, shift);
-
- // Validate valid region
- const ValidRegion valid_region = shape_to_valid_region(shape);
- validate(dst.info()->valid_region(), valid_region);
-
- // Validate padding
- const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
- validate(src.info()->padding(), padding);
- validate(dst.info()->padding(), padding);
-}
-FIXTURE_DATA_TEST_CASE(RunTinyQS8, NEDepthConvertLayerToQS8FixedPointFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(),
- DepthConvertLayerFP32toQS8Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerFixedPointQuantizedDataset))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-FIXTURE_DATA_TEST_CASE(RunTinyQS16, NEDepthConvertLayerToQS16FixedPointFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(),
- DepthConvertLayerFP32toQS16Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerFixedPointQuantizedDataset))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-FIXTURE_DATA_TEST_CASE(RunSmallQS8, NEDepthConvertLayerToQS8FixedPointFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(),
- DepthConvertLayerFP32toQS8Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerFixedPointQuantizedDataset))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-FIXTURE_DATA_TEST_CASE(RunSmallQS16, NEDepthConvertLayerToQS16FixedPointFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(),
- DepthConvertLayerFP32toQS16Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- DepthConvertLayerFixedPointQuantizedDataset))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-TEST_SUITE_END()
-
TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
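
The deleted Configuration tests validated padding via PaddingCalculator(shape.x(), 16).required_padding(), i.e. the elements needed to round the row length up to the 16-element vector step. The underlying arithmetic, standalone (a sketch of the rounding rule, not the PaddingCalculator implementation):

    #include <cstdio>

    // Elements of padding needed so 'width' becomes a multiple of 'step'.
    int required_padding(int width, int step)
    {
        const int rem = width % step;
        return rem == 0 ? 0 : step - rem;
    }

    int main()
    {
        std::printf("%d\n", required_padding(27, 16)); // 5: 27 -> 32
        std::printf("%d\n", required_padding(32, 16)); // 0: already aligned
    }
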
diff --git a/tests/validation/NEON/DilatedConvolutionLayer.cpp b/tests/validation/NEON/DilatedConvolutionLayer.cpp
index d9fd093c8e..2888a6535e 100644
--- a/tests/validation/NEON/DilatedConvolutionLayer.cpp
+++ b/tests/validation/NEON/DilatedConvolutionLayer.cpp
@@ -47,7 +47,6 @@ const AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for c
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
const AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-const AbsoluteTolerance<float> tolerance_q(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
constexpr AbsoluteTolerance<float> tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
/** CNN data types */
@@ -57,8 +56,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType",
DataType::F16,
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
DataType::F32,
- DataType::QS8,
- DataType::QS16,
DataType::QASYMM8,
});
} // namespace
@@ -203,56 +200,6 @@ TEST_SUITE_END()
template <typename T>
using NEGEMMDilatedConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;
-TEST_SUITE(FixedPoint)
-TEST_SUITE(QS8)
-// We test for fixed point precision [4,6]
-FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMDilatedConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(combine(datasets::TinyDilatedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType", DataType::QS8)),
- framework::dataset::make("FractionalBits", 4, 7)),
- framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_q);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType", DataType::QS8)),
- framework::dataset::make("FractionalBits", 4, 7)),
- framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_q);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-// Testing for fixed point position [1,14)
-FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMDilatedConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(combine(datasets::TinyDilatedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType", DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)),
- framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_q);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType", DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)),
- framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_q);
-}
-TEST_SUITE_END()
-TEST_SUITE_END()
-
template <typename T>
using NEGEMMDilatedConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;
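
Unlike the plain convolution suites, the deleted dilated-convolution fixed-point cases pinned ActivationLayerInfo to its default-constructed value, which in these suites stands for "no fused activation". A toy standalone version of that enabled/disabled semantics (illustrative only):

    #include <cassert>

    enum class ActivationFunction { IDENTITY, RELU, BOUNDED_RELU };

    class ActivationInfo
    {
    public:
        ActivationInfo() = default;  // disabled by default: no fused activation
        explicit ActivationInfo(ActivationFunction f) : _enabled(true), _f(f) {}
        bool enabled() const { return _enabled; }

    private:
        bool               _enabled{ false };
        ActivationFunction _f{ ActivationFunction::IDENTITY };
    };

    int main()
    {
        assert(!ActivationInfo{}.enabled());                           // no fusion
        assert(ActivationInfo{ ActivationFunction::RELU }.enabled());  // fused ReLU
    }
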
diff --git a/tests/validation/NEON/DirectConvolutionLayer.cpp b/tests/validation/NEON/DirectConvolutionLayer.cpp
index f700758d21..4995d881cc 100644
--- a/tests/validation/NEON/DirectConvolutionLayer.cpp
+++ b/tests/validation/NEON/DirectConvolutionLayer.cpp
@@ -42,7 +42,6 @@ namespace validation
{
namespace
{
-constexpr AbsoluteTolerance<float> tolerance_qs(1.f); /**< Tolerance for fixed point tests */
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
constexpr AbsoluteTolerance<float> tolerance_fp16(0.01f); /**< Tolerance for half precision floating point tests */
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
@@ -59,33 +58,12 @@ const auto data_pad_f32 = concat(concat(combine(framework::dataset::make("PadX",
combine(framework::dataset::make("PadY", { 0, 3 }),
framework::dataset::make("KernelSize", 5))));
-const auto data_pad_qs8 = concat(combine(framework::dataset::make("PadX", 0),
- combine(framework::dataset::make("PadY", 0),
- framework::dataset::make("KernelSize", 1))),
- combine(framework::dataset::make("PadX", { 0, 2 }),
- combine(framework::dataset::make("PadY", { 0, 2 }),
- framework::dataset::make("KernelSize", 3))));
-
const auto data_f32 = combine(datasets::SmallDirectConvolutionShapes(),
combine(framework::dataset::make("StrideX", { 1, 3 }),
combine(framework::dataset::make("StrideY", { 1, 3 }),
combine(data_pad_f32,
framework::dataset::make("NumKernels", { 1, 4, 8, 16 })))));
-const auto data_qs8 = combine(datasets::TinyDirectConvolutionShapes(),
- combine(framework::dataset::make("StrideX", { 1, 3 }),
- combine(framework::dataset::make("StrideY", { 1, 3 }),
- combine(data_pad_qs8,
- framework::dataset::make("NumKernels", { 1, 4, 8, 16 })))));
-
-/** Direct convolution QS16 data set. */
-const auto data_qs16 = combine(datasets::TinyDirectConvolutionShapes(),
- combine(framework::dataset::make("StrideX", { 1, 3 }),
- combine(framework::dataset::make("StrideY", { 1, 3 }),
- combine(framework::dataset::make("PadX", 0),
- combine(framework::dataset::make("PadY", 0),
- combine(framework::dataset::make("KernelSize", 1),
- framework::dataset::make("NumKernels", { 1, 4, 8, 16 })))))));
/** Activation function Dataset*/
const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
@@ -205,30 +183,6 @@ const auto QuantizedActivationFunctionsDataset = framework::dataset::make("Activ
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
});
-TEST_SUITE(Quantized)
-TEST_SUITE(QS8)
-// We test for fixed point precision [4,6]
-FIXTURE_DATA_TEST_CASE(Run, NEDirectConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(data_qs8, framework::dataset::make("DataType", DataType::QS8)),
- framework::dataset::make("FractionalBits", 4, 7)),
- QuantizedActivationFunctionsDataset))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_qs);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-// We test for fixed point precision [4,13]
-FIXTURE_DATA_TEST_CASE(Run, NEDirectConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(combine(data_qs16, framework::dataset::make("DataType", DataType::QS16)),
- framework::dataset::make("FractionalBits", 4, 14)),
- QuantizedActivationFunctionsDataset))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_qs);
-}
-TEST_SUITE_END()
-TEST_SUITE_END()
-
TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
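
The deleted data_pad_qs8 definition exercises both dataset combinators used throughout these files: combine forms the cartesian product of its operands, while concat appends one case list to another. A standalone sketch over plain containers (hypothetical helpers mirroring that grammar):

    #include <cstdio>
    #include <utility>
    #include <vector>

    using Case = std::pair<int, int>; // (pad, kernel_size)

    // combine: cartesian product of pad values with a kernel size.
    std::vector<Case> combine(const std::vector<int> &pads, int kernel)
    {
        std::vector<Case> out;
        for (int p : pads)
            out.emplace_back(p, kernel);
        return out;
    }

    // concat: append one case list to another.
    std::vector<Case> concat(std::vector<Case> a, const std::vector<Case> &b)
    {
        a.insert(a.end(), b.begin(), b.end());
        return a;
    }

    int main()
    {
        // Mirrors data_pad_qs8: {pad 0, k=1} concatenated with {pad 0/2, k=3}.
        auto cases = concat(combine({ 0 }, 1), combine({ 0, 2 }, 3));
        for (const auto &c : cases)
            std::printf("pad=%d kernel=%d\n", c.first, c.second);
    }
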
diff --git a/tests/validation/NEON/FixedPoint/FixedPoint.cpp b/tests/validation/NEON/FixedPoint/FixedPoint.cpp
deleted file mode 100644
index a583b587ae..0000000000
--- a/tests/validation/NEON/FixedPoint/FixedPoint.cpp
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "FixedPointTarget.h"
-
-#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/Tensor.h"
-#include "arm_compute/runtime/TensorAllocator.h"
-#include "tests/NEON/Accessor.h"
-#include "tests/PaddingCalculator.h"
-#include "tests/datasets/ShapeDatasets.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Macros.h"
-#include "tests/framework/datasets/Datasets.h"
-#include "tests/validation/Validation.h"
-#include "tests/validation/fixtures/FixedPointFixture.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-namespace
-{
-constexpr AbsoluteTolerance<float> tolerance_exp_qs8(0.0f); /**< Tolerance value for comparing reference's output against implementation's output (exponential) for DataType::QS8 */
-constexpr AbsoluteTolerance<float> tolerance_exp_qs16(1.0f); /**< Tolerance value for comparing reference's output against implementation's output (exponential) for DataType::QS16 */
-constexpr AbsoluteTolerance<float> tolerance_invsqrt_qs8(4.0f); /**< Tolerance value for comparing reference's output against implementation's output (inverse square-root) for DataType::QS8 */
-constexpr AbsoluteTolerance<float> tolerance_invsqrt_qs16(5.0f); /**< Tolerance value for comparing reference's output against implementation's output (inverse square-root) for DataType::QS16 */
-constexpr AbsoluteTolerance<float> tolerance_log_qs8(5.0f); /**< Tolerance value for comparing reference's output against implementation's output (logarithm) for DataType::QS8 */
-constexpr AbsoluteTolerance<float> tolerance_log_qs16(7.0f); /**< Tolerance value for comparing reference's output against implementation's output (logarithm) for DataType::QS16 */
-} // namespace
-
-TEST_SUITE(NEON)
-TEST_SUITE(FixedPoint)
-template <typename T>
-using NEFixedPointFixture = FixedPointValidationFixture<Tensor, Accessor, T>;
-
-TEST_SUITE(QS8)
-TEST_SUITE(Exp)
-
-FIXTURE_DATA_TEST_CASE(RunSmall, NEFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::Small1DShapes(), framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FixedPointOp", FixedPointOp::EXP)),
- framework::dataset::make("FractionalBits", 1, 7)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_exp_qs8, 0);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(Invsqrt)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::Small1DShapes(), framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FixedPointOp", FixedPointOp::INV_SQRT)),
- framework::dataset::make("FractionalBits", 1, 6)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_invsqrt_qs8, 0);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(Log)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::Small1DShapes(), framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FixedPointOp", FixedPointOp::LOG)),
- framework::dataset::make("FractionalBits", 3, 6)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_log_qs8, 0);
-}
-TEST_SUITE_END()
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-TEST_SUITE(Exp)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEFixedPointFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::Small1DShapes(), framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FixedPointOp", FixedPointOp::EXP)),
- framework::dataset::make("FractionalBits", 1, 15)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_exp_qs16, 0);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(Invsqrt)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEFixedPointFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::make("Shape", TensorShape(8192U)),
- framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FixedPointOp", FixedPointOp::INV_SQRT)),
- framework::dataset::make("FractionalBits", 1, 14)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_invsqrt_qs16, 0);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(Log)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEFixedPointFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::Small1DShapes(), framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FixedPointOp", FixedPointOp::LOG)),
- framework::dataset::make("FractionalBits", 4, 14)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_log_qs16, 0);
-}
-TEST_SUITE_END()
-TEST_SUITE_END()
-
-TEST_SUITE_END()
-TEST_SUITE_END()
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
diff --git a/tests/validation/NEON/FixedPoint/FixedPointTarget.h b/tests/validation/NEON/FixedPoint/FixedPointTarget.h
deleted file mode 100644
index 31ccc0817e..0000000000
--- a/tests/validation/NEON/FixedPoint/FixedPointTarget.h
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_FIXED_POINT_NEON_TARGET
-#define ARM_COMPUTE_TEST_FIXED_POINT_NEON_TARGET
-
-#include "arm_compute/core/NEON/NEFixedPoint.h"
-
-#include "tests/Globals.h"
-#include "tests/Types.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-namespace
-{
-template <typename TensorType, typename AccessorType, typename T>
-void compute_target_impl(const TensorShape &shape, DataType dt, FixedPointOp op, int fixed_point_position, TensorType &src, TensorType &dst)
-{
- Window window;
-
- switch(dt)
- {
- case DataType::QS8:
- {
- constexpr unsigned int num_elems_processed_per_iteration = 16;
- window = calculate_max_window(*src.info(), Steps(num_elems_processed_per_iteration));
- AccessWindowHorizontal input_access(src.info(), 0, num_elems_processed_per_iteration);
- AccessWindowHorizontal output_access(dst.info(), 0, num_elems_processed_per_iteration);
- update_window_and_padding(window, input_access, output_access);
- break;
- }
- case DataType::QS16:
- {
- constexpr unsigned int num_elems_processed_per_iteration = 8;
- window = calculate_max_window(*src.info(), Steps(num_elems_processed_per_iteration));
- AccessWindowHorizontal input_access(src.info(), 0, num_elems_processed_per_iteration);
- AccessWindowHorizontal output_access(dst.info(), 0, num_elems_processed_per_iteration);
- update_window_and_padding(window, input_access, output_access);
- break;
- }
- default:
- ARM_COMPUTE_ERROR("Not Supported");
- break;
- }
-
- int min;
- int max;
- switch(op)
- {
- case FixedPointOp::EXP:
- {
- // Fill tensors. Keep the range between [-1.0, 1.0) so the result won't
- // overflow.
- min = -(1 << (fixed_point_position - 1));
- max = (1 << (fixed_point_position - 1));
- break;
- }
- case FixedPointOp::INV_SQRT:
- {
- if(dt == DataType::QS8)
- {
- // Fill tensors. Keep the range between [1, 127).
- min = 1;
- max = 127;
- }
- else
- {
- // Fill tensors. Keep the range between [1, 0x7FFF)
- min = 1;
- max = 0x7FFF;
- }
- break;
- }
- case FixedPointOp::LOG:
- {
- if(dt == DataType::QS8)
- {
- // Fill tensors. Keep the range between [(1 << (fixed_point_position - 1), 63) so the result won't
- // overflow. E.g. for Q2.5 ln(0.001) = -6.9, which cannot be represented.
- min = (1 << (fixed_point_position - 1));
- max = 0x3F;
- }
- else
- {
- // Fill tensors. Keep the range between [(1 << (fixed_point_position - 1), 0x3FFF) so the result won't
- // overflow.
- min = (1 << (fixed_point_position - 1));
- max = 0x3FFF;
- }
- break;
- }
- case FixedPointOp::RECIPROCAL:
- {
- if(dt == DataType::QS8)
- {
- // Fill tensors. Keep the range between [15, 100) so the result won't
- // overflow. E.g. for Q2.5 reciprocal(0.001) = 1000, which cannot be represented.
- min = 15;
- max = 0x7F;
- }
- else
- {
- // Fill tensors. Keep the range between [15, 0x7FFF) so the result won't
- // overflow.
- min = 15;
- max = 0x7FFF;
- }
- break;
- }
- default:
- ARM_COMPUTE_ERROR("Not Supported");
- break;
- }
-
- std::uniform_int_distribution<> distribution(min, max);
- library->fill(AccessorType(src), distribution, 0);
-
- Iterator input(&src, window);
- Iterator output(&dst, window);
-
- const auto loop_function = [&](const Coordinates & id)
- {
- switch(dt)
- {
- case DataType::QS8:
- {
- const qint8x16_t qs8in = vld1q_s8(reinterpret_cast<const qint8_t *>(input.ptr()));
- switch(op)
- {
- case FixedPointOp::EXP:
- {
- // Use saturated exp
- vst1q_s8(reinterpret_cast<qint8_t *>(output.ptr()), vqexpq_qs8(qs8in, fixed_point_position));
- break;
- }
- case FixedPointOp::INV_SQRT:
- {
- vst1q_s8(reinterpret_cast<qint8_t *>(output.ptr()), vqinvsqrtq_qs8(qs8in, fixed_point_position));
- break;
- }
- case FixedPointOp::LOG:
- {
- vst1q_s8(reinterpret_cast<qint8_t *>(output.ptr()), vlogq_qs8(qs8in, fixed_point_position));
- break;
- }
- case FixedPointOp::RECIPROCAL:
- {
- vst1q_s8(reinterpret_cast<qint8_t *>(output.ptr()), vrecipq_qs8(qs8in, fixed_point_position));
- break;
- }
- default:
- ARM_COMPUTE_ERROR("Not Supported");
- break;
- }
- break;
- }
- case DataType::QS16:
- {
- const qint16x8_t qs16in = vld1q_qs16(reinterpret_cast<const qint16_t *>(input.ptr()));
- switch(op)
- {
- case FixedPointOp::EXP:
- {
- // Use saturated exp
- vst1q_qs16(reinterpret_cast<qint16_t *>(output.ptr()), vqexpq_qs16(qs16in, fixed_point_position));
- break;
- }
- case FixedPointOp::INV_SQRT:
- {
- vst1q_qs16(reinterpret_cast<qint16_t *>(output.ptr()), vqinvsqrtq_qs16(qs16in, fixed_point_position));
- break;
- }
- case FixedPointOp::LOG:
- {
- vst1q_qs16(reinterpret_cast<qint16_t *>(output.ptr()), vlogq_qs16(qs16in, fixed_point_position));
- break;
- }
- case FixedPointOp::RECIPROCAL:
- {
- vst1q_qs16(reinterpret_cast<qint16_t *>(output.ptr()), vqrecipq_qs16(qs16in, fixed_point_position));
- break;
- }
- default:
- ARM_COMPUTE_ERROR("Not Supported");
- break;
- }
- break;
- }
- default:
- ARM_COMPUTE_ERROR("Not Supported");
- break;
- }
- };
-
- execute_window_loop(window, loop_function, input, output);
-}
-} // namespace
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_FIXED_POINT_NEON_TARGET */
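
The target code removed above chose its input ranges per operation to keep results representable: for EXP it drew raw values in ±2^(f-1), i.e. real inputs in roughly [-0.5, 0.5), since exp() of anything near the top of the Qm.f range would overflow. A scalar standalone check of that headroom argument (illustrative arithmetic only):

    #include <cmath>
    #include <cstdio>

    // Largest real value representable in signed 8-bit Qm.f.
    float qs8_max(int f) { return 127.0f / (1 << f); }

    int main()
    {
        const int   f      = 5;                     // Q2.5
        const float in_max = std::ldexp(1.0f, -1);  // 0.5, top of the test range
        std::printf("max representable: %f\n", qs8_max(f));  // 3.968750
        std::printf("exp(0.5) = %f (fits)\n", std::exp(in_max));
        std::printf("exp(%f) = %f (would overflow)\n",
                    qs8_max(f), std::exp(qs8_max(f)));       // ~52.9 > 3.97
    }
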
diff --git a/tests/validation/NEON/FixedPointPixelWiseMultiplication.cpp b/tests/validation/NEON/FixedPointPixelWiseMultiplication.cpp
deleted file mode 100644
index d7954dec64..0000000000
--- a/tests/validation/NEON/FixedPointPixelWiseMultiplication.cpp
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h"
-#include "tests/NEON/Accessor.h"
-#include "tests/datasets/ConvertPolicyDataset.h"
-#include "tests/datasets/ShapeDatasets.h"
-#include "tests/framework/Macros.h"
-#include "tests/validation/Validation.h"
-#include "tests/validation/fixtures/FixedPointPixelWiseMultiplicationFixture.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-namespace
-{
-const float tolerance = 1.f;
-const float scale_255 = 1.f / 255.f;
-const float scale_unity = 1.f;
-
-// *INDENT-OFF*
-// clang-format off
-#define FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(TEST_NAME, FIXTURE, MODE, SHAPES, DT1, DT2, SCALE, RP, FPP_START, FPP_END) \
- FIXTURE_DATA_TEST_CASE(TEST_NAME, NEFixedPointPixelWiseMultiplication##FIXTURE, framework::DatasetMode::MODE, \
- combine(combine(combine(combine(combine(combine( \
- datasets::SHAPES, \
- framework::dataset::make("DataType1", DataType::DT1)), \
- framework::dataset::make("DataType2", DataType::DT2)), \
- framework::dataset::make("Scale", std::move(SCALE))), \
- datasets::ConvertPolicies()), \
- framework::dataset::make("RoundingPolicy", RoundingPolicy::RP)), \
- framework::dataset::make("FixedPointPosition", FPP_START, FPP_END))) \
- { \
- validate(Accessor(_target), _reference); \
- }
-
-#define FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(TEST_NAME, FIXTURE, MODE, SHAPES, DT1, DT2, RP, FPP, TOLERANCE) \
- FIXTURE_DATA_TEST_CASE(TEST_NAME, NEFixedPointPixelWiseMultiplication##FIXTURE, framework::DatasetMode::MODE, \
- combine(combine(combine(combine(combine(combine( \
- datasets::SHAPES, \
- framework::dataset::make("DataType1", DataType::DT1)), \
- framework::dataset::make("DataType2", DataType::DT2)), \
- framework::dataset::make("Scale", 1.f / static_cast<float>(1 << (FPP)))), \
- datasets::ConvertPolicies()), \
- framework::dataset::make("RoundingPolicy", RoundingPolicy::RP)), \
- framework::dataset::make("FixedPointPosition", FPP))) \
- { \
- validate(Accessor(_target), _reference, AbsoluteTolerance<float>(TOLERANCE), 0.f); \
- }
-// clang-format on
-// *INDENT-ON*
-} // namespace
-
-template <typename T>
-using NEFixedPointPixelWiseMultiplicationFixture = FixedPointPixelWiseMultiplicationValidationFixture<Tensor, Accessor, NEPixelWiseMultiplication, T>;
-
-TEST_SUITE(NEON)
-TEST_SUITE(FixedPointPixelWiseMultiplication)
-
-TEST_SUITE(QS8)
-
-TEST_SUITE(Scale255)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunTiny, Fixture<qint8_t>, PRECOMMIT, TinyShapes(), QS8, QS8, scale_255, TO_NEAREST_UP, 1, 7)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunSmall, Fixture<qint8_t>, NIGHTLY, SmallShapes(), QS8, QS8, scale_255, TO_NEAREST_UP, 1, 7)
-TEST_SUITE_END() // Scale255
-
-TEST_SUITE(ScaleUnity)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunTiny, Fixture<qint8_t>, PRECOMMIT, TinyShapes(), QS8, QS8, scale_unity, TO_ZERO, 1, 7)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunSmall, Fixture<qint8_t>, NIGHTLY, SmallShapes(), QS8, QS8, scale_unity, TO_ZERO, 1, 7)
-TEST_SUITE_END() // ScaleUnity
-
-TEST_SUITE(ScaleOther)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther1, Fixture<qint8_t>, PRECOMMIT, TinyShapes(), QS8, QS8, TO_ZERO, 1, tolerance)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther2, Fixture<qint8_t>, PRECOMMIT, TinyShapes(), QS8, QS8, TO_ZERO, 2, tolerance)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther3, Fixture<qint8_t>, PRECOMMIT, TinyShapes(), QS8, QS8, TO_ZERO, 3, tolerance)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther4, Fixture<qint8_t>, PRECOMMIT, TinyShapes(), QS8, QS8, TO_ZERO, 4, tolerance)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther5, Fixture<qint8_t>, PRECOMMIT, TinyShapes(), QS8, QS8, TO_ZERO, 5, tolerance)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther6, Fixture<qint8_t>, PRECOMMIT, TinyShapes(), QS8, QS8, TO_ZERO, 6, tolerance)
-
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunSmallOther1, Fixture<qint8_t>, NIGHTLY, SmallShapes(), QS8, QS8, TO_ZERO, 1, tolerance)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunSmallOther2, Fixture<qint8_t>, NIGHTLY, SmallShapes(), QS8, QS8, TO_ZERO, 2, tolerance)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunSmallOther3, Fixture<qint8_t>, NIGHTLY, SmallShapes(), QS8, QS8, TO_ZERO, 3, tolerance)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunSmallOther4, Fixture<qint8_t>, NIGHTLY, SmallShapes(), QS8, QS8, TO_ZERO, 4, tolerance)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunSmallOther5, Fixture<qint8_t>, NIGHTLY, SmallShapes(), QS8, QS8, TO_ZERO, 5, tolerance)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunSmallOther6, Fixture<qint8_t>, NIGHTLY, SmallShapes(), QS8, QS8, TO_ZERO, 6, tolerance)
-TEST_SUITE_END() // ScaleOther
-
-TEST_SUITE_END() // QS8
-
-TEST_SUITE(QS16)
-
-TEST_SUITE(Scale255)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunTiny, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, scale_255, TO_NEAREST_UP, 1, 15)
-TEST_SUITE_END() // Scale255
-
-TEST_SUITE(ScaleUnity)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunTiny, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, scale_unity, TO_ZERO, 1, 15)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunSmall, Fixture<qint16_t>, NIGHTLY, SmallShapes(), QS16, QS16, scale_unity, TO_ZERO, 1, 15)
-TEST_SUITE_END() // ScaleUnity
-
-TEST_SUITE(ScaleOther)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther1, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 1, tolerance)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther2, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 2, tolerance)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther3, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 3, tolerance)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther4, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 4, tolerance)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther5, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 5, tolerance)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther6, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 6, tolerance)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther7, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 7, tolerance)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther8, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 8, tolerance)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther9, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 9, tolerance)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther10, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 10, tolerance)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther11, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 11, tolerance)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther12, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 12, tolerance)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther13, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 13, tolerance)
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther14, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 14, tolerance)
-TEST_SUITE_END() // ScaleOther
-
-TEST_SUITE_END() // QS16
-
-TEST_SUITE_END()
-TEST_SUITE_END()
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
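
The QS8/QS16 suites deleted above exercised fixed-point pixel-wise multiplication across many fixed point positions. As background, the arithmetic reduces to an integer multiply followed by a shift back to the shared position; the sketch below is illustrative only, with qs8_mul_sat a hypothetical helper (not a library API) that assumes both operands use the same fixed point position and a non-negative product.

#include <algorithm>
#include <cstdint>
#include <iostream>

// Multiply two QS8 values that share one fixed point position. A product of
// two Q(7-n).n values carries 2n fractional bits, so it is shifted back down
// by n and saturated to the int8 range.
int8_t qs8_mul_sat(int8_t a, int8_t b, int frac_bits)
{
    const int32_t product = static_cast<int32_t>(a) * static_cast<int32_t>(b);
    const int32_t scaled  = product >> frac_bits; // truncates towards zero for non-negative products
    return static_cast<int8_t>(std::clamp<int32_t>(scaled, INT8_MIN, INT8_MAX));
}

int main()
{
    // 1.5 * 1.5 in Q5.2 (frac_bits = 2): raw 6 * 6 = 36, 36 >> 2 = 9, i.e. 2.25
    std::cout << static_cast<int>(qs8_mul_sat(6, 6, 2)) << '\n'; // prints 9
}
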
diff --git a/tests/validation/NEON/Flatten.cpp b/tests/validation/NEON/Flatten.cpp
index 332716925a..86f1a11262 100644
--- a/tests/validation/NEON/Flatten.cpp
+++ b/tests/validation/NEON/Flatten.cpp
@@ -80,38 +80,6 @@ TEST_SUITE_END()
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
TEST_SUITE_END()
-TEST_SUITE(Quantized)
-TEST_SUITE(QS8)
-FIXTURE_DATA_TEST_CASE(RunTiny, NEFlattenLayerFixture<int8_t>, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::Tiny3DShapes(), datasets::Tiny4DShapes()),
- framework::dataset::make("DataType", DataType::QS8)))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEFlattenLayerFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(framework::dataset::concat(datasets::Small3DShapes(), datasets::Small4DShapes()),
- framework::dataset::make("DataType", DataType::QS8)))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-FIXTURE_DATA_TEST_CASE(RunTiny, NEFlattenLayerFixture<int16_t>, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::Tiny3DShapes(), datasets::Tiny4DShapes()),
- framework::dataset::make("DataType", DataType::QS16)))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEFlattenLayerFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(framework::dataset::concat(datasets::Small3DShapes(), datasets::Small4DShapes()),
- framework::dataset::make("DataType", DataType::QS16)))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-TEST_SUITE_END()
-TEST_SUITE_END()
-
TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
diff --git a/tests/validation/NEON/FullyConnectedLayer.cpp b/tests/validation/NEON/FullyConnectedLayer.cpp
index ed0edcd3be..3adcf61dc9 100644
--- a/tests/validation/NEON/FullyConnectedLayer.cpp
+++ b/tests/validation/NEON/FullyConnectedLayer.cpp
@@ -47,8 +47,6 @@ constexpr RelativeTolerance<float> tolerance_f32(0.01f);
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
constexpr RelativeTolerance<float> tolerance_f16(0.01f);
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC*/
-/** Tolerance for fixed point operations */
-constexpr AbsoluteTolerance<float> tolerance_fixed_point(1.f);
/** CNN data types */
const auto CNNDataTypes = framework::dataset::make("DataType",
@@ -57,8 +55,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType",
DataType::F16,
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
DataType::F32,
- DataType::QS8,
- DataType::QS16,
});
const auto FullyConnectedParameters = combine(framework::dataset::make("TransposeWeights", { false, true }), framework::dataset::make("ReshapeWeights", { false, true }));
@@ -119,7 +115,6 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
framework::dataset::make("InputInfo", { TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::F32), // Mismatching data types
- TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::QS8, 2), // Mismatching fixed point position
TensorInfo(TensorShape(8U, 4U, 6U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(8U, 4U, 6U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::F32), // Invalid weights dimensions
@@ -127,7 +122,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
TensorInfo(TensorShape(8U, 4U, 6U, 4U), 1, DataType::F32),
}),
framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(315U, 271U), 1, DataType::F16),
- TensorInfo(TensorShape(315U, 271U), 1, DataType::QS8, 3),
TensorInfo(TensorShape(192U, 192U), 1, DataType::F32),
TensorInfo(TensorShape(192U, 192U), 1, DataType::F32),
TensorInfo(TensorShape(217U, 315U), 1, DataType::F32),
@@ -135,7 +129,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
TensorInfo(TensorShape(192U, 192U), 1, DataType::F32),
})),
framework::dataset::make("BiasInfo",{ TensorInfo(TensorShape(271U), 1, DataType::F32),
- TensorInfo(TensorShape(271U), 1, DataType::QS8, 2),
TensorInfo(TensorShape(192U), 1, DataType::F32),
TensorInfo(TensorShape(192U), 1, DataType::F32),
TensorInfo(TensorShape(271U), 1, DataType::F32),
@@ -143,16 +136,15 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
TensorInfo(TensorShape(192U), 1, DataType::F32),
})),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(271U, 3U), 1, DataType::F32),
- TensorInfo(TensorShape(271U, 3U), 1, DataType::QS8, 3),
TensorInfo(TensorShape(192U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(192U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(271U, 3U), 1, DataType::F32),
TensorInfo(TensorShape(271U, 3U), 1, DataType::F32),
TensorInfo(TensorShape(192U, 4U), 1, DataType::F32),
})),
- framework::dataset::make("TransposeWeights",{ true, true, true, false, true, true, true })),
- framework::dataset::make("ReshapedWeights",{ false, false, false, false, false, false , false})),
- framework::dataset::make("Expected", { false, false, true, true, false, false, true })),
+ framework::dataset::make("TransposeWeights",{ true, true, false, true, true, true })),
+ framework::dataset::make("ReshapedWeights",{ false, false, false, false, false , false})),
+ framework::dataset::make("Expected", { false, true, true, false, false, true })),
input_info, weights_info, bias_info, output_info, transpose_weights, reshaped_weights, expected)
{
Status status = NEFullyConnectedLayer::validate(&input_info.clone()->set_is_resizable(false), &weights_info.clone()->set_is_resizable(false), &bias_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), transpose_weights, reshaped_weights);
@@ -203,52 +195,6 @@ TEST_SUITE_END()
template <typename T>
using NEFullyConnectedLayerFixedPointFixture = FullyConnectedLayerValidationFixedPointFixture<Tensor, Accessor, NEFullyConnectedLayer, T, true>;
-TEST_SUITE(FixedPoint)
-TEST_SUITE(QS8)
-// Testing for fixed point position [1,6) as reciprocal limits the maximum fixed point position to 5
-FIXTURE_DATA_TEST_CASE(RunTiny, NEFullyConnectedLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyFullyConnectedLayerDataset(),
- FullyConnectedParameters),
- framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 1, 6)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_fixed_point);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallFullyConnectedLayerDataset(),
- FullyConnectedParameters),
- framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 1, 6)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_fixed_point);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 13
-FIXTURE_DATA_TEST_CASE(RunTiny, NEFullyConnectedLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyFullyConnectedLayerDataset(),
- FullyConnectedParameters),
- framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_fixed_point);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallFullyConnectedLayerDataset(),
- FullyConnectedParameters),
- framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_fixed_point);
-}
-TEST_SUITE_END()
-TEST_SUITE_END()
-
TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
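
The Validate case above is built from zip()-ed parallel datasets, so deleting the QS8 row means removing the same index from every axis (here the TransposeWeights/ReshapedWeights/Expected lists shrink from seven entries to six). A minimal stand-in for that pattern; ValidateCase and zip_cases are hypothetical illustration types, not the test framework's.

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

// One position across the parallel vectors describes one test case, so a case
// can only be removed by erasing the same index from every vector at once.
struct ValidateCase
{
    std::string input;
    bool        transpose_weights;
    bool        expected;
};

std::vector<ValidateCase> zip_cases(const std::vector<std::string> &inputs,
                                    const std::vector<bool>        &transpose,
                                    const std::vector<bool>        &expected)
{
    assert(inputs.size() == transpose.size() && inputs.size() == expected.size());
    std::vector<ValidateCase> cases;
    for(std::size_t i = 0; i < inputs.size(); ++i)
    {
        cases.push_back({ inputs[i], transpose[i], expected[i] });
    }
    return cases;
}

int main()
{
    const auto cases = zip_cases({ "F32", "F32" }, { true, false }, { false, true });
    return cases.size() == 2 ? 0 : 1;
}
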
diff --git a/tests/validation/NEON/GEMM.cpp b/tests/validation/NEON/GEMM.cpp
index bfa2db443a..e0f63a8a2d 100644
--- a/tests/validation/NEON/GEMM.cpp
+++ b/tests/validation/NEON/GEMM.cpp
@@ -50,7 +50,6 @@ namespace validation
namespace
{
constexpr AbsoluteTolerance<float> tolerance_f(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for floating point data types */
-constexpr AbsoluteTolerance<float> tolerance_q(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
/** CNN data types */
const auto CNNDataTypes = framework::dataset::make("DataType",
@@ -59,8 +58,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType",
DataType::F16,
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
DataType::F32,
- DataType::QS8,
- DataType::QS16,
});
const auto data_interleave = framework::dataset::make("M", 8, 12) * framework::dataset::make("N", 8, 12);
@@ -82,33 +79,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMTranspose1xWFixture, framework::DatasetMo
}
TEST_SUITE_END() // FP32
-TEST_SUITE(Quantized)
-TEST_SUITE(QS8)
-using NEGEMMTranspose1xW = NESynthetizeFunctionWithZeroConstantBorder<NEGEMMTranspose1xWKernel, 16>;
-using NEGEMMTranspose1xWFixture = GEMMTranspose1xWValidationFixedPointFixture<Tensor, Accessor, NEGEMMTranspose1xW, int8_t>;
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMTranspose1xWFixture, framework::DatasetMode::PRECOMMIT, data_transpose *
- framework::dataset::make("DataType", DataType::QS8)
- * framework::dataset::make("FractionalBits", 1, 7))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-using NEGEMMTranspose1xW = NESynthetizeFunctionWithZeroConstantBorder<NEGEMMTranspose1xWKernel, 8>;
-using NEGEMMTranspose1xWFixture = GEMMTranspose1xWValidationFixedPointFixture<Tensor, Accessor, NEGEMMTranspose1xW, int16_t>;
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMTranspose1xWFixture, framework::DatasetMode::PRECOMMIT, data_transpose *
- framework::dataset::make("DataType", DataType::QS16)
- * framework::dataset::make("FractionalBits", 1, 14))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-TEST_SUITE_END()
-
-TEST_SUITE_END()
-
TEST_SUITE_END() // TRANSPOSE_1XW
TEST_SUITE(INTERLEAVE_4X4)
@@ -123,31 +93,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMInterleave4x4Fixture, framework::DatasetM
}
TEST_SUITE_END() // FP32
-TEST_SUITE(Quantized)
-TEST_SUITE(QS8)
-using NEGEMMInterleave4x4Fixture = GEMMInterleave4x4ValidationFixedPointFixture<Tensor, Accessor, NEGEMMInterleave4x4, int8_t>;
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMInterleave4x4Fixture, framework::DatasetMode::PRECOMMIT, data_interleave *
- framework::dataset::make("DataType", DataType::QS8)
- * framework::dataset::make("FractionalBits", 1, 7))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-using NEGEMMInterleave4x4Fixture = GEMMInterleave4x4ValidationFixedPointFixture<Tensor, Accessor, NEGEMMInterleave4x4, int16_t>;
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMInterleave4x4Fixture, framework::DatasetMode::PRECOMMIT, data_interleave *
- framework::dataset::make("DataType", DataType::QS16)
- * framework::dataset::make("FractionalBits", 1, 14))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-TEST_SUITE_END()
-
-TEST_SUITE_END()
-
TEST_SUITE_END() // INTERLEAVE_4X4
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallGEMMDataset(), datasets::LargeGEMMDataset()), CNNDataTypes),
@@ -211,46 +156,6 @@ TEST_SUITE_END()
template <typename T>
using NEGEMMFixedPointFixture = GEMMValidationFixedPointFixture<Tensor, Accessor, NEGEMM, T>;
-TEST_SUITE(Quantized)
-TEST_SUITE(QS8)
-FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::TinyGEMMDataset(),
- framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 1, 7)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_q);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SmallGEMMDataset(),
- framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 1, 7)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_q);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::TinyGEMMDataset(),
- framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_q);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SmallGEMMDataset(),
- framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_q);
-}
-TEST_SUITE_END()
-TEST_SUITE_END()
-
TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
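
The "FractionalBits" axes above use the framework's half-open range convention — the same convention the "[1,6)" comments elsewhere in these tests spell out — so make("FractionalBits", 1, 7) enumerates positions 1 through 6. A minimal stand-in for that range dataset; make_range is a hypothetical helper, not the framework's implementation.

#include <iostream>
#include <vector>

std::vector<int> make_range(int start, int end) // half-open: [start, end)
{
    std::vector<int> values;
    for(int v = start; v < end; ++v)
    {
        values.push_back(v);
    }
    return values;
}

int main()
{
    for(int bits : make_range(1, 7))
    {
        std::cout << bits << ' '; // prints 1 2 3 4 5 6
    }
    std::cout << '\n';
}
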
diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp
index ed24d618f5..eb350e1029 100644
--- a/tests/validation/NEON/GEMMLowp.cpp
+++ b/tests/validation/NEON/GEMMLowp.cpp
@@ -102,7 +102,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, framework::dataset::c
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Input not a multiple of 4
- TensorInfo(TensorShape(21U, 13U), 1, DataType::QS8, 2), // Mismatching data type
+ TensorInfo(TensorShape(21U, 13U), 1, DataType::S32, 2), // Mismatching data type
TensorInfo(TensorShape(20U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Invalid dimensions
TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Invalid dimensions
TensorInfo(TensorShape(16U, 32U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)),
diff --git a/tests/validation/NEON/Im2Col.cpp b/tests/validation/NEON/Im2Col.cpp
index 3a45fa7ae4..d2e7875853 100644
--- a/tests/validation/NEON/Im2Col.cpp
+++ b/tests/validation/NEON/Im2Col.cpp
@@ -52,20 +52,18 @@ TEST_SUITE(Im2Col)
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
framework::dataset::make("InputInfo", { TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::U8), // Unsupported data type
TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::F32), // Mismatching data type
- TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point
TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::QASYMM8), // Bias not supported with QASYMM8
TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::QASYMM8), // Mismatching shapes
TensorInfo(TensorShape(10U, 12U, 2U, 2U), 1, DataType::QASYMM8),
}),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::F16),
TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::F16),
- TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::QS8, 3),
TensorInfo(TensorShape(3U, 3U, 10U, 2U), 1, DataType::QASYMM8),
TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::QASYMM8),
TensorInfo(TensorShape(18U, 80U, 1U, 2U), 1, DataType::QASYMM8),
})),
- framework::dataset::make("HasBias", { true, true, true, true, false, false })),
- framework::dataset::make("Expected", { false, false, false, false, false, true })),
+ framework::dataset::make("HasBias", { true, true, true, false, false })),
+ framework::dataset::make("Expected", { false, false, false, false, true })),
input_info, output_info, has_bias, expected)
{
bool status = bool(NEIm2Col::validate(&input_info, &output_info, Size2D(3U, 3U), PadStrideInfo(), has_bias, false, false));
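
The cases above validate NEIm2Col with 3x3 kernels. As a refresher on the transform itself: im2col flattens each kernel-sized patch so that convolution becomes a matrix multiply; when a bias is present the library's variants also carry an extra constant-1 entry per patch, which appears to be why bias with QASYMM8 is rejected above. The sketch below is a deliberately simplified single-channel version (stride 1, no padding, no bias entry), written for illustration rather than mirroring the library's layout.

#include <cstddef>
#include <iostream>
#include <vector>

// Each group of k*k values is one flattened patch; multiplying the result by
// a flattened kernel performs the convolution.
std::vector<float> im2col(const std::vector<float> &in, int w, int h, int k)
{
    const int out_w = w - k + 1;
    const int out_h = h - k + 1;
    std::vector<float> out;
    out.reserve(static_cast<std::size_t>(out_w) * out_h * k * k);
    for(int y = 0; y < out_h; ++y)
    {
        for(int x = 0; x < out_w; ++x)
        {
            for(int ky = 0; ky < k; ++ky)
            {
                for(int kx = 0; kx < k; ++kx)
                {
                    out.push_back(in[(y + ky) * w + (x + kx)]);
                }
            }
        }
    }
    return out;
}

int main()
{
    const std::vector<float> img = { 1, 2, 3, 4, 5, 6, 7, 8, 9 }; // 3x3 image
    const auto cols = im2col(img, 3, 3, 2);                       // four 2x2 patches
    std::cout << cols.size() << '\n';                             // prints 16
}
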
diff --git a/tests/validation/NEON/NormalizationLayer.cpp b/tests/validation/NEON/NormalizationLayer.cpp
index cf63675246..8c66611f49 100644
--- a/tests/validation/NEON/NormalizationLayer.cpp
+++ b/tests/validation/NEON/NormalizationLayer.cpp
@@ -48,9 +48,6 @@ namespace
constexpr AbsoluteTolerance<float> tolerance_f16(0.001f);
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
constexpr AbsoluteTolerance<float> tolerance_f32(0.00001f);
-/** Tolerance for fixed point operations */
-constexpr AbsoluteTolerance<int8_t> tolerance_qs8(2);
-constexpr AbsoluteTolerance<int16_t> tolerance_qs16(4);
/** Input data set. */
const auto NormalizationDatasetQS = combine(combine(combine(combine(datasets::TinyShapes(), datasets::NormalizationTypes()), framework::dataset::make("NormalizationSize", 3, 9, 2)),
@@ -76,7 +73,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching shapes
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Even normalization
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Non implemented IN_MAP_2D
- TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 4), // Mismatching fixed point position
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Window shrink
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 0),
}),
@@ -84,7 +80,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32, 0),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),
- TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 0),
})),
@@ -93,10 +88,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
NormalizationLayerInfo(NormType::IN_MAP_1D, 4),
NormalizationLayerInfo(NormType::IN_MAP_2D, 5),
NormalizationLayerInfo(NormType::IN_MAP_1D, 5),
- NormalizationLayerInfo(NormType::IN_MAP_1D, 5),
NormalizationLayerInfo(NormType::CROSS_MAP, 1),
})),
- framework::dataset::make("Expected", { false, false, false, false, false, false, true })),
+ framework::dataset::make("Expected", { false, false, false, false, false, true })),
input_info, output_info, norm_info, expected)
{
bool is_valid = bool(NENormalizationLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), norm_info));
@@ -141,44 +135,6 @@ TEST_SUITE_END()
template <typename T>
using NENormalizationLayerFixedPointFixture = NormalizationValidationFixedPointFixture<Tensor, Accessor, NENormalizationLayer, T>;
-TEST_SUITE(Quantized)
-TEST_SUITE(QS8)
-// Testing for fixed point position [1,6) as reciprocal limits the maximum fixed point position to 5
-FIXTURE_DATA_TEST_CASE(RunTiny, NENormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(NormalizationDatasetQS, framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 1, 6)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_qs8);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, NENormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(NormalizationDataset, framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 1, 6)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_qs8);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 13
-FIXTURE_DATA_TEST_CASE(RunTiny, NENormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(NormalizationDatasetQS, framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_qs16);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, NENormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(NormalizationDataset, framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_qs16);
-}
-TEST_SUITE_END()
-TEST_SUITE_END()
-
TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
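
For reference, the cross-map normalization these tests target follows the usual local response normalization form, out = in / (kappa + alpha/size * sum_of_squares)^beta. The sketch below uses that textbook formulation as a hypothetical free function; the library's exact coefficient handling may differ.

#include <algorithm>
#include <cmath>
#include <iostream>
#include <vector>

// Cross-map LRN over the per-channel activations at one pixel.
std::vector<float> cross_map_norm(const std::vector<float> &in, int size, float alpha, float beta, float kappa)
{
    const int          n = static_cast<int>(in.size());
    std::vector<float> out(in.size());
    for(int c = 0; c < n; ++c)
    {
        float sum_sq = 0.f;
        // Window of `size` channels centred on c, clamped at the borders
        for(int k = std::max(0, c - size / 2); k <= std::min(n - 1, c + size / 2); ++k)
        {
            sum_sq += in[k] * in[k];
        }
        out[c] = in[c] / std::pow(kappa + alpha / size * sum_sq, beta);
    }
    return out;
}

int main()
{
    const auto out = cross_map_norm({ 1.f, 2.f, 3.f, 4.f }, 3, 0.0001f, 0.75f, 1.f);
    std::cout << out[1] << '\n';
}
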
diff --git a/tests/validation/NEON/PixelWiseMultiplication.cpp b/tests/validation/NEON/PixelWiseMultiplication.cpp
index 9304c8bad0..77da473e0d 100644
--- a/tests/validation/NEON/PixelWiseMultiplication.cpp
+++ b/tests/validation/NEON/PixelWiseMultiplication.cpp
@@ -114,10 +114,6 @@ using NEPixelWiseMultiplicationToF16Fixture = PixelWiseMultiplicationValidationF
template <typename T>
using NEPixelWiseMultiplicationToF32Fixture = PixelWiseMultiplicationValidationFixture<Tensor, Accessor, NEPixelWiseMultiplication, T, float>;
template <typename T>
-using NEPixelWiseMultiplicationToQS8Fixture = PixelWiseMultiplicationValidationFixture<Tensor, Accessor, NEPixelWiseMultiplication, T, qint8_t>;
-template <typename T>
-using NEPixelWiseMultiplicationToQS16Fixture = PixelWiseMultiplicationValidationFixture<Tensor, Accessor, NEPixelWiseMultiplication, T, qint16_t>;
-template <typename T>
using NEPixelWiseMultiplicationBroadcastFixture = PixelWiseMultiplicationBroadcastValidationFixture<Tensor, Accessor, NEPixelWiseMultiplication, T, float>;
TEST_SUITE(NEON)
@@ -132,9 +128,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid scale
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching data type
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Invalid scale
+ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching data type
}),
framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
@@ -142,9 +136,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16),
TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS16, 2),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
+ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
})),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
@@ -152,12 +144,10 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
+ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
})),
- framework::dataset::make("Scale",{ scale_unity, scale_unity, scale_unity, -1.f, scale_unity, scale_unity, scale_unity, scale_unity, 3.f})),
- framework::dataset::make("Expected", { true, true, false, false, false, false, false, false, false })),
+ framework::dataset::make("Scale",{ scale_unity, scale_unity, scale_unity, -1.f, scale_unity, scale_unity, scale_unity})),
+ framework::dataset::make("Expected", { true, true, false, false, false, false, false })),
input1_info, input2_info, output_info, scale, expected)
{
bool has_error = bool(NEPixelWiseMultiplication::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), scale, ConvertPolicy::WRAP, RoundingPolicy::TO_ZERO));
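
The scale constraint driving the "Invalid scale" row above is the one documented for the reference implementation later in this patch: scale must be positive and equal to 1/255 or 1/2^n with n in [0, 15]. A sketch of such a check; is_valid_pwm_scale is a hypothetical helper, not the library's validate() logic.

#include <iostream>

bool is_valid_pwm_scale(float scale)
{
    if(scale == 1.f / 255.f)
    {
        return true;
    }
    for(int n = 0; n <= 15; ++n)
    {
        if(scale == 1.f / static_cast<float>(1 << n))
        {
            return true;
        }
    }
    return false;
}

int main()
{
    std::cout << is_valid_pwm_scale(1.f) << ' '          // 1 (n = 0)
              << is_valid_pwm_scale(1.f / 255.f) << ' '  // 1
              << is_valid_pwm_scale(-1.f) << '\n';       // 0, the "Invalid scale" case above
}
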
diff --git a/tests/validation/NEON/PoolingLayer.cpp b/tests/validation/NEON/PoolingLayer.cpp
index b44f945eb7..8762f1f7cc 100644
--- a/tests/validation/NEON/PoolingLayer.cpp
+++ b/tests/validation/NEON/PoolingLayer.cpp
@@ -65,8 +65,6 @@ constexpr AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value f
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
constexpr AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for float types */
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-constexpr AbsoluteTolerance<float> tolerance_qs8(0); /**< Tolerance value for comparing reference's output against implementation's output for quantized input */
-constexpr AbsoluteTolerance<float> tolerance_qs16(0); /**< Tolerance value for comparing reference's output against implementation's output for quantized input */
constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1); /**< Tolerance value for comparing reference's output against implementation's output for 8-bit asymmetric type */
} // namespace
@@ -80,8 +78,6 @@ TEST_SUITE(PoolingLayer)
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Window shrink
- TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 4), // Mismatching fixed point position
- TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS16, 11), // Window shrink
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid pad/size combination
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid pad/size combination
TensorInfo(TensorShape(15U, 13U, 5U), 1, DataType::F32, 0), // Non-rectangular Global Pooling
@@ -90,8 +86,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
}),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16, 0),
TensorInfo(TensorShape(25U, 10U, 2U), 1, DataType::F32, 0),
- TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::QS8, 5),
- TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::QS16, 11),
TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32, 0),
TensorInfo(TensorShape(25U, 16U, 2U), 1, DataType::F32, 0),
TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32, 0),
@@ -100,15 +94,13 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
})),
framework::dataset::make("PoolInfo", { PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)),
PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)),
- PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)),
- PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)),
PoolingLayerInfo(PoolingType::AVG, 2, PadStrideInfo(1, 1, 2, 0)),
PoolingLayerInfo(PoolingType::AVG, 2, PadStrideInfo(1, 1, 0, 2)),
PoolingLayerInfo(PoolingType::AVG),
PoolingLayerInfo(PoolingType::MAX),
PoolingLayerInfo(PoolingType::AVG),
})),
- framework::dataset::make("Expected", { false, false, false, false, false, false, true, false, false, true })),
+ framework::dataset::make("Expected", { false, false, false, false, true, false, false, true })),
input_info, output_info, pool_info, expected)
{
bool is_valid = bool(NEPoolingLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), pool_info));
@@ -170,42 +162,6 @@ TEST_SUITE_END() // Float
template <typename T>
using NEPoolingLayerFixedPointFixture = PoolingLayerValidationFixedPointFixture<Tensor, Accessor, NEPoolingLayer, T>;
-TEST_SUITE(FixedPoint)
-TEST_SUITE(QS8)
-FIXTURE_DATA_TEST_CASE(RunTiny, NEPoolingLayerFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapes(), combine(PoolingLayerDatasetQS,
- framework::dataset::make("DataType", DataType::QS8))),
- framework::dataset::make("FractionalBits", 1, 5)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_qs8);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEPoolingLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SmallShapes(), combine(PoolingLayerDatasetQS,
- framework::dataset::make("DataType", DataType::QS8))),
- framework::dataset::make("FractionalBits", 1, 5)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_qs8);
-}
-TEST_SUITE_END() // QS8
-
-TEST_SUITE(QS16)
-FIXTURE_DATA_TEST_CASE(RunTiny, NEPoolingLayerFixedPointFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapes(), combine(PoolingLayerDatasetQS,
- framework::dataset::make("DataType", DataType::QS16))),
- framework::dataset::make("FractionalBits", 1, 13)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_qs16);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEPoolingLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SmallShapes(), combine(PoolingLayerDatasetQS,
- framework::dataset::make("DataType", DataType::QS16))),
- framework::dataset::make("FractionalBits", 1, 13)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_qs16);
-}
-TEST_SUITE_END() // QS16
-TEST_SUITE_END() // FixedPoint
-
TEST_SUITE(Quantized)
template <typename T>
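
The "Invalid pad/size combination" rows above pair a 2x2 pool with a padding of 2, i.e. padding not smaller than the pool size, so a window could sit entirely over padded values. The usual output-extent arithmetic, as an illustrative helper (pooled_extent is hypothetical, not the library's shape calculator):

#include <iostream>

int pooled_extent(int in, int pool, int pad_total, int stride)
{
    return (in + pad_total - pool) / stride + 1;
}

int main()
{
    std::cout << pooled_extent(27, 3, 0, 1) << '\n'; // 25, matching the 27 -> 25 shapes above
}
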
diff --git a/tests/validation/NEON/SoftmaxLayer.cpp b/tests/validation/NEON/SoftmaxLayer.cpp
index e091c0268a..4a1aa23ac1 100644
--- a/tests/validation/NEON/SoftmaxLayer.cpp
+++ b/tests/validation/NEON/SoftmaxLayer.cpp
@@ -47,8 +47,6 @@ constexpr AbsoluteTolerance<float> tolerance_f32(0.000001f);
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
constexpr AbsoluteTolerance<float> tolerance_f16(0.0001f);
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC*/
-/** Tolerance for fixed point operations */
-constexpr AbsoluteTolerance<int16_t> tolerance_fixed_point(2);
/** Tolerance for quantized operations */
constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1);
@@ -60,8 +58,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType",
DataType::F16,
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
DataType::F32,
- DataType::QS8,
- DataType::QS16,
});
} // namespace
@@ -101,15 +97,13 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(concat(datase
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data types
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
- TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point
TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32),
}),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16),
TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3),
TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32),
})),
- framework::dataset::make("Expected", { false, false, false, true })),
+ framework::dataset::make("Expected", { false, false, true })),
input_info, output_info, expected)
{
bool is_valid = bool(NESoftmaxLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false)));
@@ -162,46 +156,6 @@ TEST_SUITE_END()
template <typename T>
using NESoftmaxLayerFixedPointFixture = SoftmaxValidationFixedPointFixture<Tensor, Accessor, NESoftmaxLayer, T>;
-TEST_SUITE(FixedPoint)
-TEST_SUITE(QS8)
-// Testing for fixed point position [1,6) as reciprocal limits the maximum fixed point position to 5
-FIXTURE_DATA_TEST_CASE(RunTiny, NESoftmaxLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SoftmaxLayerTinyShapes(), framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 1, 6)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_fixed_point);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, NESoftmaxLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SoftmaxLayerSmallShapes(), framework::dataset::make("DataType",
- DataType::QS8)),
- framework::dataset::make("FractionalBits", 1, 6)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_fixed_point);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 13
-FIXTURE_DATA_TEST_CASE(RunTiny, NESoftmaxLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SoftmaxLayerTinyShapes(),
- framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_fixed_point);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, NESoftmaxLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SoftmaxLayerSmallShapes(),
- framework::dataset::make("DataType",
- DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_fixed_point);
-}
-TEST_SUITE_END()
-TEST_SUITE_END()
-
template <typename T>
using NESoftmaxLayerQuantizedFixture = SoftmaxValidationQuantizedFixture<Tensor, Accessor, NESoftmaxLayer, T>;
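
The comments in the suites deleted above tie the maximum fixed point position to the reciprocal of the exponential sum, which is the denominator of softmax. For reference, here is the floating-point computation those fixed-point tests approximated, in the usual max-subtracted numerically stable form.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

std::vector<float> softmax(const std::vector<float> &in)
{
    // Subtracting the maximum keeps exp() from overflowing without changing the result
    float max_val = in[0];
    for(float v : in)
    {
        max_val = std::max(max_val, v);
    }
    float              sum = 0.f;
    std::vector<float> out(in.size());
    for(std::size_t i = 0; i < in.size(); ++i)
    {
        out[i] = std::exp(in[i] - max_val);
        sum += out[i];
    }
    for(float &v : out)
    {
        v /= sum; // the reciprocal of this sum is what bounded the fixed point position
    }
    return out;
}

int main()
{
    for(float v : softmax({ 1.f, 2.f, 3.f }))
    {
        std::cout << v << ' '; // ~0.09 0.24 0.67
    }
    std::cout << '\n';
}
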
diff --git a/tests/validation/UNIT/FixedPoint.cpp b/tests/validation/UNIT/FixedPoint.cpp
deleted file mode 100644
index 66a4b25fc9..0000000000
--- a/tests/validation/UNIT/FixedPoint.cpp
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "tests/validation/FixedPoint.h"
-
-#include "tests/Globals.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Macros.h"
-#include "tests/framework/datasets/Datasets.h"
-#include "tests/validation/Validation.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-namespace
-{
-const auto FuncNamesDataset = framework::dataset::make("FunctionNames", { FixedPointOp::ADD,
- FixedPointOp::SUB,
- FixedPointOp::MUL,
- FixedPointOp::EXP,
- FixedPointOp::LOG,
- FixedPointOp::INV_SQRT
- });
-
-template <typename T>
-void load_array_from_numpy(const std::string &file, std::vector<unsigned long> &shape, std::vector<T> &data) // NOLINT
-{
- try
- {
- npy::LoadArrayFromNumpy(file, shape, data);
- }
- catch(const std::runtime_error &e)
- {
- throw framework::FileNotFound("Could not load npy file: " + file + " (" + e.what() + ")");
- }
-}
-} // namespace
-
-TEST_SUITE(UNIT)
-TEST_SUITE(FixedPoint)
-
-// *INDENT-OFF*
-// clang-format off
-DATA_TEST_CASE(FixedPointQS8Inputs, framework::DatasetMode::ALL, combine(
- FuncNamesDataset,
- framework::dataset::make("FractionalBits", 1, 7)),
- func_name, frac_bits)
-// clang-format on
-// *INDENT-ON*
-{
- std::vector<double> data;
- std::vector<unsigned long> shape; //NOLINT
-
- std::string func_name_lower = to_string(func_name);
- std::transform(func_name_lower.begin(), func_name_lower.end(), func_name_lower.begin(), ::tolower);
-
- const std::string inputs_file = library->path()
- + "fixed_point/"
- + func_name_lower
- + "_Q8."
- + support::cpp11::to_string(frac_bits)
- + ".in.npy";
-
- load_array_from_numpy(inputs_file, shape, data);
-
- // Values stored as doubles so reinterpret as floats
- const auto *float_val = reinterpret_cast<float *>(&data[0]);
- const size_t num_elements = data.size() * sizeof(double) / sizeof(float);
-
- for(unsigned int i = 0; i < num_elements; ++i)
- {
- // Convert to fixed point
- fixed_point_arithmetic::fixed_point<int8_t> in_val(float_val[i], frac_bits);
-
- // Check that the value didn't change
- ARM_COMPUTE_EXPECT(static_cast<float>(in_val) == float_val[i], framework::LogLevel::ERRORS);
- }
-}
-
-//FIXME: Figure out how to handle expected failures properly
-// The last input argument specifies the expected number of failures for a
-// given combination of (function name, number of fractional bits) as defined
-// by the first two arguments.
-
-// *INDENT-OFF*
-// clang-format off
-DATA_TEST_CASE(FixedPointQS8Outputs, framework::DatasetMode::ALL, zip(combine(
- FuncNamesDataset,
- framework::dataset::make("FractionalBits", 1, 7)),
- framework::dataset::make("ExpectedFailures", { 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0,
- 7, 8, 13, 2, 0, 0,
- 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 5, 33, 96 })),
- func_name, frac_bits, expected_failures)
-// clang-format on
-// *INDENT-ON*
-{
- std::vector<double> in_data;
- std::vector<unsigned long> in_shape; //NOLINT
-
- std::vector<double> out_data;
- std::vector<unsigned long> out_shape; //NOLINT
-
- std::string func_name_lower = to_string(func_name);
- std::transform(func_name_lower.begin(), func_name_lower.end(), func_name_lower.begin(), ::tolower);
-
- const std::string base_file_name = library->path()
- + "fixed_point/"
- + func_name_lower
- + "_Q8."
- + support::cpp11::to_string(frac_bits);
-
- const std::string inputs_file = base_file_name + ".in.npy";
- const std::string reference_file = base_file_name + ".out.npy";
-
- load_array_from_numpy(inputs_file, in_shape, in_data);
- load_array_from_numpy(reference_file, out_shape, out_data);
-
- ARM_COMPUTE_EXPECT(in_shape.front() == out_shape.front(), framework::LogLevel::ERRORS);
-
- const float step_size = std::pow(2.f, -frac_bits);
- int64_t num_mismatches = 0;
-
- // Values stored as doubles so reinterpret as floats
- const auto *float_val = reinterpret_cast<float *>(&in_data[0]);
- const auto *ref_val = reinterpret_cast<float *>(&out_data[0]);
-
- const size_t num_elements = in_data.size() * sizeof(double) / sizeof(float);
-
- for(unsigned int i = 0; i < num_elements; ++i)
- {
- fixed_point_arithmetic::fixed_point<int8_t> in_val(float_val[i], frac_bits);
- fixed_point_arithmetic::fixed_point<int8_t> out_val(0.f, frac_bits);
-
- float tolerance = 0.f;
-
- if(func_name == FixedPointOp::ADD)
- {
- out_val = in_val + in_val;
- }
- else if(func_name == FixedPointOp::SUB)
- {
- out_val = in_val - in_val; //NOLINT
- }
- else if(func_name == FixedPointOp::MUL)
- {
- tolerance = 1.f * step_size;
- out_val = in_val * in_val;
- }
- else if(func_name == FixedPointOp::EXP)
- {
- tolerance = 2.f * step_size;
- out_val = fixed_point_arithmetic::exp(in_val);
- }
- else if(func_name == FixedPointOp::LOG)
- {
- tolerance = 4.f * step_size;
- out_val = fixed_point_arithmetic::log(in_val);
- }
- else if(func_name == FixedPointOp::INV_SQRT)
- {
- tolerance = 5.f * step_size;
- out_val = fixed_point_arithmetic::inv_sqrt(in_val);
- }
-
- if(std::abs(static_cast<float>(out_val) - ref_val[i]) > tolerance)
- {
- ARM_COMPUTE_TEST_INFO("input = " << in_val);
- ARM_COMPUTE_TEST_INFO("output = " << out_val);
- ARM_COMPUTE_TEST_INFO("reference = " << ref_val[i]);
- ARM_COMPUTE_TEST_INFO("tolerance = " << tolerance);
-
- ARM_COMPUTE_TEST_INFO((std::abs(static_cast<float>(out_val) - ref_val[i]) <= tolerance));
-
- ++num_mismatches;
- }
- }
-
- ARM_COMPUTE_EXPECT(num_mismatches == expected_failures, framework::LogLevel::ERRORS);
-}
-
-TEST_SUITE_END()
-TEST_SUITE_END()
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
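
The FixedPointQS8Inputs case deleted above converted each float input to a QS8 value and required the round trip to be exact. A self-contained version of that check, with to_qs8/from_qs8 as hypothetical helpers standing in for the deleted fixed_point<> class:

#include <cmath>
#include <cstdint>
#include <iostream>

int8_t to_qs8(float v, int frac_bits)
{
    return static_cast<int8_t>(std::lround(v * (1 << frac_bits)));
}

float from_qs8(int8_t raw, int frac_bits)
{
    return static_cast<float>(raw) / (1 << frac_bits);
}

int main()
{
    const int    frac_bits = 3;      // Q4.3
    const float  in        = 0.625f; // 5 * 2^-3, exactly representable
    const int8_t raw       = to_qs8(in, frac_bits);
    // The deleted test asserted that exactly-representable inputs survive the round trip
    std::cout << (from_qs8(raw, frac_bits) == in) << '\n'; // prints 1
}
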
diff --git a/tests/validation/UNIT/TensorInfo.cpp b/tests/validation/UNIT/TensorInfo.cpp
index dd7ae6d18c..b78f656932 100644
--- a/tests/validation/UNIT/TensorInfo.cpp
+++ b/tests/validation/UNIT/TensorInfo.cpp
@@ -112,12 +112,6 @@ TEST_CASE(TensorInfoBuild, framework::DatasetMode::ALL)
ARM_COMPUTE_EXPECT(info.data_type() == DataType::S32, framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(info.num_channels() == 3, framework::LogLevel::ERRORS);
- // Update data type channels and set fixed point position
- info.set_data_type(DataType::QS8).set_num_channels(1).set_fixed_point_position(3);
- ARM_COMPUTE_EXPECT(info.data_type() == DataType::QS8, framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(info.num_channels() == 1, framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(info.fixed_point_position() == 3, framework::LogLevel::ERRORS);
-
// Update data type and set quantization info
info.set_data_type(DataType::QASYMM8).set_quantization_info(QuantizationInfo(0.5f, 15));
ARM_COMPUTE_EXPECT(info.data_type() == DataType::QASYMM8, framework::LogLevel::ERRORS);
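
The TensorInfoBuild case relies on each setter returning a reference to the object so that calls chain, as in set_data_type(...).set_quantization_info(...). A minimal sketch of that builder-style pattern; Info is a hypothetical type, not the library's TensorInfo.

#include <iostream>

struct Info
{
    int   channels{ 1 };
    float scale{ 1.f };

    Info &set_num_channels(int c)
    {
        channels = c;
        return *this; // returning *this is what enables chaining
    }
    Info &set_scale(float s)
    {
        scale = s;
        return *this;
    }
};

int main()
{
    Info info;
    info.set_num_channels(3).set_scale(0.5f);
    std::cout << info.channels << ' ' << info.scale << '\n'; // prints 3 0.5
}
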
diff --git a/tests/validation/Validation.cpp b/tests/validation/Validation.cpp
index d01ac12f97..0bc1d96306 100644
--- a/tests/validation/Validation.cpp
+++ b/tests/validation/Validation.cpp
@@ -62,14 +62,10 @@ double get_double_data(const void *ptr, DataType data_type)
return *reinterpret_cast<const uint8_t *>(ptr);
case DataType::S8:
return *reinterpret_cast<const int8_t *>(ptr);
- case DataType::QS8:
- return *reinterpret_cast<const qint8_t *>(ptr);
case DataType::U16:
return *reinterpret_cast<const uint16_t *>(ptr);
case DataType::S16:
return *reinterpret_cast<const int16_t *>(ptr);
- case DataType::QS16:
- return *reinterpret_cast<const qint16_t *>(ptr);
case DataType::U32:
return *reinterpret_cast<const uint32_t *>(ptr);
case DataType::S32:
diff --git a/tests/validation/fixtures/FixedPointFixture.h b/tests/validation/fixtures/FixedPointFixture.h
deleted file mode 100644
index 3f3bb6e0b7..0000000000
--- a/tests/validation/fixtures/FixedPointFixture.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_FIXED_POINT_FIXTURE
-#define ARM_COMPUTE_TEST_FIXED_POINT_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/Helpers.h"
-#include "tests/validation/reference/FixedPoint.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename T>
-class FixedPointValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape shape, DataType dt, FixedPointOp op, int fractional_bits)
- {
- _fractional_bits = fractional_bits;
- _target = compute_target(shape, dt, op, fractional_bits);
- _reference = compute_reference(shape, dt, op, fractional_bits);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor, int min, int max, int i)
- {
- std::uniform_int_distribution<> distribution(min, max);
- library->fill(tensor, distribution, i);
- }
-
- TensorType compute_target(const TensorShape &shape, DataType dt, FixedPointOp op, int fixed_point_position)
- {
- // Create tensors
- TensorType src = create_tensor<TensorType>(shape, dt, 1, fixed_point_position);
- TensorType dst = create_tensor<TensorType>(shape, dt, 1, fixed_point_position);
-
- // Allocate tensors
- src.allocator()->allocate();
- dst.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- compute_target_impl<TensorType, AccessorType, T>(shape, dt, op, fixed_point_position, src, dst);
-
- return dst;
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &shape, DataType dt, FixedPointOp op, int fixed_point_position)
- {
- // Create reference
- SimpleTensor<T> src{ shape, dt, 1, fixed_point_position };
-
- // Fill reference
- int min = 0;
- int max = 0;
- switch(op)
- {
- case FixedPointOp::EXP:
- min = -(1 << (fixed_point_position - 1));
- max = (1 << (fixed_point_position - 1));
- break;
- case FixedPointOp::INV_SQRT:
- min = 1;
- max = (dt == DataType::QS8) ? 0x7F : 0x7FFF;
- break;
- case FixedPointOp::LOG:
- min = (1 << (fixed_point_position - 1));
- max = (dt == DataType::QS8) ? 0x3F : 0x3FFF;
- break;
- case FixedPointOp::RECIPROCAL:
- min = 15;
- max = (dt == DataType::QS8) ? 0x7F : 0x7FFF;
- break;
- default:
- ARM_COMPUTE_ERROR("Fixed point operation not supported");
- break;
- }
- fill(src, min, max, 0);
-
- return reference::fixed_point_operation<T>(src, op);
- }
-
- TensorType _target{};
- SimpleTensor<T> _reference{};
- int _fractional_bits{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_FIXED_POINT_FIXTURE */
diff --git a/tests/validation/fixtures/FixedPointPixelWiseMultiplicationFixture.h b/tests/validation/fixtures/FixedPointPixelWiseMultiplicationFixture.h
deleted file mode 100644
index 73386dfc97..0000000000
--- a/tests/validation/fixtures/FixedPointPixelWiseMultiplicationFixture.h
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_FIXED_POINT_PIXEL_WISE_MULTIPLICATION_FIXTURE
-#define ARM_COMPUTE_TEST_FIXED_POINT_PIXEL_WISE_MULTIPLICATION_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/FixedPointPixelWiseMultiplication.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class FixedPointPixelWiseMultiplicationValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape shape,
- DataType dt_in1,
- DataType dt_in2,
- float scale,
- ConvertPolicy convert_policy,
- RoundingPolicy rounding_policy,
- int fixed_point_position)
- {
- _target = compute_target(shape, dt_in1, dt_in2, scale, convert_policy, rounding_policy, fixed_point_position);
- _reference = compute_reference(shape, dt_in1, dt_in2, scale, convert_policy, fixed_point_position);
- }
-
-protected:
- template <typename U>
- void fill(U &&tensor, unsigned int seed_offset)
- {
- library->fill_tensor_uniform(tensor, seed_offset);
- }
-
- TensorType compute_target(const TensorShape &shape, DataType dt_in1, DataType dt_in2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, int fixed_point_position)
- {
- // Create tensors
- TensorType src1 = create_tensor<TensorType>(shape, dt_in1, 1, fixed_point_position);
- TensorType src2 = create_tensor<TensorType>(shape, dt_in2, 1, fixed_point_position);
- TensorType dst = create_tensor<TensorType>(shape, dt_in2, 1, fixed_point_position);
-
- // Create and configure function
- FunctionType multiply;
- multiply.configure(&src1, &src2, &dst, scale, convert_policy, rounding_policy);
-
- ARM_COMPUTE_EXPECT(src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src1.allocator()->allocate();
- src2.allocator()->allocate();
- dst.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src1), 0);
- fill(AccessorType(src2), 1);
-
- // Compute function
- multiply.run();
-
- return dst;
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &shape, DataType dt_in1, DataType dt_in2, float scale, ConvertPolicy convert_policy, int fixed_point_position)
- {
- // Create reference
- SimpleTensor<T> src1{ shape, dt_in1, 1, fixed_point_position };
- SimpleTensor<T> src2{ shape, dt_in2, 1, fixed_point_position };
-
- // Fill reference
- fill(src1, 0);
- fill(src2, 1);
-
- return reference::fixed_point_pixel_wise_multiplication<T>(src1, src2, scale, convert_policy);
- }
-
- TensorType _target{};
- SimpleTensor<T> _reference{};
-};
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_FIXED_POINT_PIXEL_WISE_MULTIPLICATION_FIXTURE */
diff --git a/tests/validation/reference/FixedPointPixelWiseMultiplication.cpp b/tests/validation/reference/FixedPointPixelWiseMultiplication.cpp
deleted file mode 100644
index 636919b1ff..0000000000
--- a/tests/validation/reference/FixedPointPixelWiseMultiplication.cpp
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "FixedPointPixelWiseMultiplication.h"
-
-#include "tests/validation/FixedPoint.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-namespace reference
-{
-template <typename T>
-SimpleTensor<T> fixed_point_pixel_wise_multiplication(const SimpleTensor<T> &src1, const SimpleTensor<T> &src2, float scale, ConvertPolicy convert_policy)
-{
- using namespace fixed_point_arithmetic;
-
- SimpleTensor<T> dst(src2.shape(), src2.data_type(), 1, src2.fixed_point_position());
-
- const int fixed_point_position = src1.fixed_point_position();
-
- ARM_COMPUTE_ERROR_ON_MSG(src1.data_type() != src2.data_type() || src1.data_type() != dst.data_type(),
- "Tensors must all have the same DataType");
- ARM_COMPUTE_ERROR_ON_MSG(fixed_point_position != src2.fixed_point_position() || fixed_point_position != dst.fixed_point_position(),
- "Fixed-point position must be the same for both inputs and outputs");
-
- // Validate fixed_point_position
- ARM_COMPUTE_ERROR_ON((src1.data_type() == DataType::QS8) && (fixed_point_position == 0 || fixed_point_position > 7));
- ARM_COMPUTE_ERROR_ON((src1.data_type() == DataType::QS16) && (fixed_point_position == 0 || fixed_point_position > 15));
-
- const fixed_point<T> fp_scale(scale, fixed_point_position);
- const bool is_sat = convert_policy == ConvertPolicy::SATURATE;
-
- for(int i = 0; i < src1.num_elements(); ++i)
- {
- const fixed_point<T> val1(src1[i], fixed_point_position, true);
- fixed_point<T> res(src2[i], fixed_point_position, true);
- if(is_sat)
- {
- res = mul(mul(res, val1), fp_scale);
- }
- else
- {
- res = mul<OverflowPolicy::WRAP>(mul<OverflowPolicy::WRAP>(res, val1), fp_scale);
- }
- dst[i] = res.raw();
- }
-
- return dst;
-}
-
-// *INDENT-OFF*
-// clang-format off
-template SimpleTensor<qint8_t> fixed_point_pixel_wise_multiplication(const SimpleTensor<qint8_t> &src1, const SimpleTensor<qint8_t> &src2, float scale, ConvertPolicy convert_policy);
-template SimpleTensor<qint16_t> fixed_point_pixel_wise_multiplication(const SimpleTensor<qint16_t> &src1, const SimpleTensor<qint16_t> &src2, float scale, ConvertPolicy convert_policy);
-// *INDENT-ON*
-// clang-format on
-
-} // namespace reference
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
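
The reference implementation deleted above switched between saturating and wrapping multiplies on ConvertPolicy. The two overflow policies are easiest to see on a plain int8 addition; add_sat and add_wrap below are hypothetical helpers for illustration, not the deleted mul<OverflowPolicy> templates.

#include <algorithm>
#include <cstdint>
#include <iostream>

int8_t add_sat(int8_t a, int8_t b)
{
    const int32_t sum = static_cast<int32_t>(a) + b;
    return static_cast<int8_t>(std::clamp<int32_t>(sum, INT8_MIN, INT8_MAX)); // SATURATE: clamp at the type bounds
}

int8_t add_wrap(int8_t a, int8_t b)
{
    return static_cast<int8_t>(static_cast<uint8_t>(a) + static_cast<uint8_t>(b)); // WRAP: modulo 256
}

int main()
{
    std::cout << static_cast<int>(add_sat(100, 100)) << ' '    // 127
              << static_cast<int>(add_wrap(100, 100)) << '\n'; // -56
}
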
diff --git a/tests/validation/reference/FixedPointPixelWiseMultiplication.h b/tests/validation/reference/FixedPointPixelWiseMultiplication.h
deleted file mode 100644
index 124a33c9da..0000000000
--- a/tests/validation/reference/FixedPointPixelWiseMultiplication.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_TEST_FIXED_POINT_PIXEL_WISE_MULTIPLICATION_H__
-#define __ARM_COMPUTE_TEST_FIXED_POINT_PIXEL_WISE_MULTIPLICATION_H__
-
-#include "tests/SimpleTensor.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-namespace reference
-{
-template <typename T>
-SimpleTensor<T> fixed_point_pixel_wise_multiplication(const SimpleTensor<T> &src1, const SimpleTensor<T> &src2, float scale, ConvertPolicy convert_policy);
-} // namespace reference
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* __ARM_COMPUTE_TEST_FIXED_POINT_PIXEL_WISE_MULTIPLICATION_H__ */
diff --git a/tests/validation/reference/PixelWiseMultiplication.cpp b/tests/validation/reference/PixelWiseMultiplication.cpp
index 546a886ac9..7304fb0673 100644
--- a/tests/validation/reference/PixelWiseMultiplication.cpp
+++ b/tests/validation/reference/PixelWiseMultiplication.cpp
@@ -45,10 +45,10 @@ namespace
{
/** Compute the result of `src1 * src2 * scale`. The result type always matches the type of @p src2.
*
- * @param[in] src1 An input value. Data types supported: U8/QS8/QS16/S16/F16/F32.
+ * @param[in] src1 An input value. Data types supported: U8/S16/F16/F32.
* @param[in] src2 An input value. Data types supported: same as @p src1.
* @param[in] scale Scale to apply after multiplication.
- * Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15. For QS8 and QS16 scale must be 1.
+ * Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
* @param[in] convert_policy Overflow policy. Supported overflow policies: Wrap, Saturate
* @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
*/
diff --git a/utils/GraphUtils.cpp b/utils/GraphUtils.cpp
index 0edb6f2a56..b51e122d61 100644
--- a/utils/GraphUtils.cpp
+++ b/utils/GraphUtils.cpp
@@ -322,7 +322,6 @@ bool RandomAccessor::access_tensor(ITensor &tensor)
break;
}
case DataType::S8:
- case DataType::QS8:
{
std::uniform_int_distribution<int8_t> distribution_s8(_lower.get<int8_t>(), _upper.get<int8_t>());
fill<int8_t>(tensor, distribution_s8);
@@ -335,7 +334,6 @@ bool RandomAccessor::access_tensor(ITensor &tensor)
break;
}
case DataType::S16:
- case DataType::QS16:
{
std::uniform_int_distribution<int16_t> distribution_s16(_lower.get<int16_t>(), _upper.get<int16_t>());
fill<int16_t>(tensor, distribution_s16);
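
One portability note on the fills above: std::uniform_int_distribution is only specified for short/int/long/long long and their unsigned counterparts, so instantiating it with int8_t (as the S8 case does) is technically outside the standard. A sketch of the cast-based alternative:

#include <cstdint>
#include <iostream>
#include <random>

int main()
{
    std::mt19937 gen(42);
    // Draw from a well-specified 32-bit distribution, then narrow to int8_t
    std::uniform_int_distribution<int32_t> dist(-128, 127);
    const int8_t value = static_cast<int8_t>(dist(gen));
    std::cout << static_cast<int>(value) << '\n';
}
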
diff --git a/utils/TypePrinter.h b/utils/TypePrinter.h
index f74623ea69..2bd30d53f8 100644
--- a/utils/TypePrinter.h
+++ b/utils/TypePrinter.h
@@ -545,9 +545,6 @@ inline ::std::ostream &operator<<(::std::ostream &os, const DataType &data_type)
case DataType::U8:
os << "U8";
break;
- case DataType::QS8:
- os << "QS8";
- break;
case DataType::QASYMM8:
os << "QASYMM8";
break;
@@ -560,9 +557,6 @@ inline ::std::ostream &operator<<(::std::ostream &os, const DataType &data_type)
case DataType::S16:
os << "S16";
break;
- case DataType::QS16:
- os << "QS16";
- break;
case DataType::U32:
os << "U32";
break;