From 0fc25454b6ce499b7f89792f91b81a61a42d3182 Mon Sep 17 00:00:00 2001
From: Vidhya Sudhan Loganathan
Date: Mon, 18 Jun 2018 14:40:56 +0100
Subject: COMPMID-970 : Remove QS8 / QS16 support

Remove QS8 and QS16 validation and benchmark tests

Change-Id: I566f1474c1fafcb3903115ec2d3a003d73e4c93b
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/133762
Tested-by: Jenkins
Reviewed-by: Pablo Tello
---
 tests/validation/CL/ActivationLayer.cpp            |  65 +-----
 tests/validation/CL/ArithmeticAddition.cpp         |  50 +----
 tests/validation/CL/ArithmeticSubtraction.cpp      |  52 -----
 tests/validation/CL/BatchNormalizationLayer.cpp    |  58 +-----
 tests/validation/CL/ConvolutionLayer.cpp           |  55 -----
 tests/validation/CL/DeconvolutionLayer.cpp         |   4 +-
 tests/validation/CL/DepthConcatenateLayer.cpp      |  36 ----
 tests/validation/CL/DepthConvertLayer.cpp          | 126 ------------
 tests/validation/CL/DilatedConvolutionLayer.cpp    |  57 ------
 tests/validation/CL/DirectConvolutionLayer.cpp     |  26 ---
 tests/validation/CL/FixedPoint/FixedPointTarget.h  | 131 ------------
 tests/validation/CL/FixedPoint/FixedPoint_QS8.cpp  |  96 ---------
 tests/validation/CL/Flatten.cpp                    |  32 ---
 tests/validation/CL/FullyConnectedLayer.cpp        |  60 +-----
 tests/validation/CL/GEMM.cpp                       |  95 ---------
 tests/validation/CL/Im2Col.cpp                     |   6 +-
 tests/validation/CL/NormalizationLayer.cpp         |  47 +----
 tests/validation/CL/PixelWiseMultiplication.cpp    |  37 +---
 tests/validation/CL/PoolingLayer.cpp               |  51 +----
 tests/validation/CL/SoftmaxLayer.cpp               |  51 +----
 tests/validation/CL/WidthConcatenateLayer.cpp      |  36 ----
 tests/validation/Helpers.h                         |  20 --
 tests/validation/NEON/ActivationLayer.cpp          |  57 +-----
 tests/validation/NEON/ArithmeticAddition.cpp       |  50 +----
 tests/validation/NEON/ArithmeticSubtraction.cpp    |  54 +----
 tests/validation/NEON/BatchNormalizationLayer.cpp  |  60 +-----
 tests/validation/NEON/Col2Im.cpp                   |   4 +-
 tests/validation/NEON/ConvolutionLayer.cpp         |  49 -----
 tests/validation/NEON/DeconvolutionLayer.cpp       |   4 +-
 tests/validation/NEON/DepthConcatenateLayer.cpp    |  36 ----
 tests/validation/NEON/DepthConvertLayer.cpp        | 143 +------------
 tests/validation/NEON/DilatedConvolutionLayer.cpp  |  53 -----
 tests/validation/NEON/DirectConvolutionLayer.cpp   |  46 -----
 tests/validation/NEON/FixedPoint/FixedPoint.cpp    | 135 -------------
 .../validation/NEON/FixedPoint/FixedPointTarget.h  | 225 ---------------------
 .../NEON/FixedPointPixelWiseMultiplication.cpp     | 147 --------------
 tests/validation/NEON/Flatten.cpp                  |  32 ---
 tests/validation/NEON/FullyConnectedLayer.cpp      |  60 +-----
 tests/validation/NEON/GEMM.cpp                     |  95 ---------
 tests/validation/NEON/GEMMLowp.cpp                 |   2 +-
 tests/validation/NEON/Im2Col.cpp                   |   6 +-
 tests/validation/NEON/NormalizationLayer.cpp       |  46 +----
 tests/validation/NEON/PixelWiseMultiplication.cpp  |  20 +-
 tests/validation/NEON/PoolingLayer.cpp             |  46 +----
 tests/validation/NEON/SoftmaxLayer.cpp             |  48 +----
 tests/validation/UNIT/FixedPoint.cpp               | 211 -------------------
 tests/validation/UNIT/TensorInfo.cpp               |   6 -
 tests/validation/Validation.cpp                    |   4 -
 tests/validation/fixtures/FixedPointFixture.h      | 123 -----------
 .../FixedPointPixelWiseMultiplicationFixture.h     | 119 -----------
 .../FixedPointPixelWiseMultiplication.cpp          |  85 --------
 .../reference/FixedPointPixelWiseMultiplication.h  |  43 ----
 .../reference/PixelWiseMultiplication.cpp          |   4 +-
 53 files changed, 52 insertions(+), 3152 deletions(-)
 delete mode 100644 tests/validation/CL/FixedPoint/FixedPointTarget.h
 delete mode 100644 tests/validation/CL/FixedPoint/FixedPoint_QS8.cpp
 delete mode 100644 tests/validation/NEON/FixedPoint/FixedPoint.cpp
 delete mode 100644 tests/validation/NEON/FixedPoint/FixedPointTarget.h
 delete mode 100644 tests/validation/NEON/FixedPointPixelWiseMultiplication.cpp
 delete mode 100644 tests/validation/UNIT/FixedPoint.cpp
 delete mode 100644 tests/validation/fixtures/FixedPointFixture.h
 delete mode 100644 tests/validation/fixtures/FixedPointPixelWiseMultiplicationFixture.h
 delete mode 100644 tests/validation/reference/FixedPointPixelWiseMultiplication.cpp
 delete mode 100644 tests/validation/reference/FixedPointPixelWiseMultiplication.h

diff --git a/tests/validation/CL/ActivationLayer.cpp b/tests/validation/CL/ActivationLayer.cpp
index 5fd0855ded..f122f6deeb 100644
--- a/tests/validation/CL/ActivationLayer.cpp
+++ b/tests/validation/CL/ActivationLayer.cpp
@@ -99,9 +99,7 @@ AbsoluteTolerance tolerance(ActivationLayerInfo::ActivationFunction activ
 const auto CNNDataTypes = framework::dataset::make("DataType",
 {
     DataType::F16,
-    DataType::F32,
-    DataType::QS8,
-    DataType::QS16,
+    DataType::F32
 });

 /** Input data sets. */
@@ -114,12 +112,9 @@ TEST_SUITE(ActivationLayer)
 DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(concat(datasets::SmallShapes(), datasets::LargeShapes()), CNNDataTypes), framework::dataset::make("InPlace", { false, true })),
                shape, data_type, in_place)
 {
-    // Set fixed point position data type allowed
-    const int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
-
     // Create tensors
-    CLTensor src = create_tensor(shape, data_type, 1, fixed_point_position);
-    CLTensor dst = create_tensor(shape, data_type, 1, fixed_point_position);
+    CLTensor src = create_tensor(shape, data_type, 1);
+    CLTensor dst = create_tensor(shape, data_type, 1);

     ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -165,9 +160,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
                                            TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8),
                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), // Unsupported activation
                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),     // Mismatching shapes
-                                           TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2),  // Mismatching fixed point
-                                           TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
-                                           TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
                                          }),
 framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16),
                                         TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
@@ -175,9 +167,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
                                         TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8),
                                         TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8),
                                         TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32),
-                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3),
-                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
-                                        TensorInfo(),
                                       })),
 framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
                                              ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
@@ -185,11 +174,8 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
                                              ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU),
                                              ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH),
                                              ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
-                                             ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
-
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), - ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), })), - framework::dataset::make("Expected", { false, false, true, true, false, false, false, true, true })), + framework::dataset::make("Expected", { false, false, true, true, false, false })), input_info, output_info, act_info, expected) { ARM_COMPUTE_EXPECT(bool(CLActivationLayer::validate(&input_info.clone()->set_is_resizable(false), (output_info.total_size() == 0) ? nullptr : &output_info.clone()->set_is_resizable(false), act_info)) == expected, framework::LogLevel::ERRORS); @@ -237,49 +223,6 @@ TEST_SUITE_END() template using CLActivationLayerFixedPointFixture = ActivationValidationFixedPointFixture; -TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) -// We test for fixed point precision [3,5] because [1,2] and [6,7] ranges cause -// overflowing issues in most of the transcendentals functions. -FIXTURE_DATA_TEST_CASE(RunTiny, CLActivationLayerFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), ActivationDataset), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 3, 6))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance(_function, _data_type)); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), ActivationDataset), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 3, 6))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance(_function, _data_type)); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 14 -FIXTURE_DATA_TEST_CASE(RunTiny, CLActivationLayerFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), ActivationDataset), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance(_function, _data_type)); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), ActivationDataset), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance(_function, _data_type)); -} -TEST_SUITE_END() -TEST_SUITE_END() - template using CLActivationLayerQuantizedFixture = ActivationValidationQuantizedFixture; diff --git a/tests/validation/CL/ArithmeticAddition.cpp b/tests/validation/CL/ArithmeticAddition.cpp index e9a892d744..9323ed2934 100644 --- a/tests/validation/CL/ArithmeticAddition.cpp +++ b/tests/validation/CL/ArithmeticAddition.cpp @@ -48,10 +48,6 @@ const auto ArithmeticAdditionU8Dataset = combine(combine(framework::dataset::mak DataType::U8)); const auto ArithmeticAdditionS16Dataset = combine(combine(framework::dataset::make("DataType", { DataType::U8, DataType::S16 }), framework::dataset::make("DataType", DataType::S16)), framework::dataset::make("DataType", DataType::S16)); -const auto ArithmeticAdditionQS8Dataset = combine(combine(framework::dataset::make("DataType", DataType::QS8), framework::dataset::make("DataType", DataType::QS8)), - 
framework::dataset::make("DataType", DataType::QS8)); -const auto ArithmeticAdditionQS16Dataset = combine(combine(framework::dataset::make("DataType", DataType::QS16), framework::dataset::make("DataType", DataType::QS16)), - framework::dataset::make("DataType", DataType::QS16)); const auto ArithmeticAdditionFP16Dataset = combine(combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("DataType", DataType::F16)); const auto ArithmeticAdditionFP32Dataset = combine(combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::F32)), @@ -69,26 +65,20 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), // Window shrink TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), }), framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), })), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), })), - framework::dataset::make("Expected", { true, true, false, false, false, false, true })), + framework::dataset::make("Expected", { true, true, false, false, false})), input1_info, input2_info, output_info, expected) { ARM_COMPUTE_EXPECT(bool(CLArithmeticAddition::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), ConvertPolicy::WRAP)) == expected, framework::LogLevel::ERRORS); @@ -174,44 +164,6 @@ TEST_SUITE_END() template using CLArithmeticAdditionFixedPointFixture = ArithmeticAdditionValidationFixedPointFixture; -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(RunTiny, CLArithmeticAdditionFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), ArithmeticAdditionQS8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 7))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} - -FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionQS8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 7))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() - 
-TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(RunTiny, CLArithmeticAdditionFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), ArithmeticAdditionQS16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 15))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} - -FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionQS16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 15))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE(Float) TEST_SUITE(FP16) FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFixture, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ArithmeticAdditionFP16Dataset), diff --git a/tests/validation/CL/ArithmeticSubtraction.cpp b/tests/validation/CL/ArithmeticSubtraction.cpp index 43c94a7b9b..4ba5387a61 100644 --- a/tests/validation/CL/ArithmeticSubtraction.cpp +++ b/tests/validation/CL/ArithmeticSubtraction.cpp @@ -55,10 +55,6 @@ const auto ArithmeticSubtractionS16U8S16Dataset = combine(combine(framework::dat framework::dataset::make("DataType", DataType::S16)); const auto ArithmeticSubtractionU8S16S16Dataset = combine(combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S16)), framework::dataset::make("DataType", DataType::S16)); -const auto ArithmeticSubtractionQS8Dataset = combine(combine(framework::dataset::make("DataType", DataType::QS8), framework::dataset::make("DataType", DataType::QS8)), - framework::dataset::make("DataType", DataType::QS8)); -const auto ArithmeticSubtractionQS16Dataset = combine(combine(framework::dataset::make("DataType", DataType::QS16), framework::dataset::make("DataType", DataType::QS16)), - framework::dataset::make("DataType", DataType::QS16)); const auto ArithmeticSubtractionFP16Dataset = combine(combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("DataType", DataType::F16)); const auto ArithmeticSubtractionFP32Dataset = combine(combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::F32)), @@ -76,24 +72,18 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), // Window shrink TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), }), framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), })), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 
2U), 1, DataType::S16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), })), framework::dataset::make("Expected", { true, true, false, false, false, false, true })), input1_info, input2_info, output_info, expected) @@ -244,48 +234,6 @@ TEST_SUITE_END() template using CLArithmeticSubtractionFixedPointFixture = ArithmeticSubtractionValidationFixedPointFixture; -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(RunTiny, CLArithmeticSubtractionFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), - ArithmeticSubtractionQS8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 7))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} - -FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), - ArithmeticSubtractionQS8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 7))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(RunTiny, CLArithmeticSubtractionFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), - ArithmeticSubtractionQS16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 15))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} - -FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), - ArithmeticSubtractionQS16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 15))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE(Float) TEST_SUITE(FP16) FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionFixture, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ArithmeticSubtractionFP16Dataset), diff --git a/tests/validation/CL/BatchNormalizationLayer.cpp b/tests/validation/CL/BatchNormalizationLayer.cpp index f6dc6b377c..de775bf807 100644 --- a/tests/validation/CL/BatchNormalizationLayer.cpp +++ b/tests/validation/CL/BatchNormalizationLayer.cpp @@ -46,8 +46,6 @@ namespace { constexpr AbsoluteTolerance tolerance_f32(0.00001f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */ constexpr AbsoluteTolerance tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */ -constexpr AbsoluteTolerance tolerance_qs8(3.0f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::QS8 */ -constexpr AbsoluteTolerance tolerance_qs16(6.0f); /**< Tolerance value for comparing reference's output against implementation's output for 
DataType::QS16 */ const auto act_infos = framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), @@ -65,7 +63,7 @@ using CLBatchNormalizationLayerFixture = BatchNormalizationLayerValidationFixtur DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(), combine(framework::dataset::make("UseBeta", { false, true }), framework::dataset::make("UseGamma", { false, true }))), - framework::dataset::make("DataType", { DataType::QS8, DataType::QS16, DataType::F16, DataType::F32 })), + framework::dataset::make("DataType", { DataType::F16, DataType::F32 })), framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), shape0, shape1, epsilon, use_gamma, use_beta, dt, data_layout) { @@ -105,50 +103,34 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip( TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching data types TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching data types TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Invalid mean/var/beta/gamma shape - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point position - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Fused activation with fixed point not supported TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Unsupported fused activation TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Fused activation's a < b - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), - TensorInfo(), })), framework::dataset::make("MVBGInfo",{ TensorInfo(TensorShape(2U), 1, DataType::F32), TensorInfo(TensorShape(2U), 1, DataType::F32), TensorInfo(TensorShape(2U), 1, DataType::F16), TensorInfo(TensorShape(2U), 1, DataType::F32), TensorInfo(TensorShape(5U), 1, DataType::F32), - TensorInfo(TensorShape(2U), 1, DataType::QS8, 2), - TensorInfo(TensorShape(2U), 1, DataType::QS8, 2), TensorInfo(TensorShape(2U), 1, DataType::F32), TensorInfo(TensorShape(2U), 1, DataType::F32), - TensorInfo(TensorShape(2U), 1, DataType::QS8, 2), - TensorInfo(TensorShape(2U), 1, DataType::QS8, 2), })), framework::dataset::make("ActivationLayerInfo",{ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f), - ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f, 2.f), - ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f, 2.f), 
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 2.f, 6.f), - ActivationLayerInfo(), - ActivationLayerInfo(), })), - framework::dataset::make("Expected", { true, false, false, false, false, false, false, false, false, true, true})), + framework::dataset::make("Expected", { true, false, false, false, false, false, false})), input_info, output_info, mvbg_info, act_info, expected) { const auto &mean_info = mvbg_info; @@ -189,42 +171,6 @@ FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture, framework TEST_SUITE_END() TEST_SUITE_END() -TEST_SUITE(Quantized) -template -using CLBatchNormalizationLayerFixedPointFixture = BatchNormalizationLayerValidationFixedPointFixture; - -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture, framework::DatasetMode::PRECOMMIT, - combine(combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(), - framework::dataset::make("UseBeta", false)), - framework::dataset::make("UseGamma", false)), - framework::dataset::make("ActivationInfo", ActivationLayerInfo())), - framework::dataset::make("DataType", DataType::QS8)), - framework::dataset::make("DataLayout", DataLayout::NCHW)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_qs8, 0); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture, framework::DatasetMode::PRECOMMIT, - combine(combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(), - framework::dataset::make("UseBeta", false)), - framework::dataset::make("UseGamma", false)), - framework::dataset::make("ActivationInfo", ActivationLayerInfo())), - framework::dataset::make("DataType", DataType::QS16)), - framework::dataset::make("DataLayout", DataLayout::NCHW)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_qs16, 0); -} -TEST_SUITE_END() - -TEST_SUITE_END() - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/CL/ConvolutionLayer.cpp b/tests/validation/CL/ConvolutionLayer.cpp index 0c40953524..242c252015 100644 --- a/tests/validation/CL/ConvolutionLayer.cpp +++ b/tests/validation/CL/ConvolutionLayer.cpp @@ -48,7 +48,6 @@ namespace constexpr AbsoluteTolerance absolute_tolerance_float(0.0001f); /**< Absolute Tolerance value for comparing reference's output against implementation's output for DataType::F32 */ RelativeTolerance tolerance_f32(0.05f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */ RelativeTolerance tolerance_f16(half_float::half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */ -constexpr AbsoluteTolerance tolerance_fixed(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */ constexpr AbsoluteTolerance tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */ constexpr float tolerance_num = 0.07f; /**< Tolerance number */ @@ -57,8 +56,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType", { DataType::F16, DataType::F32, - DataType::QS8, - DataType::QS16, DataType::QASYMM8, }); const auto 
ActivationFunctionsDataset = framework::dataset::make("ActivationInfo", @@ -256,58 +253,6 @@ TEST_SUITE_END() template using CLGEMMConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture; -TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) -// We test for fixed point precision [4,6] -FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMConvolutionLayerFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::TinyConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 4, 7)), - ActivationFunctionsDataset)) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed); -} - -FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 4, 7)), - ActivationFunctionsDataset)) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -// Testing for fixed point position [1,14) -FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMConvolutionLayerFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::TinyConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14)), - ActivationFunctionsDataset)) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed); -} - -FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14)), - ActivationFunctionsDataset)) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed); -} -TEST_SUITE_END() -TEST_SUITE_END() - template using CLGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture; diff --git a/tests/validation/CL/DeconvolutionLayer.cpp b/tests/validation/CL/DeconvolutionLayer.cpp index 758cf3698a..269bf1587b 100644 --- a/tests/validation/CL/DeconvolutionLayer.cpp +++ b/tests/validation/CL/DeconvolutionLayer.cpp @@ -105,14 +105,14 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, (combine(datasets::Sm DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip( framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid weights shape - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 4), // Non supported data type + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8, 4), // Non supported data type TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 11), // Invalid bias shape TensorInfo(TensorShape(13U, 11U, 4U, 3U), 1, DataType::F32, 0), // Window shrink TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32, 0), }), framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::F16, 0), 
TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::QS8, 5), + TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::QASYMM8, 5), TensorInfo(TensorShape(3U, 2U, 2U, 2U), 1, DataType::F32, 11), TensorInfo(TensorShape(3U, 3U, 4U), 1, DataType::F32, 0), TensorInfo(TensorShape(1U, 1U, 2U, 4U), 1, DataType::F32, 0), diff --git a/tests/validation/CL/DepthConcatenateLayer.cpp b/tests/validation/CL/DepthConcatenateLayer.cpp index 725af88a17..f5a5c230af 100644 --- a/tests/validation/CL/DepthConcatenateLayer.cpp +++ b/tests/validation/CL/DepthConcatenateLayer.cpp @@ -98,42 +98,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConcatenateLayerFixture, framewor TEST_SUITE_END() TEST_SUITE_END() -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(RunTiny, CLDepthConcatenateLayerFixture, framework::DatasetMode::PRECOMMIT, combine(datasets::Tiny2DShapes(), - framework::dataset::make("DataType", - DataType::QS8))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConcatenateLayerFixture, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateLayerShapes(), - framework::dataset::make("DataType", - DataType::QS8))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(RunTiny, CLDepthConcatenateLayerFixture, framework::DatasetMode::PRECOMMIT, combine(datasets::Tiny2DShapes(), - framework::dataset::make("DataType", - DataType::QS16))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConcatenateLayerFixture, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateLayerShapes(), - framework::dataset::make("DataType", - DataType::QS16))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/CL/DepthConvertLayer.cpp b/tests/validation/CL/DepthConvertLayer.cpp index 603f4a07d6..c6e9f75a59 100644 --- a/tests/validation/CL/DepthConvertLayer.cpp +++ b/tests/validation/CL/DepthConvertLayer.cpp @@ -51,10 +51,6 @@ const auto DepthConvertLayerU16toU8Dataset = combine(framework::data const auto DepthConvertLayerU16toU32Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U32)); const auto DepthConvertLayerS16toU8Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::U8)); const auto DepthConvertLayerS16toS32Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::S32)); -const auto DepthConvertLayerQS8toFP32Dataset = combine(framework::dataset::make("DataType", DataType::QS8), framework::dataset::make("DataType", DataType::F32)); -const auto DepthConvertLayerQS16toFP32Dataset = combine(framework::dataset::make("DataType", DataType::QS16), framework::dataset::make("DataType", DataType::F32)); -const auto DepthConvertLayerFP32toQS8Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::QS8)); -const auto DepthConvertLayerFP32toQS16Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::QS16)); const auto DepthConvertLayerShiftDataset = framework::dataset::make("Shift", 0, 7); 
const auto DepthConvertLayerFixedPointQuantizedDataset = framework::dataset::make("FractionalBits", 1, 7); } // namespace @@ -73,10 +69,6 @@ template using CLDepthConvertLayerToU32Fixture = DepthConvertLayerValidationFixture; template using CLDepthConvertLayerToFP32FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture; -template -using CLDepthConvertLayerToQS8FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture; -template -using CLDepthConvertLayerToQS16FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture; TEST_SUITE(U8_to_U16) DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), @@ -367,124 +359,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConvertLayerToS32Fixture, frame } TEST_SUITE_END() -TEST_SUITE(Quantized_to_FP32) -DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("DataType", { DataType::QS8, DataType::QS16 })), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset), - shape, dt, policy, fixed_point_position) -{ - int shift = 0; - - // Create tensors - CLTensor src = create_tensor(shape, dt, 1, fixed_point_position); - CLTensor dst = create_tensor(shape, DataType::F32, 1, fixed_point_position); - - // Create and Configure function - CLDepthConvertLayer depth_convert; - depth_convert.configure(&src, &dst, policy, shift); - - // Validate valid region - const ValidRegion valid_region = shape_to_valid_region(shape); - validate(dst.info()->valid_region(), valid_region); - - // Validate padding - const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding(); - validate(src.info()->padding(), padding); - validate(dst.info()->padding(), padding); -} -FIXTURE_DATA_TEST_CASE(RunTinyQS8, CLDepthConvertLayerToFP32FixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), - DepthConvertLayerQS8toFP32Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunTinyQS16, CLDepthConvertLayerToFP32FixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), - DepthConvertLayerQS16toFP32Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmallQS8, CLDepthConvertLayerToFP32FixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), - DepthConvertLayerQS8toFP32Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmallQS16, CLDepthConvertLayerToFP32FixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), - DepthConvertLayerQS16toFP32Dataset), - 
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(FP32_to_Quantized) -DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("DataType", { DataType::QS8, DataType::QS16 })), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset), - shape, dt, policy, fixed_point_position) -{ - int shift = 0; - - // Create tensors - CLTensor src = create_tensor(shape, DataType::F32, 1, fixed_point_position); - CLTensor dst = create_tensor(shape, dt, 1, fixed_point_position); - - // Create and Configure function - CLDepthConvertLayer depth_convert; - depth_convert.configure(&src, &dst, policy, shift); - - // Validate valid region - const ValidRegion valid_region = shape_to_valid_region(shape); - validate(dst.info()->valid_region(), valid_region); - - // Validate padding - const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding(); - validate(src.info()->padding(), padding); - validate(dst.info()->padding(), padding); -} -FIXTURE_DATA_TEST_CASE(RunTinyQS8, CLDepthConvertLayerToQS8FixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), - DepthConvertLayerFP32toQS8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunTinyQS16, CLDepthConvertLayerToQS16FixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), - DepthConvertLayerFP32toQS16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmallQS8, CLDepthConvertLayerToQS8FixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), - DepthConvertLayerFP32toQS8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmallQS16, CLDepthConvertLayerToQS16FixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), - DepthConvertLayerFP32toQS16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/CL/DilatedConvolutionLayer.cpp b/tests/validation/CL/DilatedConvolutionLayer.cpp index 25931c0f4c..784e2001c1 100644 --- a/tests/validation/CL/DilatedConvolutionLayer.cpp +++ b/tests/validation/CL/DilatedConvolutionLayer.cpp @@ -45,7 +45,6 @@ namespace { RelativeTolerance tolerance_f32(0.05f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */ RelativeTolerance 
tolerance_f16(half_float::half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */ -constexpr AbsoluteTolerance tolerance_fixed(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */ constexpr AbsoluteTolerance tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */ constexpr float tolerance_num = 0.07f; /**< Tolerance number */ @@ -54,8 +53,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType", { DataType::F16, DataType::F32, - DataType::QS8, - DataType::QS16, DataType::QASYMM8, }); } // namespace @@ -209,60 +206,6 @@ TEST_SUITE_END() template using CLGEMMDilatedConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture; -TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) -// We test for fixed point precision [4,6] -FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMDilatedConvolutionLayerFixedPointFixture, framework::DatasetMode::PRECOMMIT, - combine(combine(combine(combine(datasets::TinyDilatedConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 4, 7)), - framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() }))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMDilatedConvolutionLayerFixedPointFixture, framework::DatasetMode::NIGHTLY, - combine(combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 4, 7)), - framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() }))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -// Testing for fixed point position [1,14) -FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMDilatedConvolutionLayerFixedPointFixture, framework::DatasetMode::PRECOMMIT, - combine(combine(combine(combine(datasets::TinyDilatedConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14)), - framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() }))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMDilatedConvolutionLayerFixedPointFixture, framework::DatasetMode::NIGHTLY, - combine(combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14)), - framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() }))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed); -} -TEST_SUITE_END() -TEST_SUITE_END() - template using CLGEMMDilatedConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture; diff --git a/tests/validation/CL/DirectConvolutionLayer.cpp b/tests/validation/CL/DirectConvolutionLayer.cpp index 19d914e98e..d8b2d7e155 100644 --- a/tests/validation/CL/DirectConvolutionLayer.cpp +++ 
b/tests/validation/CL/DirectConvolutionLayer.cpp @@ -48,8 +48,6 @@ RelativeTolerance tolerance_fp16(half(0.2)); /**< Tolerance for floating RelativeTolerance tolerance_fp32(0.02f); /**< Tolerance for floating point tests */ constexpr float tolerance_num = 0.07f; /**< Tolerance number */ -constexpr AbsoluteTolerance tolerance_qs8(0); /**< Tolerance for fixed point tests */ -constexpr AbsoluteTolerance tolerance_qs16(0); /**< Tolerance for fixed point tests */ constexpr AbsoluteTolerance tolerance_qasymm8(1); /**< Tolerance for quantized tests */ /** Direct convolution data set. */ @@ -204,30 +202,6 @@ TEST_SUITE_END() template using CLDirectConvolutionLayerFixedPointFixture = DirectConvolutionValidationFixedPointFixture; -TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionLayerFixedPointFixture, framework::DatasetMode::ALL, combine(combine(combine(data_fixed_point, framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 2, 7)), - ActivationFunctionsDataset)) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_qs8); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionLayerFixedPointFixture, framework::DatasetMode::ALL, combine(combine(combine(data_fixed_point, framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 2, 15)), - ActivationFunctionsDataset)) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_qs16); -} -TEST_SUITE_END() -TEST_SUITE_END() - template using CLDirectConvolutionLayerQuantizedFixture = DirectConvolutionValidationQuantizedFixture; template diff --git a/tests/validation/CL/FixedPoint/FixedPointTarget.h b/tests/validation/CL/FixedPoint/FixedPointTarget.h deleted file mode 100644 index 920bd374d3..0000000000 --- a/tests/validation/CL/FixedPoint/FixedPointTarget.h +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Copyright (c) 2017-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#ifndef ARM_COMPUTE_TEST_FIXED_POINT_CL_TARGET -#define ARM_COMPUTE_TEST_FIXED_POINT_CL_TARGET - -#include "arm_compute/runtime/CL/CLScheduler.h" - -#include "tests/Globals.h" -#include "tests/Types.h" - -namespace arm_compute -{ -namespace test -{ -namespace validation -{ -namespace -{ -template -void compute_target_impl(const TensorShape &shape, DataType dt, FixedPointOp op, int fixed_point_position, TensorType &src, TensorType &dst) -{ - std::string fixed_point_operation_kernel; -#ifndef EMBEDDED_KERNELS - std::cout << "EMBEDDED_KERNELS NOT DEFINED" << std::endl; - - fixed_point_operation_kernel += "#include \"fixed_point.h\"\n"; -#endif /* EMBEDDED_KERNELS */ - fixed_point_operation_kernel += - "__kernel void fixed_point_operation_qs8( \n" - " __global char* src, \n" - " __global char* dst) \n" - "{ \n" - " char16 in = vload16(0, src + get_global_id(0) * 16); \n" - " if(FIXED_POINT_OP == 0) \n" - " { \n" - " vstore16(EXP_OP_EXPAND(in, DATA_TYPE, 16, FIXED_POINT_POS), 0, dst + get_global_id(0) * 16); \n" - " } \n" - " else if(FIXED_POINT_OP == 1) \n" - " { \n" - " vstore16(INVSQRT_OP_EXPAND(in, DATA_TYPE, 16, FIXED_POINT_POS), 0, dst + get_global_id(0) * 16); \n" - " } \n" - " else \n" - " { \n" - " vstore16(LOG_OP_EXPAND(in, DATA_TYPE, 16, FIXED_POINT_POS), 0, dst + get_global_id(0) * 16); \n" - " } \n" - "} \n" - "\n"; - - // Set build options - std::string build_opts = "-DFIXED_POINT_POS=" + support::cpp11::to_string(fixed_point_position); - build_opts += " -DDATA_TYPE=qs8"; - - // Fill tensors. - int min = 0; - int max = 0; - switch(op) - { - case(FixedPointOp::EXP): - min = -(1 << (fixed_point_position - 1)); - max = (1 << (fixed_point_position - 1)); - build_opts += " -DFIXED_POINT_OP=0"; - break; - case(FixedPointOp::INV_SQRT): - min = 1; - max = (dt == DataType::QS8) ? 0x7F : 0x7FFF; - build_opts += " -DFIXED_POINT_OP=1"; - break; - case(FixedPointOp::LOG): - min = (1 << (fixed_point_position - 1)); - max = (dt == DataType::QS8) ? 0x3F : 0x3FFF; - build_opts += " -DFIXED_POINT_OP=2"; - break; - default: - ARM_COMPUTE_ERROR("Fixed point operation not supported"); - break; - } - - std::uniform_int_distribution<> distribution(min, max); - library->fill(AccessorType(src), distribution, 0); - - std::vector sources; - -#ifndef EMBEDDED_KERNELS - build_opts += " -I" + CLKernelLibrary::get().get_kernel_path(); -#else /* EMBEDDED_KERNELS */ - sources.push_back(CLKernelLibrary::get().get_program_source("fixed_point.h")); -#endif /* EMBEDDED_KERNELS */ - - sources.push_back(fixed_point_operation_kernel); - - // Create program - ::cl::Program program(CLScheduler::get().context(), sources); - - // Build program - program.build(build_opts.c_str()); - - ::cl::Kernel kernel(program, "fixed_point_operation_qs8", nullptr); - - unsigned int idx = 0; - kernel.setArg(idx++, src.cl_buffer()); - kernel.setArg(idx++, dst.cl_buffer()); - - ::cl::NDRange gws(shape[0] / 16, 1, 1); - CLScheduler::get().queue().enqueueNDRangeKernel(kernel, 0, gws); -} -} // namespace -} // namespace validation -} // namespace test -} // namespace arm_compute -#endif /* ARM_COMPUTE_TEST_FIXED_POINT_TARGET */ diff --git a/tests/validation/CL/FixedPoint/FixedPoint_QS8.cpp b/tests/validation/CL/FixedPoint/FixedPoint_QS8.cpp deleted file mode 100644 index 186fc4c673..0000000000 --- a/tests/validation/CL/FixedPoint/FixedPoint_QS8.cpp +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (c) 2017 ARM Limited. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#include "FixedPointTarget.h" - -#include "arm_compute/core/Types.h" -#include "arm_compute/runtime/CL/CLTensor.h" -#include "arm_compute/runtime/CL/CLTensorAllocator.h" -#include "tests/CL/CLAccessor.h" -#include "tests/PaddingCalculator.h" -#include "tests/datasets/ShapeDatasets.h" -#include "tests/framework/Asserts.h" -#include "tests/framework/Macros.h" -#include "tests/framework/datasets/Datasets.h" -#include "tests/validation/Validation.h" -#include "tests/validation/fixtures/FixedPointFixture.h" - -namespace arm_compute -{ -namespace test -{ -namespace validation -{ -namespace -{ -constexpr AbsoluteTolerance tolerance_exp(1.0f); /**< Tolerance value for comparing reference's output against implementation's output (exponential)*/ -constexpr AbsoluteTolerance tolerance_invsqrt(4.0f); /**< Tolerance value for comparing reference's output against implementation's output (inverse square-root) */ -constexpr AbsoluteTolerance tolerance_log(5.0f); /**< Tolerance value for comparing reference's output against implementation's output (logarithm) */ -} // namespace - -TEST_SUITE(CL) -TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) - -template -using CLFixedPointFixture = FixedPointValidationFixture; - -TEST_SUITE(Exp) -FIXTURE_DATA_TEST_CASE(RunSmall, CLFixedPointFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::Small1DShapes(), framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FixedPointOp", FixedPointOp::EXP)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_exp); -} -TEST_SUITE_END() - -TEST_SUITE(Log) -FIXTURE_DATA_TEST_CASE(RunSmall, CLFixedPointFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::Small1DShapes(), framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FixedPointOp", FixedPointOp::LOG)), - framework::dataset::make("FractionalBits", 3, 6))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_log); -} -TEST_SUITE_END() - -TEST_SUITE(Invsqrt) -FIXTURE_DATA_TEST_CASE(RunSmall, CLFixedPointFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::Small1DShapes(), framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FixedPointOp", FixedPointOp::INV_SQRT)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // 
Validate output - validate(CLAccessor(_target), _reference, tolerance_invsqrt); -} -TEST_SUITE_END() - -TEST_SUITE_END() -TEST_SUITE_END() -TEST_SUITE_END() -} // namespace validation -} // namespace test -} // namespace arm_compute diff --git a/tests/validation/CL/Flatten.cpp b/tests/validation/CL/Flatten.cpp index 04fbef6d79..ceaf123f46 100644 --- a/tests/validation/CL/Flatten.cpp +++ b/tests/validation/CL/Flatten.cpp @@ -78,38 +78,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLFlattenLayerFixture, framework::Dataset TEST_SUITE_END() TEST_SUITE_END() -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(RunTiny, CLFlattenLayerFixture, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::Tiny3DShapes(), datasets::Tiny4DShapes()), - framework::dataset::make("DataType", DataType::QS8))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLFlattenLayerFixture, framework::DatasetMode::NIGHTLY, combine(framework::dataset::concat(datasets::Small3DShapes(), datasets::Small4DShapes()), - framework::dataset::make("DataType", DataType::QS8))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(RunTiny, CLFlattenLayerFixture, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::Tiny3DShapes(), datasets::Tiny4DShapes()), - framework::dataset::make("DataType", DataType::QS16))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLFlattenLayerFixture, framework::DatasetMode::NIGHTLY, combine(framework::dataset::concat(datasets::Small3DShapes(), datasets::Small4DShapes()), - framework::dataset::make("DataType", DataType::QS16))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/CL/FullyConnectedLayer.cpp b/tests/validation/CL/FullyConnectedLayer.cpp index 7db9cf5b70..069d8a73ac 100644 --- a/tests/validation/CL/FullyConnectedLayer.cpp +++ b/tests/validation/CL/FullyConnectedLayer.cpp @@ -47,8 +47,6 @@ RelativeTolerance tolerance_f32(0.05f); RelativeTolerance tolerance_f16(half(0.2)); constexpr float tolerance_num = 0.07f; /**< Tolerance number */ -/** Tolerance for fixed point operations */ -constexpr AbsoluteTolerance tolerance_fixed_point(1.f); /** Tolerance for quantized asymmetric operations */ constexpr AbsoluteTolerance tolerance_qasymm8(1); @@ -57,8 +55,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType", { DataType::F16, DataType::F32, - DataType::QS8, - DataType::QS16, DataType::QASYMM8, }); @@ -119,36 +115,32 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip( framework::dataset::make("InputInfo", { TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::F32), // Mismatching data types - TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::QS8, 2), // Mismatching fixed point position TensorInfo(TensorShape(8U, 4U, 6U, 4U), 1, DataType::F32), TensorInfo(TensorShape(8U, 4U, 6U, 4U), 1, DataType::F32), TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::F32), // Invalid weights dimensions TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::F32), // Wrongly reshaped weights }), framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(315U, 271U), 1, DataType::F16), - 
TensorInfo(TensorShape(315U, 271U), 1, DataType::QS8, 3), TensorInfo(TensorShape(192U, 192U), 1, DataType::F32), TensorInfo(TensorShape(192U, 192U), 1, DataType::F32), TensorInfo(TensorShape(217U, 231U), 1, DataType::F32), TensorInfo(TensorShape(217U, 315U), 1, DataType::F32), })), framework::dataset::make("BiasInfo",{ TensorInfo(TensorShape(271U), 1, DataType::F32), - TensorInfo(TensorShape(271U), 1, DataType::QS8, 2), TensorInfo(TensorShape(192U), 1, DataType::F32), TensorInfo(TensorShape(192U), 1, DataType::F32), TensorInfo(TensorShape(271U), 1, DataType::F32), TensorInfo(TensorShape(271U), 1, DataType::F32), })), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(271U, 3U), 1, DataType::F32), - TensorInfo(TensorShape(271U, 3U), 1, DataType::QS8, 3), TensorInfo(TensorShape(192U, 4U), 1, DataType::F32), TensorInfo(TensorShape(192U, 4U), 1, DataType::F32), TensorInfo(TensorShape(271U, 3U), 1, DataType::F32), TensorInfo(TensorShape(271U, 3U), 1, DataType::F32), })), - framework::dataset::make("TransposeWeights",{ true, true, true, false, true, true })), - framework::dataset::make("ReshapedWeights",{ false, false, false, false, false, false})), - framework::dataset::make("Expected", { false, false, true, true, false, false })), + framework::dataset::make("TransposeWeights",{ true, true, false, true, true })), + framework::dataset::make("ReshapedWeights",{ false, false, false, false, false})), + framework::dataset::make("Expected", { false, true, true, false, false })), input_info, weights_info, bias_info, output_info, transpose_weights, reshaped_weights, expected) { Status status = CLFullyConnectedLayer::validate(&input_info.clone()->set_is_resizable(false), @@ -202,52 +194,6 @@ TEST_SUITE_END() template <typename T> using CLFullyConnectedLayerFixedPointFixture = FullyConnectedLayerValidationFixedPointFixture<CLTensor, CLAccessor, CLFullyConnectedLayer, T, false>; -TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) -// Testing for fixed point position [1,6) as reciprocal limits the maximum fixed point position to 5 -FIXTURE_DATA_TEST_CASE(RunTiny, CLFullyConnectedLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyFullyConnectedLayerDataset(), - FullyConnectedParameters), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed_point); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLFullyConnectedLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallFullyConnectedLayerDataset(), - FullyConnectedParameters), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed_point); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 14 -FIXTURE_DATA_TEST_CASE(RunTiny, CLFullyConnectedLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyFullyConnectedLayerDataset(), - FullyConnectedParameters), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed_point); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLFullyConnectedLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallFullyConnectedLayerDataset(), -
FullyConnectedParameters), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed_point); -} -TEST_SUITE_END() -TEST_SUITE_END() - template <typename T> using CLFullyConnectedLayerQuantizedFixture = FullyConnectedLayerValidationQuantizedFixture<CLTensor, CLAccessor, CLFullyConnectedLayer, T, false>; diff --git a/tests/validation/CL/GEMM.cpp b/tests/validation/CL/GEMM.cpp index 4e6cf826fa..d066281843 100644 --- a/tests/validation/CL/GEMM.cpp +++ b/tests/validation/CL/GEMM.cpp @@ -53,7 +53,6 @@ RelativeTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for compari constexpr float abs_tolerance_f32(0.0001f); /**< Absolute tolerance value for comparing reference's output against implementation's output for floating point data types in case using relative tolerance fails because of small values */ RelativeTolerance<half_float::half> tolerance_f16(half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for floating point data types */ -constexpr AbsoluteTolerance<float> tolerance_q(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */ constexpr float tolerance_num = 0.02f; /**< Tolerance number */ const auto data_interleave = framework::dataset::make("M", 8, 14) * framework::dataset::make("N", 7, 14); @@ -62,8 +61,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType", { DataType::F16, DataType::F32, - DataType::QS8, - DataType::QS16, }); } // namespace @@ -84,31 +81,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMInterleave4x4Fixture, framework::DatasetM } TEST_SUITE_END() // FP32 -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -using CLGEMMInterleave4x4Fixture = GEMMInterleave4x4ValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMMInterleave4x4, int8_t>; -FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMInterleave4x4Fixture, framework::DatasetMode::PRECOMMIT, data_interleave * - framework::dataset::make("DataType", DataType::QS8) - * framework::dataset::make("FractionalBits", 1, 7)) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -using CLGEMMInterleave4x4Fixture = GEMMInterleave4x4ValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMMInterleave4x4, int16_t>; -FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMInterleave4x4Fixture, framework::DatasetMode::PRECOMMIT, data_interleave * - framework::dataset::make("DataType", DataType::QS16) - * framework::dataset::make("FractionalBits", 1, 14)) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE_END() - TEST_SUITE_END() // INTERLEAVE_4X4 DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallGEMMDataset(), datasets::LargeGEMMDataset()), CNNDataTypes), @@ -149,33 +121,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMTranspose1xWFixture, framework::DatasetMo } TEST_SUITE_END() // FP32 -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -using CLGEMMTranspose1xW = CLSynthetizeFunctionWithZeroConstantBorder<CLGEMMTranspose1xWKernel, 16>; -using CLGEMMTranspose1xWFixture = GEMMTranspose1xWValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMMTranspose1xW, int8_t>; -FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMTranspose1xWFixture, framework::DatasetMode::PRECOMMIT, data_transpose * - framework::dataset::make("DataType", DataType::QS8) - * framework::dataset::make("FractionalBits", 1, 7)) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -using CLGEMMTranspose1xW = CLSynthetizeFunctionWithZeroConstantBorder<CLGEMMTranspose1xWKernel, 8>; -using CLGEMMTranspose1xWFixture = GEMMTranspose1xWValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMMTranspose1xW, int16_t>;
-FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMTranspose1xWFixture, framework::DatasetMode::PRECOMMIT, data_transpose * - framework::dataset::make("DataType", DataType::QS16) - * framework::dataset::make("FractionalBits", 1, 14)) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE_END() - TEST_SUITE_END() //TRANSPOSE_1XW TEST_SUITE(Float) @@ -210,46 +155,6 @@ TEST_SUITE_END() template using CLGEMMFixedPointFixture = GEMMValidationFixedPointFixture; -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::TinyGEMMDataset(), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 7))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_q); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SmallGEMMDataset(), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 7))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_q); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::TinyGEMMDataset(), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_q); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SmallGEMMDataset(), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_q); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE(OUTPUT_3D) TEST_SUITE(Float) TEST_SUITE(FP32) diff --git a/tests/validation/CL/Im2Col.cpp b/tests/validation/CL/Im2Col.cpp index bfe0665fa9..ac36267c99 100644 --- a/tests/validation/CL/Im2Col.cpp +++ b/tests/validation/CL/Im2Col.cpp @@ -58,20 +58,18 @@ using CLIm2Col = CLSynthetizeFunction; DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( framework::dataset::make("InputInfo", { TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::U8), // Unsupported data type TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::F32), // Mismatching data type - TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::QASYMM8), // Bias not supported with QASYMM8 TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::QASYMM8), // Mismatching shapes TensorInfo(TensorShape(10U, 12U, 2U, 2U), 1, DataType::QASYMM8), }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::F16), TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::F16), - TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::QS8, 3), TensorInfo(TensorShape(3U, 3U, 10U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(18U, 80U, 1U, 2U), 1, DataType::QASYMM8), })), - framework::dataset::make("HasBias", { true, true, true, true, false, false })), - framework::dataset::make("Expected", { false, false, false, false, true, true })), + framework::dataset::make("HasBias", { true, true, true, false, false })), + 
framework::dataset::make("Expected", { false, false, false, true, true })), input_info, output_info, has_bias, expected) { diff --git a/tests/validation/CL/NormalizationLayer.cpp b/tests/validation/CL/NormalizationLayer.cpp index a3db6e60ba..f6a8e7a9fb 100644 --- a/tests/validation/CL/NormalizationLayer.cpp +++ b/tests/validation/CL/NormalizationLayer.cpp @@ -47,10 +47,6 @@ namespace RelativeTolerance tolerance_f16(half(0.2)); RelativeTolerance tolerance_f32(0.05f); -/** Tolerance for fixed point operations */ -constexpr AbsoluteTolerance tolerance_qs8(2); -constexpr AbsoluteTolerance tolerance_qs16(4); - /** Input data set. */ const auto NormalizationDataset = combine(combine(combine(combine(datasets::SmallShapes(), datasets::NormalizationTypes()), framework::dataset::make("NormalizationSize", 3, 9, 2)), @@ -83,7 +79,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching shapes TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Even normalization TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Non implemented IN_MAP_2D - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 4), // Mismatching fixed point position TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Window shrink TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 0), }), @@ -91,7 +86,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32, 0), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 0), })), @@ -100,10 +94,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( NormalizationLayerInfo(NormType::IN_MAP_1D, 4), NormalizationLayerInfo(NormType::IN_MAP_2D, 5), NormalizationLayerInfo(NormType::IN_MAP_1D, 5), - NormalizationLayerInfo(NormType::IN_MAP_1D, 5), NormalizationLayerInfo(NormType::CROSS_MAP, 5), })), - framework::dataset::make("Expected", { false, false, false, false, false, false, true })), + framework::dataset::make("Expected", { false, false, false, false, false, true })), input_info, output_info, norm_info, expected) { ARM_COMPUTE_EXPECT(bool(CLNormalizationLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), norm_info)) == expected, framework::LogLevel::ERRORS); @@ -145,44 +138,6 @@ TEST_SUITE_END() template using CLNormalizationLayerFixedPointFixture = NormalizationValidationFixedPointFixture; -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -// Testing for fixed point position [1,6) as reciprocal limits the maximum fixed point position to 5 -FIXTURE_DATA_TEST_CASE(RunTiny, CLNormalizationLayerFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(NormalizationDatasetQS, framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_qs8); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLNormalizationLayerFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(NormalizationDataset, framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(CLAccessor(_target), _reference, 
tolerance_qs8); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 5 -FIXTURE_DATA_TEST_CASE(RunTiny, CLNormalizationLayerFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(NormalizationDatasetQS, framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_qs16); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLNormalizationLayerFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(NormalizationDataset, framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_qs16); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/CL/PixelWiseMultiplication.cpp b/tests/validation/CL/PixelWiseMultiplication.cpp index 6a71175f51..99b084f6bc 100644 --- a/tests/validation/CL/PixelWiseMultiplication.cpp +++ b/tests/validation/CL/PixelWiseMultiplication.cpp @@ -28,7 +28,6 @@ #include "tests/datasets/ShapeDatasets.h" #include "tests/framework/Macros.h" #include "tests/validation/Validation.h" -#include "tests/validation/fixtures/FixedPointPixelWiseMultiplicationFixture.h" #include "tests/validation/fixtures/PixelWiseMultiplicationFixture.h" namespace arm_compute @@ -39,7 +38,6 @@ namespace validation { namespace { -const float scale_unity = 1.f; const float scale_255 = 1.f / 255.f; // *INDENT-OFF* @@ -81,12 +79,6 @@ using CLPixelWiseMultiplicationToF16Fixture = PixelWiseMultiplicationValidationF template using CLPixelWiseMultiplicationToF32Fixture = PixelWiseMultiplicationValidationFixture; template -using CLPixelWiseMultiplicationToQS8Fixture = PixelWiseMultiplicationValidationFixture; -template -using CLPixelWiseMultiplicationToQS16Fixture = PixelWiseMultiplicationValidationFixture; -template -using CLFixedPointPixelWiseMultiplicationFixture = FixedPointPixelWiseMultiplicationValidationFixture; -template using CLPixelWiseMultiplicationBroadcastFixture = PixelWiseMultiplicationBroadcastValidationFixture; TEST_SUITE(CL) @@ -101,9 +93,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip( TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid scale TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching data type - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Invalid scale }), framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), @@ -111,9 +100,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip( TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS16, 2), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), })), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 
2U), 1, DataType::S16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), @@ -121,12 +107,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip( TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), })), - framework::dataset::make("Scale",{ 2.f, 2.f, 2.f, -1.f, 1.f, 1.f, 1.f, 1.f, 3.f})), - framework::dataset::make("Expected", { true, true, false, false, false, false, false, false, false })), + framework::dataset::make("Scale",{ 2.f, 2.f, 2.f, -1.f, 1.f, 1.f})), + framework::dataset::make("Expected", { true, true, false, false, false, false})), input1_info, input2_info, output_info, scale, expected) { bool has_error = bool(CLPixelWiseMultiplication::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), scale, ConvertPolicy::WRAP, RoundingPolicy::TO_ZERO)); @@ -155,22 +138,6 @@ TEST_SUITE_END() // PixelWiseMultiplication TEST_SUITE(FixedPointPixelWiseMultiplication) -TEST_SUITE(QS8) - -TEST_SUITE(ScaleUnity) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunTiny, Fixture, PRECOMMIT, TinyShapes(), QS8, QS8, scale_unity, TO_ZERO, 1, 7) -TEST_SUITE_END() // ScaleUnity - -TEST_SUITE_END() // QS8 - -TEST_SUITE(QS16) - -TEST_SUITE(ScaleUnity) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunTiny, Fixture, PRECOMMIT, TinyShapes(), QS16, QS16, scale_unity, TO_ZERO, 1, 15) -TEST_SUITE_END() // ScaleUnity - -TEST_SUITE_END() // QS16 - TEST_SUITE(Broadcast) PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunSmall, BroadcastFixture, PRECOMMIT, SmallShapesBroadcast(), F32, F32, scale_255, TO_NEAREST_UP, VALIDATE(float, 1.f)) TEST_SUITE_END() // Broadcast diff --git a/tests/validation/CL/PoolingLayer.cpp b/tests/validation/CL/PoolingLayer.cpp index 7bd090cb77..b28a5ebca9 100644 --- a/tests/validation/CL/PoolingLayer.cpp +++ b/tests/validation/CL/PoolingLayer.cpp @@ -49,11 +49,6 @@ const auto PoolingLayerDatasetFP = combine(combine(combine(datasets::PoolingType framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })), framework::dataset::make("ExcludePadding", { true, false })); -/** Input data set for fixed-point data types */ -const auto PoolingLayerDatasetQS = combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }), framework::dataset::make("PoolingSize", { Size2D(2, 2), Size2D(3, 3) })), - framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })), - framework::dataset::make("ExcludePadding", { true, false })); - /** Input data set for asymmetric data type */ const auto PoolingLayerDatasetQASYMM8 = combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }), framework::dataset::make("PoolingSize", { Size2D(2, 2), Size2D(3, 3), Size2D(5, 7), Size2D(8, 9) })), framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })), @@ -61,8 +56,6 @@ const auto PoolingLayerDatasetQASYMM8 = 
combine(combine(combine(framework::datas constexpr AbsoluteTolerance tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for 32-bit floating-point type */ constexpr AbsoluteTolerance tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for 16-bit floating-point type */ -constexpr AbsoluteTolerance tolerance_qs16(6); /**< Tolerance value for comparing reference's output against implementation's output for 16-bit fixed-point type */ -constexpr AbsoluteTolerance tolerance_qs8(3); /**< Tolerance value for comparing reference's output against implementation's output for 8-bit fixed-point type */ constexpr AbsoluteTolerance tolerance_qasymm8(1); /**< Tolerance value for comparing reference's output against implementation's output for 8-bit asymmetric type */ } // namespace @@ -74,8 +67,6 @@ TEST_SUITE(PoolingLayer) DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Window shrink - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 4), // Mismatching fixed point position - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS16, 11), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid pad/size combination TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid pad/size combination TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8, 0), // Invalid parameters @@ -85,8 +76,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16, 0), TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::QS8, 5), - TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::QS16, 11), TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32, 0), TensorInfo(TensorShape(25U, 16U, 2U), 1, DataType::F32, 0), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8, 0), @@ -95,8 +84,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32, 0), })), framework::dataset::make("PoolInfo", { PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)), - PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)), - PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)), PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)), PoolingLayerInfo(PoolingType::AVG, 2, PadStrideInfo(1, 1, 2, 0)), PoolingLayerInfo(PoolingType::AVG, 2, PadStrideInfo(1, 1, 0, 2)), @@ -105,7 +92,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( PoolingLayerInfo(PoolingType::MAX), PoolingLayerInfo(PoolingType::AVG), })), - framework::dataset::make("Expected", { false, false, false, true, false, false, false, true, false, true })), + framework::dataset::make("Expected", { false, false, false, false, false, true, false, true })), input_info, output_info, pool_info, expected) { ARM_COMPUTE_EXPECT(bool(CLPoolingLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), pool_info)) == expected, framework::LogLevel::ERRORS); @@ -164,42 +151,6 @@ TEST_SUITE_END() // Float template using CLPoolingLayerFixedPointFixture = PoolingLayerValidationFixedPointFixture; 
-TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(RunTiny, CLPoolingLayerFixedPointFixture, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapes(), combine(PoolingLayerDatasetQS, - framework::dataset::make("DataType", DataType::QS8))), - framework::dataset::make("FractionalBits", 1, 4))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_qs8); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SmallShapes(), combine(PoolingLayerDatasetQS, - framework::dataset::make("DataType", DataType::QS8))), - framework::dataset::make("FractionalBits", 1, 4))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_qs8); -} -TEST_SUITE_END() // QS8 - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(RunTiny, CLPoolingLayerFixedPointFixture, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapes(), combine(PoolingLayerDatasetQS, - framework::dataset::make("DataType", DataType::QS16))), - framework::dataset::make("FractionalBits", 1, 12))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_qs16); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SmallShapes(), combine(PoolingLayerDatasetQS, - framework::dataset::make("DataType", DataType::QS16))), - framework::dataset::make("FractionalBits", 1, 12))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_qs16); -} -TEST_SUITE_END() // QS16 -TEST_SUITE_END() // fixedPoint - TEST_SUITE(Quantized) template diff --git a/tests/validation/CL/SoftmaxLayer.cpp b/tests/validation/CL/SoftmaxLayer.cpp index f9204f9424..114529edfd 100644 --- a/tests/validation/CL/SoftmaxLayer.cpp +++ b/tests/validation/CL/SoftmaxLayer.cpp @@ -47,9 +47,6 @@ namespace RelativeTolerance tolerance_f16(half(0.2)); RelativeTolerance tolerance_f32(0.001f); -/** Tolerance for fixed point operations */ -constexpr AbsoluteTolerance tolerance_fixed_point(2); - /** Tolerance for quantized operations */ constexpr AbsoluteTolerance tolerance_qasymm8(1); @@ -59,8 +56,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType", DataType::QASYMM8, DataType::F16, DataType::F32, - DataType::QS8, - DataType::QS16, }); } // namespace @@ -106,27 +101,23 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(concat(datase DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip( framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data types TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8, // Invalid output quantization info QuantizationInfo(1.f/256, 12)), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Window shrink TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 12)), }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16), TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 12)), 
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 0)), })), - framework::dataset::make("Expected", { false, false, false, false, false, true, true, true })), + framework::dataset::make("Expected", { false, false, false, false, true, true })), input_info, output_info, expected) { ARM_COMPUTE_EXPECT(bool(CLSoftmaxLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false))) == expected, framework::LogLevel::ERRORS); @@ -176,46 +167,6 @@ TEST_SUITE_END() template using CLSoftmaxLayerFixedPointFixture = SoftmaxValidationFixedPointFixture; -TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) -// Testing for fixed point position [1,6) as reciprocal limits the maximum fixed point position to 5 -FIXTURE_DATA_TEST_CASE(RunTiny, CLSoftmaxLayerFixedPointFixture, framework::DatasetMode::ALL, combine(combine(datasets::SoftmaxLayerTinyShapes(), framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed_point); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLSoftmaxLayerFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SoftmaxLayerSmallShapes(), framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed_point); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 14 -FIXTURE_DATA_TEST_CASE(RunTiny, CLSoftmaxLayerFixedPointFixture, framework::DatasetMode::ALL, combine(combine(datasets::SoftmaxLayerTinyShapes(), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed_point); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLSoftmaxLayerFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SoftmaxLayerSmallShapes(), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed_point); -} -TEST_SUITE_END() -TEST_SUITE_END() - template using CLSoftmaxLayerQuantizedFixture = SoftmaxValidationQuantizedFixture; diff --git a/tests/validation/CL/WidthConcatenateLayer.cpp b/tests/validation/CL/WidthConcatenateLayer.cpp index 0ff95df957..36a5e6fcfb 100644 --- a/tests/validation/CL/WidthConcatenateLayer.cpp +++ b/tests/validation/CL/WidthConcatenateLayer.cpp @@ -133,42 +133,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLWidthConcatenateLayerFixture, framewor TEST_SUITE_END() TEST_SUITE_END() -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(RunTiny, CLWidthConcatenateLayerFixture, framework::DatasetMode::PRECOMMIT, combine(datasets::Tiny2DShapes(), - framework::dataset::make("DataType", - DataType::QS8))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLWidthConcatenateLayerFixture, framework::DatasetMode::NIGHTLY, combine(datasets::WidthConcatenateLayerShapes(), - framework::dataset::make("DataType", - DataType::QS8))) -{ - // Validate output - 
validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(RunTiny, CLWidthConcatenateLayerFixture, framework::DatasetMode::PRECOMMIT, combine(datasets::Tiny2DShapes(), - framework::dataset::make("DataType", - DataType::QS16))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLWidthConcatenateLayerFixture, framework::DatasetMode::NIGHTLY, combine(datasets::WidthConcatenateLayerShapes(), - framework::dataset::make("DataType", - DataType::QS16))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/Helpers.h b/tests/validation/Helpers.h index 76b3e2fbdc..49432d693e 100644 --- a/tests/validation/Helpers.h +++ b/tests/validation/Helpers.h @@ -104,26 +104,6 @@ std::pair get_activation_layer_test_bounds(ActivationLayerInfo::Activation break; } break; - case DataType::QS8: - case DataType::QS16: - switch(activation) - { - case ActivationLayerInfo::ActivationFunction::LOGISTIC: - case ActivationLayerInfo::ActivationFunction::SOFT_RELU: - case ActivationLayerInfo::ActivationFunction::TANH: - // Reduce range as exponent overflows - bounds = std::make_pair(-(1 << fixed_point_position), 1 << fixed_point_position); - break; - case ActivationLayerInfo::ActivationFunction::SQRT: - // Reduce range as sqrt should take a non-negative number - // Can't be zero either as inv_sqrt is used in NEON. - bounds = std::make_pair(1, std::numeric_limits::max()); - break; - default: - bounds = std::make_pair(std::numeric_limits::lowest(), std::numeric_limits::max()); - break; - } - break; default: ARM_COMPUTE_ERROR("Unsupported data type"); } diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp index 868683259c..289ca4870e 100644 --- a/tests/validation/NEON/ActivationLayer.cpp +++ b/tests/validation/NEON/ActivationLayer.cpp @@ -60,10 +60,6 @@ AbsoluteTolerance tolerance(DataType data_type, ActivationLayerInfo::Acti case ActivationLayerInfo::ActivationFunction::TANH: switch(data_type) { - case DataType::QS8: - return AbsoluteTolerance(5.f); - case DataType::QS16: - return AbsoluteTolerance(11.f); case DataType::F16: return AbsoluteTolerance(0.01f); default: @@ -82,8 +78,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType", DataType::F16, #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ DataType::F32, - DataType::QS8, - DataType::QS16, }); /** Input data sets. 
*/ @@ -143,22 +137,16 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data types TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(), })), framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), - ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), - ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), })), - framework::dataset::make("Expected", { false, true, false, false, true })), + framework::dataset::make("Expected", { false, true, false})), input_info, output_info, act_info, expected) { bool is_valid = bool(NEActivationLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), act_info)); @@ -210,49 +198,6 @@ TEST_SUITE_END() template using NEActivationLayerFixedPointFixture = ActivationValidationFixedPointFixture; -TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) -// We test for fixed point precision [3,5] because [1,2] and [6,7] ranges cause -// overflowing issues in most of the transcendentals functions. 
-FIXTURE_DATA_TEST_CASE(RunTiny, NEActivationLayerFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), ActivationDataset), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 3, 6))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance(_data_type, _function)); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), ActivationDataset), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 3, 6))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance(_data_type, _function)); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 14 -FIXTURE_DATA_TEST_CASE(RunTiny, NEActivationLayerFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), ActivationDataset), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance(_data_type, _function)); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), ActivationDataset), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance(_data_type, _function)); -} -TEST_SUITE_END() -TEST_SUITE_END() - template using NEActivationLayerQuantizedFixture = ActivationValidationQuantizedFixture; diff --git a/tests/validation/NEON/ArithmeticAddition.cpp b/tests/validation/NEON/ArithmeticAddition.cpp index 98eeaa2f61..b01e5d929d 100644 --- a/tests/validation/NEON/ArithmeticAddition.cpp +++ b/tests/validation/NEON/ArithmeticAddition.cpp @@ -48,10 +48,6 @@ const auto ArithmeticAdditionU8Dataset = combine(combine(framework::dataset::mak DataType::U8)); const auto ArithmeticAdditionS16Dataset = combine(combine(framework::dataset::make("DataType", { DataType::U8, DataType::S16 }), framework::dataset::make("DataType", DataType::S16)), framework::dataset::make("DataType", DataType::S16)); -const auto ArithmeticAdditionQS8Dataset = combine(combine(framework::dataset::make("DataType", DataType::QS8), framework::dataset::make("DataType", DataType::QS8)), - framework::dataset::make("DataType", DataType::QS8)); -const auto ArithmeticAdditionQS16Dataset = combine(combine(framework::dataset::make("DataType", DataType::QS16), framework::dataset::make("DataType", DataType::QS16)), - framework::dataset::make("DataType", DataType::QS16)); #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC const auto ArithmeticAdditionFP16Dataset = combine(combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("DataType", DataType::F16)); @@ -74,26 +70,20 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), // Window shrink TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point - 
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), }), framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), })), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), })), - framework::dataset::make("Expected", { true, true, false, false, false, false, true })), + framework::dataset::make("Expected", { true, true, false, false, false})), input1_info, input2_info, output_info, expected) { ARM_COMPUTE_EXPECT(bool(NEArithmeticAddition::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), ConvertPolicy::WRAP)) == expected, framework::LogLevel::ERRORS); @@ -176,44 +166,6 @@ TEST_SUITE_END() template using NEArithmeticAdditionFixedPointFixture = ArithmeticAdditionValidationFixedPointFixture; -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(RunTiny, NEArithmeticAdditionFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), ArithmeticAdditionQS8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 7))) -{ - // Validate output - validate(Accessor(_target), _reference); -} - -FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionQS8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 7))) -{ - // Validate output - validate(Accessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(RunTiny, NEArithmeticAdditionFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), ArithmeticAdditionQS16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 15))) -{ - // Validate output - validate(Accessor(_target), _reference); -} - -FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionQS16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 15))) -{ - // Validate output - validate(Accessor(_target), _reference); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE(Float) #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC TEST_SUITE(F16) diff --git a/tests/validation/NEON/ArithmeticSubtraction.cpp b/tests/validation/NEON/ArithmeticSubtraction.cpp index 01a107c17d..fc25465e6d 100644 
--- a/tests/validation/NEON/ArithmeticSubtraction.cpp +++ b/tests/validation/NEON/ArithmeticSubtraction.cpp @@ -55,10 +55,6 @@ const auto ArithmeticSubtractionS16U8S16Dataset = combine(combine(framework::dat framework::dataset::make("DataType", DataType::S16)); const auto ArithmeticSubtractionU8S16S16Dataset = combine(combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S16)), framework::dataset::make("DataType", DataType::S16)); -const auto ArithmeticSubtractionQS8Dataset = combine(combine(framework::dataset::make("DataType", DataType::QS8), framework::dataset::make("DataType", DataType::QS8)), - framework::dataset::make("DataType", DataType::QS8)); -const auto ArithmeticSubtractionQS16Dataset = combine(combine(framework::dataset::make("DataType", DataType::QS16), framework::dataset::make("DataType", DataType::QS16)), - framework::dataset::make("DataType", DataType::QS16)); #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC const auto ArithmeticSubtractionFP16Dataset = combine(combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("DataType", DataType::F16)); @@ -78,26 +74,20 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), // Window shrink TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), }), framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), })), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), })), - framework::dataset::make("Expected", { true, true, false, false, false, false, true })), + framework::dataset::make("Expected", { true, true, false, false, false})), input1_info, input2_info, output_info, expected) { ARM_COMPUTE_EXPECT(bool(NEArithmeticSubtraction::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), ConvertPolicy::WRAP)) == expected, framework::LogLevel::ERRORS); @@ -246,48 +236,6 @@ TEST_SUITE_END() template using NEArithmeticSubtractionFixedPointFixture = ArithmeticSubtractionValidationFixedPointFixture; -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(RunTiny, NEArithmeticSubtractionFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), - ArithmeticSubtractionQS8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - 
framework::dataset::make("FractionalBits", 1, 7))) -{ - // Validate output - validate(Accessor(_target), _reference); -} - -FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticSubtractionFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), - ArithmeticSubtractionQS8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 7))) -{ - // Validate output - validate(Accessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(RunTiny, NEArithmeticSubtractionFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), - ArithmeticSubtractionQS16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 15))) -{ - // Validate output - validate(Accessor(_target), _reference); -} - -FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticSubtractionFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), - ArithmeticSubtractionQS16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 15))) -{ - // Validate output - validate(Accessor(_target), _reference); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE(Float) #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC TEST_SUITE(FP16) diff --git a/tests/validation/NEON/BatchNormalizationLayer.cpp b/tests/validation/NEON/BatchNormalizationLayer.cpp index 53fd0163ff..3a18a0a93b 100644 --- a/tests/validation/NEON/BatchNormalizationLayer.cpp +++ b/tests/validation/NEON/BatchNormalizationLayer.cpp @@ -48,9 +48,7 @@ constexpr AbsoluteTolerance tolerance_f32(0.00001f); /**< Tolerance value #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC constexpr AbsoluteTolerance tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */ #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ -constexpr AbsoluteTolerance tolerance_qs8(3.0f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::QS8 */ -constexpr AbsoluteTolerance tolerance_qs16(6.0f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::QS16 */ -const auto act_infos = framework::dataset::make("ActivationInfo", +const auto act_infos = framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f), @@ -66,7 +64,7 @@ using NEBatchNormalizationLayerFixture = BatchNormalizationLayerValidationFixtur DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(), combine(framework::dataset::make("UseBeta", { false, true }), framework::dataset::make("UseGamma", { false, true }))), - framework::dataset::make("DataType", { DataType::QS8, DataType::QS16, DataType::F32 })), + framework::dataset::make("DataType", { DataType::F32 })), framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), shape0, shape1, epsilon, use_beta, use_gamma, dt, data_layout) { @@ -106,46 +104,30 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip( TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching data types 
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching data types TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Invalid mean/var/beta/gamma shape - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point position - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Fused activation with fixed point not supported TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Fused activation's a < b - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), - TensorInfo(), })), framework::dataset::make("MVBGInfo",{ TensorInfo(TensorShape(2U), 1, DataType::F32), TensorInfo(TensorShape(2U), 1, DataType::F32), TensorInfo(TensorShape(2U), 1, DataType::F16), TensorInfo(TensorShape(2U), 1, DataType::F32), TensorInfo(TensorShape(5U), 1, DataType::F32), - TensorInfo(TensorShape(2U), 1, DataType::QS8, 2), - TensorInfo(TensorShape(2U), 1, DataType::QS8, 2), TensorInfo(TensorShape(2U), 1, DataType::F32), - TensorInfo(TensorShape(2U), 1, DataType::QS8, 2), - TensorInfo(TensorShape(2U), 1, DataType::QS8, 2), })), framework::dataset::make("ActivationLayerInfo",{ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f), - ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f, 2.f), - ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f, 2.f), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 2.f, 6.f), - ActivationLayerInfo(), - ActivationLayerInfo(), })), - framework::dataset::make("Expected", { true, false, false, false, false, false, false, false, true, true})), + framework::dataset::make("Expected", { true, false, false, false, false, false})), input_info, output_info, mvbg_info, act_info, expected) { const auto &mean_info = mvbg_info; @@ -191,42 +173,6 @@ TEST_SUITE_END() #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ TEST_SUITE_END() -TEST_SUITE(Quantized) -template -using NEBatchNormalizationLayerFixedPointFixture = BatchNormalizationLayerValidationFixedPointFixture; - -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture, framework::DatasetMode::PRECOMMIT, - combine(combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(), - framework::dataset::make("UseBeta", false)), - framework::dataset::make("UseGamma", false)), - framework::dataset::make("ActivationInfo", ActivationLayerInfo())), - framework::dataset::make("DataType", DataType::QS8)), - framework::dataset::make("DataLayout", DataLayout::NCHW)), - 
framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_qs8, 0); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture, framework::DatasetMode::PRECOMMIT, - combine(combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(), - framework::dataset::make("UseBeta", false)), - framework::dataset::make("UseGamma", false)), - framework::dataset::make("ActivationInfo", ActivationLayerInfo())), - framework::dataset::make("DataType", DataType::QS16)), - framework::dataset::make("DataLayout", DataLayout::NCHW)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_qs16, 0); -} -TEST_SUITE_END() - -TEST_SUITE_END() - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/NEON/Col2Im.cpp b/tests/validation/NEON/Col2Im.cpp index 9f2415d628..e4a52f25dd 100644 --- a/tests/validation/NEON/Col2Im.cpp +++ b/tests/validation/NEON/Col2Im.cpp @@ -43,19 +43,17 @@ TEST_SUITE(Col2Im) DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip( framework::dataset::make("InputInfo", { TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::S64), // Unsupported data type TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::F32), // Mismatching data type - TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::F32), // Invalid output shape TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::F32), }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::F16), TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::F16), - TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::QS8, 3), TensorInfo(TensorShape(3U, 3U, 10U, 2U), 1, DataType::F32), TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::F32), })), framework::dataset::make("ConvolvedWidth", { 3, 3, 3, 3, 3 })), framework::dataset::make("ConvolvedHeight", { 4, 4, 4, 4, 4 })), - framework::dataset::make("Expected", { false, false, false, false, true })), + framework::dataset::make("Expected", { false, false, false, true })), input_info, output_info, convolved_width, convolved_height, expected) { bool status = bool(NECol2Im::validate(&input_info, &output_info, Size2D(convolved_width, convolved_height))); diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp index 2c17487107..747d8d2f62 100644 --- a/tests/validation/NEON/ConvolutionLayer.cpp +++ b/tests/validation/NEON/ConvolutionLayer.cpp @@ -52,7 +52,6 @@ const AbsoluteTolerance abs_tolerance_f32(0.002f); /**< Absolute toleranc #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC const AbsoluteTolerance tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */ #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ -const AbsoluteTolerance tolerance_q(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */ constexpr AbsoluteTolerance tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */ /** CNN data types */ @@ -62,8 +61,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType", DataType::F16, #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ 
DataType::F32, - DataType::QS8, - DataType::QS16, DataType::QASYMM8, }); const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo", @@ -251,52 +248,6 @@ TEST_SUITE_END() template using NEGEMMConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture; -TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) -// We test for fixed point precision [4,6] -FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMConvolutionLayerFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::TinyConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QS8)), - framework::dataset::make("FractionalBits", 4, 7)), - ActivationFunctionsDataset)) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_q); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QS8)), - framework::dataset::make("FractionalBits", 4, 7)), - ActivationFunctionsDataset)) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_q); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -// Testing for fixed point position [1,14) -FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMConvolutionLayerFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::TinyConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14)), - ActivationFunctionsDataset)) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_q); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14)), - ActivationFunctionsDataset)) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_q); -} -TEST_SUITE_END() -TEST_SUITE_END() - template using NEGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture; diff --git a/tests/validation/NEON/DeconvolutionLayer.cpp b/tests/validation/NEON/DeconvolutionLayer.cpp index 3bb6d6f8fc..87d413f202 100644 --- a/tests/validation/NEON/DeconvolutionLayer.cpp +++ b/tests/validation/NEON/DeconvolutionLayer.cpp @@ -102,14 +102,14 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, (combine(datasets::Sm DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip( framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid weights shape - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 4), // Non supported data type + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16, 4), // Non supported data type TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 11), // Invalid bias shape TensorInfo(TensorShape(13U, 11U, 4U, 3U), 1, DataType::F32, 0), // Window shrink TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32, 0), }), framework::dataset::make("WeightsInfo", { 
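A note on the Validate tables in these files: each column (InputInfo, WeightsInfo, Expected, and so on) is a separate dataset, and zip(...) pairs them row by row, so removing a QS8 case from one column also removes the matching row from every other zipped column, including the final Expected flags. A reduced sketch of the idiom, with illustrative shapes and only two rows:

// Two aligned columns: one tensor description per case, one expected result per case.
const auto input_infos = framework::dataset::make("InputInfo",
{
    TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type
    TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32), // Valid case
});
const auto expected = framework::dataset::make("Expected", { false, true });
// zip keeps the rows paired, so every zipped dataset must stay the same length.
const auto validate_cases = zip(input_infos, expected);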
TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::F16, 0), TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::QS8, 5), + TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::F16, 5), TensorInfo(TensorShape(3U, 2U, 2U, 2U), 1, DataType::F32, 11), TensorInfo(TensorShape(3U, 3U, 4U), 1, DataType::F32, 0), TensorInfo(TensorShape(1U, 1U, 2U, 4U), 1, DataType::F32, 0), diff --git a/tests/validation/NEON/DepthConcatenateLayer.cpp b/tests/validation/NEON/DepthConcatenateLayer.cpp index c3bb8a93b7..0e0b674e41 100644 --- a/tests/validation/NEON/DepthConcatenateLayer.cpp +++ b/tests/validation/NEON/DepthConcatenateLayer.cpp @@ -100,42 +100,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConcatenateLayerFixture, framewor TEST_SUITE_END() TEST_SUITE_END() -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(RunTiny, NEDepthConcatenateLayerFixture, framework::DatasetMode::PRECOMMIT, combine(datasets::Tiny2DShapes(), - framework::dataset::make("DataType", - DataType::QS8))) -{ - // Validate output - validate(Accessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConcatenateLayerFixture, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateLayerShapes(), - framework::dataset::make("DataType", - DataType::QS8))) -{ - // Validate output - validate(Accessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(RunTiny, NEDepthConcatenateLayerFixture, framework::DatasetMode::PRECOMMIT, combine(datasets::Tiny2DShapes(), - framework::dataset::make("DataType", - DataType::QS16))) -{ - // Validate output - validate(Accessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConcatenateLayerFixture, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateLayerShapes(), - framework::dataset::make("DataType", - DataType::QS16))) -{ - // Validate output - validate(Accessor(_target), _reference); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/NEON/DepthConvertLayer.cpp b/tests/validation/NEON/DepthConvertLayer.cpp index ea63a5f52b..2bd3db7075 100644 --- a/tests/validation/NEON/DepthConvertLayer.cpp +++ b/tests/validation/NEON/DepthConvertLayer.cpp @@ -44,19 +44,14 @@ namespace validation namespace { /** Input data sets **/ -const auto DepthConvertLayerU8toU16Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::U16)); -const auto DepthConvertLayerU8toS16Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S16)); -const auto DepthConvertLayerU8toS32Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S32)); -const auto DepthConvertLayerU16toU8Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U8)); -const auto DepthConvertLayerU16toU32Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U32)); -const auto DepthConvertLayerS16toU8Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::U8)); -const auto DepthConvertLayerS16toS32Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::S32)); -const auto 
DepthConvertLayerQS8toFP32Dataset = combine(framework::dataset::make("DataType", DataType::QS8), framework::dataset::make("DataType", DataType::F32)); -const auto DepthConvertLayerQS16toFP32Dataset = combine(framework::dataset::make("DataType", DataType::QS16), framework::dataset::make("DataType", DataType::F32)); -const auto DepthConvertLayerFP32toQS8Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::QS8)); -const auto DepthConvertLayerFP32toQS16Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::QS16)); -const auto DepthConvertLayerShiftDataset = framework::dataset::make("Shift", 0, 7); -const auto DepthConvertLayerFixedPointQuantizedDataset = framework::dataset::make("FractionalBits", 1, 7); +const auto DepthConvertLayerU8toU16Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::U16)); +const auto DepthConvertLayerU8toS16Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S16)); +const auto DepthConvertLayerU8toS32Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S32)); +const auto DepthConvertLayerU16toU8Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U8)); +const auto DepthConvertLayerU16toU32Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U32)); +const auto DepthConvertLayerS16toU8Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::U8)); +const auto DepthConvertLayerS16toS32Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::S32)); +const auto DepthConvertLayerShiftDataset = framework::dataset::make("Shift", 0, 7); } // namespace TEST_SUITE(NEON) @@ -73,10 +68,6 @@ template using NEDepthConvertLayerToU32Fixture = DepthConvertLayerValidationFixture; template using NEDepthConvertLayerToFP32FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture; -template -using NEDepthConvertLayerToQS8FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture; -template -using NEDepthConvertLayerToQS16FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture; TEST_SUITE(U8_to_U16) DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), @@ -367,124 +358,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToS32Fixture, frame } TEST_SUITE_END() -TEST_SUITE(Quantized_to_FP32) -DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("DataType", { DataType::QS8, DataType::QS16 })), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset), - shape, dt, policy, fixed_point_position) -{ - int shift = 0; - - // Create tensors - Tensor src = create_tensor(shape, dt, 1, fixed_point_position); - Tensor dst = create_tensor(shape, DataType::F32, 1, fixed_point_position); - - // Create and 
Configure function - NEDepthConvertLayer depth_convert; - depth_convert.configure(&src, &dst, policy, shift); - - // Validate valid region - const ValidRegion valid_region = shape_to_valid_region(shape); - validate(dst.info()->valid_region(), valid_region); - - // Validate padding - const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding(); - validate(src.info()->padding(), padding); - validate(dst.info()->padding(), padding); -} -FIXTURE_DATA_TEST_CASE(RunTinyQS8, NEDepthConvertLayerToFP32FixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), - DepthConvertLayerQS8toFP32Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(Accessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunTinyQS16, NEDepthConvertLayerToFP32FixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), - DepthConvertLayerQS16toFP32Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(Accessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmallQS8, NEDepthConvertLayerToFP32FixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), - DepthConvertLayerQS8toFP32Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(Accessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmallQS16, NEDepthConvertLayerToFP32FixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), - DepthConvertLayerQS16toFP32Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(Accessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(FP32_to_Quantized) -DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("DataType", { DataType::QS8, DataType::QS16 })), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset), - shape, dt, policy, fixed_point_position) -{ - int shift = 0; - - // Create tensors - Tensor src = create_tensor(shape, DataType::F32, 1, fixed_point_position); - Tensor dst = create_tensor(shape, dt, 1, fixed_point_position); - - // Create and Configure function - NEDepthConvertLayer depth_convert; - depth_convert.configure(&src, &dst, policy, shift); - - // Validate valid region - const ValidRegion valid_region = shape_to_valid_region(shape); - validate(dst.info()->valid_region(), valid_region); - - // Validate padding - const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding(); - validate(src.info()->padding(), padding); - validate(dst.info()->padding(), padding); -} -FIXTURE_DATA_TEST_CASE(RunTinyQS8, NEDepthConvertLayerToQS8FixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), - DepthConvertLayerFP32toQS8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - 
DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(Accessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunTinyQS16, NEDepthConvertLayerToQS16FixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), - DepthConvertLayerFP32toQS16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(Accessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmallQS8, NEDepthConvertLayerToQS8FixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), - DepthConvertLayerFP32toQS8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(Accessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmallQS16, NEDepthConvertLayerToQS16FixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), - DepthConvertLayerFP32toQS16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(Accessor(_target), _reference); -} -TEST_SUITE_END() - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/NEON/DilatedConvolutionLayer.cpp b/tests/validation/NEON/DilatedConvolutionLayer.cpp index d9fd093c8e..2888a6535e 100644 --- a/tests/validation/NEON/DilatedConvolutionLayer.cpp +++ b/tests/validation/NEON/DilatedConvolutionLayer.cpp @@ -47,7 +47,6 @@ const AbsoluteTolerance tolerance_f32(0.001f); /**< Tolerance value for c #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC const AbsoluteTolerance tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */ #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ -const AbsoluteTolerance tolerance_q(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */ constexpr AbsoluteTolerance tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */ /** CNN data types */ @@ -57,8 +56,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType", DataType::F16, #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ DataType::F32, - DataType::QS8, - DataType::QS16, DataType::QASYMM8, }); } // namespace @@ -203,56 +200,6 @@ TEST_SUITE_END() template using NEGEMMDilatedConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture; -TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) -// We test for fixed point precision [4,6] -FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMDilatedConvolutionLayerFixedPointFixture, framework::DatasetMode::PRECOMMIT, - combine(combine(combine(combine(datasets::TinyDilatedConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QS8)), - framework::dataset::make("FractionalBits", 4, 7)), - framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo()))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_q); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerFixedPointFixture, framework::DatasetMode::NIGHTLY, - 
combine(combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QS8)), - framework::dataset::make("FractionalBits", 4, 7)), - framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo()))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_q); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -// Testing for fixed point position [1,14) -FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMDilatedConvolutionLayerFixedPointFixture, framework::DatasetMode::PRECOMMIT, - combine(combine(combine(combine(datasets::TinyDilatedConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14)), - framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo()))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_q); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerFixedPointFixture, framework::DatasetMode::NIGHTLY, - combine(combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14)), - framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo()))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_q); -} -TEST_SUITE_END() -TEST_SUITE_END() - template using NEGEMMDilatedConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture; diff --git a/tests/validation/NEON/DirectConvolutionLayer.cpp b/tests/validation/NEON/DirectConvolutionLayer.cpp index f700758d21..4995d881cc 100644 --- a/tests/validation/NEON/DirectConvolutionLayer.cpp +++ b/tests/validation/NEON/DirectConvolutionLayer.cpp @@ -42,7 +42,6 @@ namespace validation { namespace { -constexpr AbsoluteTolerance tolerance_qs(1.f); /**< Tolerance for fixed point tests */ #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC constexpr AbsoluteTolerance tolerance_fp16(0.01f); /**< Tolerance for half precision floating point tests */ #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ @@ -59,33 +58,12 @@ const auto data_pad_f32 = concat(concat(combine(framework::dataset::make("PadX", combine(framework::dataset::make("PadY", { 0, 3 }), framework::dataset::make("KernelSize", 5)))); -const auto data_pad_qs8 = concat(combine(framework::dataset::make("PadX", 0), - combine(framework::dataset::make("PadY", 0), - framework::dataset::make("KernelSize", 1))), - combine(framework::dataset::make("PadX", { 0, 2 }), - combine(framework::dataset::make("PadY", { 0, 2 }), - framework::dataset::make("KernelSize", 3)))); - const auto data_f32 = combine(datasets::SmallDirectConvolutionShapes(), combine(framework::dataset::make("StrideX", { 1, 3 }), combine(framework::dataset::make("StrideY", { 1, 3 }), combine(data_pad_f32, framework::dataset::make("NumKernels", { 1, 4, 8, 16 }))))); -const auto data_qs8 = combine(datasets::TinyDirectConvolutionShapes(), - combine(framework::dataset::make("StrideX", { 1, 3 }), - combine(framework::dataset::make("StrideY", { 1, 3 }), - combine(data_pad_qs8, - framework::dataset::make("NumKernels", { 1, 4, 8, 16 }))))); - -/** Direct convolution QS16 data set. 
*/ -const auto data_qs16 = combine(datasets::TinyDirectConvolutionShapes(), - combine(framework::dataset::make("StrideX", { 1, 3 }), - combine(framework::dataset::make("StrideY", { 1, 3 }), - combine(framework::dataset::make("PadX", 0), - combine(framework::dataset::make("PadY", 0), - combine(framework::dataset::make("KernelSize", 1), - framework::dataset::make("NumKernels", { 1, 4, 8, 16 }))))))); /** Activation function Dataset*/ const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo", { @@ -205,30 +183,6 @@ const auto QuantizedActivationFunctionsDataset = framework::dataset::make("Activ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f) }); -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -// We test for fixed point precision [4,6] -FIXTURE_DATA_TEST_CASE(Run, NEDirectConvolutionLayerFixedPointFixture, framework::DatasetMode::ALL, combine(combine(combine(data_qs8, framework::dataset::make("DataType", DataType::QS8)), - framework::dataset::make("FractionalBits", 4, 7)), - QuantizedActivationFunctionsDataset)) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_qs); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -// We test for fixed point precision [4,13] -FIXTURE_DATA_TEST_CASE(Run, NEDirectConvolutionLayerFixedPointFixture, framework::DatasetMode::ALL, combine(combine(combine(data_qs16, framework::dataset::make("DataType", DataType::QS16)), - framework::dataset::make("FractionalBits", 4, 14)), - QuantizedActivationFunctionsDataset)) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_qs); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/NEON/FixedPoint/FixedPoint.cpp b/tests/validation/NEON/FixedPoint/FixedPoint.cpp deleted file mode 100644 index a583b587ae..0000000000 --- a/tests/validation/NEON/FixedPoint/FixedPoint.cpp +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright (c) 2017-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#include "FixedPointTarget.h" - -#include "arm_compute/core/Types.h" -#include "arm_compute/runtime/Tensor.h" -#include "arm_compute/runtime/TensorAllocator.h" -#include "tests/NEON/Accessor.h" -#include "tests/PaddingCalculator.h" -#include "tests/datasets/ShapeDatasets.h" -#include "tests/framework/Asserts.h" -#include "tests/framework/Macros.h" -#include "tests/framework/datasets/Datasets.h" -#include "tests/validation/Validation.h" -#include "tests/validation/fixtures/FixedPointFixture.h" - -namespace arm_compute -{ -namespace test -{ -namespace validation -{ -namespace -{ -constexpr AbsoluteTolerance tolerance_exp_qs8(0.0f); /**< Tolerance value for comparing reference's output against implementation's output (exponential) for DataType::QS8 */ -constexpr AbsoluteTolerance tolerance_exp_qs16(1.0f); /**< Tolerance value for comparing reference's output against implementation's output (exponential) for DataType::QS16 */ -constexpr AbsoluteTolerance tolerance_invsqrt_qs8(4.0f); /**< Tolerance value for comparing reference's output against implementation's output (inverse square-root) for DataType::QS8 */ -constexpr AbsoluteTolerance tolerance_invsqrt_qs16(5.0f); /**< Tolerance value for comparing reference's output against implementation's output (inverse square-root) for DataType::QS16 */ -constexpr AbsoluteTolerance tolerance_log_qs8(5.0f); /**< Tolerance value for comparing reference's output against implementation's output (logarithm) for DataType::QS8 */ -constexpr AbsoluteTolerance tolerance_log_qs16(7.0f); /**< Tolerance value for comparing reference's output against implementation's output (logarithm) for DataType::QS16 */ -} // namespace - -TEST_SUITE(NEON) -TEST_SUITE(FixedPoint) -template -using NEFixedPointFixture = FixedPointValidationFixture; - -TEST_SUITE(QS8) -TEST_SUITE(Exp) - -FIXTURE_DATA_TEST_CASE(RunSmall, NEFixedPointFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::Small1DShapes(), framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FixedPointOp", FixedPointOp::EXP)), - framework::dataset::make("FractionalBits", 1, 7))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_exp_qs8, 0); -} -TEST_SUITE_END() - -TEST_SUITE(Invsqrt) -FIXTURE_DATA_TEST_CASE(RunSmall, NEFixedPointFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::Small1DShapes(), framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FixedPointOp", FixedPointOp::INV_SQRT)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_invsqrt_qs8, 0); -} -TEST_SUITE_END() - -TEST_SUITE(Log) -FIXTURE_DATA_TEST_CASE(RunSmall, NEFixedPointFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::Small1DShapes(), framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FixedPointOp", FixedPointOp::LOG)), - framework::dataset::make("FractionalBits", 3, 6))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_log_qs8, 0); -} -TEST_SUITE_END() -TEST_SUITE_END() - -TEST_SUITE(QS16) -TEST_SUITE(Exp) -FIXTURE_DATA_TEST_CASE(RunSmall, NEFixedPointFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::Small1DShapes(), framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FixedPointOp", FixedPointOp::EXP)), - framework::dataset::make("FractionalBits", 1, 15))) -{ - // Validate output - 
validate(Accessor(_target), _reference, tolerance_exp_qs16, 0); -} -TEST_SUITE_END() - -TEST_SUITE(Invsqrt) -FIXTURE_DATA_TEST_CASE(RunSmall, NEFixedPointFixture, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::make("Shape", TensorShape(8192U)), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FixedPointOp", FixedPointOp::INV_SQRT)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_invsqrt_qs16, 0); -} -TEST_SUITE_END() - -TEST_SUITE(Log) -FIXTURE_DATA_TEST_CASE(RunSmall, NEFixedPointFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::Small1DShapes(), framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FixedPointOp", FixedPointOp::LOG)), - framework::dataset::make("FractionalBits", 4, 14))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_log_qs16, 0); -} -TEST_SUITE_END() -TEST_SUITE_END() - -TEST_SUITE_END() -TEST_SUITE_END() -} // namespace validation -} // namespace test -} // namespace arm_compute diff --git a/tests/validation/NEON/FixedPoint/FixedPointTarget.h b/tests/validation/NEON/FixedPoint/FixedPointTarget.h deleted file mode 100644 index 31ccc0817e..0000000000 --- a/tests/validation/NEON/FixedPoint/FixedPointTarget.h +++ /dev/null @@ -1,225 +0,0 @@ -/* - * Copyright (c) 2017 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#ifndef ARM_COMPUTE_TEST_FIXED_POINT_NEON_TARGET -#define ARM_COMPUTE_TEST_FIXED_POINT_NEON_TARGET - -#include "arm_compute/core/NEON/NEFixedPoint.h" - -#include "tests/Globals.h" -#include "tests/Types.h" - -namespace arm_compute -{ -namespace test -{ -namespace validation -{ -namespace -{ -template -void compute_target_impl(const TensorShape &shape, DataType dt, FixedPointOp op, int fixed_point_position, TensorType &src, TensorType &dst) -{ - Window window; - - switch(dt) - { - case DataType::QS8: - { - constexpr unsigned int num_elems_processed_per_iteration = 16; - window = calculate_max_window(*src.info(), Steps(num_elems_processed_per_iteration)); - AccessWindowHorizontal input_access(src.info(), 0, num_elems_processed_per_iteration); - AccessWindowHorizontal output_access(dst.info(), 0, num_elems_processed_per_iteration); - update_window_and_padding(window, input_access, output_access); - break; - } - case DataType::QS16: - { - constexpr unsigned int num_elems_processed_per_iteration = 8; - window = calculate_max_window(*src.info(), Steps(num_elems_processed_per_iteration)); - AccessWindowHorizontal input_access(src.info(), 0, num_elems_processed_per_iteration); - AccessWindowHorizontal output_access(dst.info(), 0, num_elems_processed_per_iteration); - update_window_and_padding(window, input_access, output_access); - break; - } - default: - ARM_COMPUTE_ERROR("Not Supported"); - break; - } - - int min; - int max; - switch(op) - { - case FixedPointOp::EXP: - { - // Fill tensors. Keep the range between [-1.0, 1.0) so the result won't - // overflow. - min = -(1 << (fixed_point_position - 1)); - max = (1 << (fixed_point_position - 1)); - break; - } - case FixedPointOp::INV_SQRT: - { - if(dt == DataType::QS8) - { - // Fill tensors. Keep the range between [1, 127). - min = 1; - max = 127; - } - else - { - // Fill tensors. Keep the range between [1, 0x7FFF) - min = 1; - max = 0x7FFF; - } - break; - } - case FixedPointOp::LOG: - { - if(dt == DataType::QS8) - { - // Fill tensors. Keep the range between [(1 << (fixed_point_position - 1), 63) so the result won't - // overflow. E.g. for Q2.5 ln(0.001) = -6.9, which cannot be represented. - min = (1 << (fixed_point_position - 1)); - max = 0x3F; - } - else - { - // Fill tensors. Keep the range between [(1 << (fixed_point_position - 1), 0x3FFF) so the result won't - // overflow. - min = (1 << (fixed_point_position - 1)); - max = 0x3FFF; - } - break; - } - case FixedPointOp::RECIPROCAL: - { - if(dt == DataType::QS8) - { - // Fill tensors. Keep the range between [15, 100) so the result won't - // overflow. E.g. for Q2.5 reciprocal(0.001) = 1000, which cannot be represented. - min = 15; - max = 0x7F; - } - else - { - // Fill tensors. Keep the range between [15, 0x7FFF) so the result won't - // overflow. 
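The clamped minima and maxima in this helper follow from the representable range of a fixed point type: a QS8 raw value with fixed_point_position p spans [-2^(7-p), 2^(7-p) - 2^-p] in real terms. A small standalone sketch of that bound, independent of the deleted code:

#include <cstdio>

int main()
{
    // A QS8 raw value in [-128, 127] with p fractional bits represents raw / 2^p.
    for(int p = 1; p <= 7; ++p)
    {
        std::printf("QS8, p=%d: [%g, %g]\n", p, -128.0 / (1 << p), 127.0 / (1 << p));
    }
    // p = 5 (Q2.5) gives [-4, 3.96875]; ln(0.001) = -6.9 is not representable,
    // which is why the LOG and RECIPROCAL cases keep their inputs away from zero.
    return 0;
}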
- min = 15; - max = 0x7FFF; - } - break; - } - default: - ARM_COMPUTE_ERROR("Not Supported"); - break; - } - - std::uniform_int_distribution<> distribution(min, max); - library->fill(AccessorType(src), distribution, 0); - - Iterator input(&src, window); - Iterator output(&dst, window); - - const auto loop_function = [&](const Coordinates & id) - { - switch(dt) - { - case DataType::QS8: - { - const qint8x16_t qs8in = vld1q_s8(reinterpret_cast(input.ptr())); - switch(op) - { - case FixedPointOp::EXP: - { - // Use saturated exp - vst1q_s8(reinterpret_cast(output.ptr()), vqexpq_qs8(qs8in, fixed_point_position)); - break; - } - case FixedPointOp::INV_SQRT: - { - vst1q_s8(reinterpret_cast(output.ptr()), vqinvsqrtq_qs8(qs8in, fixed_point_position)); - break; - } - case FixedPointOp::LOG: - { - vst1q_s8(reinterpret_cast(output.ptr()), vlogq_qs8(qs8in, fixed_point_position)); - break; - } - case FixedPointOp::RECIPROCAL: - { - vst1q_s8(reinterpret_cast(output.ptr()), vrecipq_qs8(qs8in, fixed_point_position)); - break; - } - default: - ARM_COMPUTE_ERROR("Not Supported"); - break; - } - break; - } - case DataType::QS16: - { - const qint16x8_t qs16in = vld1q_qs16(reinterpret_cast(input.ptr())); - switch(op) - { - case FixedPointOp::EXP: - { - // Use saturated exp - vst1q_qs16(reinterpret_cast(output.ptr()), vqexpq_qs16(qs16in, fixed_point_position)); - break; - } - case FixedPointOp::INV_SQRT: - { - vst1q_qs16(reinterpret_cast(output.ptr()), vqinvsqrtq_qs16(qs16in, fixed_point_position)); - break; - } - case FixedPointOp::LOG: - { - vst1q_qs16(reinterpret_cast(output.ptr()), vlogq_qs16(qs16in, fixed_point_position)); - break; - } - case FixedPointOp::RECIPROCAL: - { - vst1q_qs16(reinterpret_cast(output.ptr()), vqrecipq_qs16(qs16in, fixed_point_position)); - break; - } - default: - ARM_COMPUTE_ERROR("Not Supported"); - break; - } - break; - } - default: - ARM_COMPUTE_ERROR("Not Supported"); - break; - } - }; - - execute_window_loop(window, loop_function, input, output); -} -} // namespace -} // namespace validation -} // namespace test -} // namespace arm_compute -#endif /* ARM_COMPUTE_TEST_FIXED_POINT_NEON_TARGET */ diff --git a/tests/validation/NEON/FixedPointPixelWiseMultiplication.cpp b/tests/validation/NEON/FixedPointPixelWiseMultiplication.cpp deleted file mode 100644 index d7954dec64..0000000000 --- a/tests/validation/NEON/FixedPointPixelWiseMultiplication.cpp +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Copyright (c) 2017-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#include "arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h" -#include "tests/NEON/Accessor.h" -#include "tests/datasets/ConvertPolicyDataset.h" -#include "tests/datasets/ShapeDatasets.h" -#include "tests/framework/Macros.h" -#include "tests/validation/Validation.h" -#include "tests/validation/fixtures/FixedPointPixelWiseMultiplicationFixture.h" - -namespace arm_compute -{ -namespace test -{ -namespace validation -{ -namespace -{ -const float tolerance = 1.f; -const float scale_255 = 1.f / 255.f; -const float scale_unity = 1.f; - -// *INDENT-OFF* -// clang-format off -#define FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(TEST_NAME, FIXTURE, MODE, SHAPES, DT1, DT2, SCALE, RP, FPP_START, FPP_END) \ - FIXTURE_DATA_TEST_CASE(TEST_NAME, NEFixedPointPixelWiseMultiplication##FIXTURE, framework::DatasetMode::MODE, \ - combine(combine(combine(combine(combine(combine( \ - datasets::SHAPES, \ - framework::dataset::make("DataType1", DataType::DT1)), \ - framework::dataset::make("DataType2", DataType::DT2)), \ - framework::dataset::make("Scale", std::move(SCALE))), \ - datasets::ConvertPolicies()), \ - framework::dataset::make("RoundingPolicy", RoundingPolicy::RP)), \ - framework::dataset::make("FixedPointPosition", FPP_START, FPP_END))) \ - { \ - validate(Accessor(_target), _reference); \ - } - -#define FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(TEST_NAME, FIXTURE, MODE, SHAPES, DT1, DT2, RP, FPP, TOLERANCE) \ - FIXTURE_DATA_TEST_CASE(TEST_NAME, NEFixedPointPixelWiseMultiplication##FIXTURE, framework::DatasetMode::MODE, \ - combine(combine(combine(combine(combine(combine( \ - datasets::SHAPES, \ - framework::dataset::make("DataType1", DataType::DT1)), \ - framework::dataset::make("DataType2", DataType::DT2)), \ - framework::dataset::make("Scale", 1.f / static_cast(1 << (FPP)))), \ - datasets::ConvertPolicies()), \ - framework::dataset::make("RoundingPolicy", RoundingPolicy::RP)), \ - framework::dataset::make("FixedPointPosition", FPP))) \ - { \ - validate(Accessor(_target), _reference, AbsoluteTolerance(TOLERANCE), 0.f); \ - } -// clang-format on -// *INDENT-ON* -} // namespace - -template -using NEFixedPointPixelWiseMultiplicationFixture = FixedPointPixelWiseMultiplicationValidationFixture; - -TEST_SUITE(NEON) -TEST_SUITE(FixedPointPixelWiseMultiplication) - -TEST_SUITE(QS8) - -TEST_SUITE(Scale255) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunTiny, Fixture, PRECOMMIT, TinyShapes(), QS8, QS8, scale_255, TO_NEAREST_UP, 1, 7) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunSmall, Fixture, NIGHTLY, SmallShapes(), QS8, QS8, scale_255, TO_NEAREST_UP, 1, 7) -TEST_SUITE_END() // Scale255 - -TEST_SUITE(ScaleUnity) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunTiny, Fixture, PRECOMMIT, TinyShapes(), QS8, QS8, scale_unity, TO_ZERO, 1, 7) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunSmall, Fixture, NIGHTLY, SmallShapes(), QS8, QS8, scale_unity, TO_ZERO, 1, 7) -TEST_SUITE_END() // ScaleUnity - -TEST_SUITE(ScaleOther) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther1, Fixture, PRECOMMIT, TinyShapes(), QS8, QS8, TO_ZERO, 1, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther2, Fixture, PRECOMMIT, TinyShapes(), 
QS8, QS8, TO_ZERO, 2, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther3, Fixture, PRECOMMIT, TinyShapes(), QS8, QS8, TO_ZERO, 3, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther4, Fixture, PRECOMMIT, TinyShapes(), QS8, QS8, TO_ZERO, 4, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther5, Fixture, PRECOMMIT, TinyShapes(), QS8, QS8, TO_ZERO, 5, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther6, Fixture, PRECOMMIT, TinyShapes(), QS8, QS8, TO_ZERO, 6, tolerance) - -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunSmallOther1, Fixture, NIGHTLY, SmallShapes(), QS8, QS8, TO_ZERO, 1, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunSmallOther2, Fixture, NIGHTLY, SmallShapes(), QS8, QS8, TO_ZERO, 2, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunSmallOther3, Fixture, NIGHTLY, SmallShapes(), QS8, QS8, TO_ZERO, 3, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunSmallOther4, Fixture, NIGHTLY, SmallShapes(), QS8, QS8, TO_ZERO, 4, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunSmallOther5, Fixture, NIGHTLY, SmallShapes(), QS8, QS8, TO_ZERO, 5, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunSmallOther6, Fixture, NIGHTLY, SmallShapes(), QS8, QS8, TO_ZERO, 6, tolerance) -TEST_SUITE_END() // ScaleOther - -TEST_SUITE_END() // QS8 - -TEST_SUITE(QS16) - -TEST_SUITE(Scale255) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunTiny, Fixture, PRECOMMIT, TinyShapes(), QS16, QS16, scale_255, TO_NEAREST_UP, 1, 15) -TEST_SUITE_END() // Scale255 - -TEST_SUITE(ScaleUnity) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunTiny, Fixture, PRECOMMIT, TinyShapes(), QS16, QS16, scale_unity, TO_ZERO, 1, 15) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunSmall, Fixture, NIGHTLY, SmallShapes(), QS16, QS16, scale_unity, TO_ZERO, 1, 15) -TEST_SUITE_END() // ScaleUnity - -TEST_SUITE(ScaleOther) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther1, Fixture, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 1, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther2, Fixture, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 2, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther3, Fixture, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 3, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther4, Fixture, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 4, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther5, Fixture, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 5, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther6, Fixture, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 6, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther7, Fixture, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 7, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther8, Fixture, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 8, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther9, Fixture, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 9, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther10, Fixture, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 10, tolerance) 
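These ScaleOther cases set Scale to 1 / 2^n while the tensors carry fpp fractional bits, so for fixed point data the pixel-wise multiplication reduces to an integer multiply followed by a right shift. A scalar model of the ScaleUnity variant is sketched below; qs16_mul_unity is a hypothetical helper that ignores saturation and assumes a non-negative product so that >> matches RoundingPolicy::TO_ZERO:

#include <cstdint>

// (a / 2^fpp) * (b / 2^fpp) carries 2*fpp fractional bits; with Scale == 1,
// renormalising back to fpp bits is a single shift. The ScaleOther suites fold
// the extra 1 / 2^n factor into the shift amount.
int16_t qs16_mul_unity(int16_t a, int16_t b, int fpp)
{
    const int32_t product = static_cast<int32_t>(a) * static_cast<int32_t>(b);
    return static_cast<int16_t>(product >> fpp);
}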
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther11, Fixture, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 11, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther12, Fixture, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 12, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther13, Fixture, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 13, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther14, Fixture, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 14, tolerance) -TEST_SUITE_END() // ScaleOther - -TEST_SUITE_END() // QS16 - -TEST_SUITE_END() -TEST_SUITE_END() -} // namespace validation -} // namespace test -} // namespace arm_compute diff --git a/tests/validation/NEON/Flatten.cpp b/tests/validation/NEON/Flatten.cpp index 332716925a..86f1a11262 100644 --- a/tests/validation/NEON/Flatten.cpp +++ b/tests/validation/NEON/Flatten.cpp @@ -80,38 +80,6 @@ TEST_SUITE_END() #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ TEST_SUITE_END() -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(RunTiny, NEFlattenLayerFixture, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::Tiny3DShapes(), datasets::Tiny4DShapes()), - framework::dataset::make("DataType", DataType::QS8))) -{ - // Validate output - validate(Accessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEFlattenLayerFixture, framework::DatasetMode::NIGHTLY, combine(framework::dataset::concat(datasets::Small3DShapes(), datasets::Small4DShapes()), - framework::dataset::make("DataType", DataType::QS8))) -{ - // Validate output - validate(Accessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(RunTiny, NEFlattenLayerFixture, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::Tiny3DShapes(), datasets::Tiny4DShapes()), - framework::dataset::make("DataType", DataType::QS16))) -{ - // Validate output - validate(Accessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEFlattenLayerFixture, framework::DatasetMode::NIGHTLY, combine(framework::dataset::concat(datasets::Small3DShapes(), datasets::Small4DShapes()), - framework::dataset::make("DataType", DataType::QS16))) -{ - // Validate output - validate(Accessor(_target), _reference); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/NEON/FullyConnectedLayer.cpp b/tests/validation/NEON/FullyConnectedLayer.cpp index ed0edcd3be..3adcf61dc9 100644 --- a/tests/validation/NEON/FullyConnectedLayer.cpp +++ b/tests/validation/NEON/FullyConnectedLayer.cpp @@ -47,8 +47,6 @@ constexpr RelativeTolerance tolerance_f32(0.01f); #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC constexpr RelativeTolerance tolerance_f16(0.01f); #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC*/ -/** Tolerance for fixed point operations */ -constexpr AbsoluteTolerance tolerance_fixed_point(1.f); /** CNN data types */ const auto CNNDataTypes = framework::dataset::make("DataType", @@ -57,8 +55,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType", DataType::F16, #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ DataType::F32, - DataType::QS8, - DataType::QS16, }); const auto FullyConnectedParameters = combine(framework::dataset::make("TransposeWeights", { false, true }), framework::dataset::make("ReshapeWeights", { false, true })); @@ -119,7 +115,6 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, 
combine(combine(frame // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip( framework::dataset::make("InputInfo", { TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::F32), // Mismatching data types - TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::QS8, 2), // Mismatching fixed point position TensorInfo(TensorShape(8U, 4U, 6U, 4U), 1, DataType::F32), TensorInfo(TensorShape(8U, 4U, 6U, 4U), 1, DataType::F32), TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::F32), // Invalid weights dimensions @@ -127,7 +122,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip( TensorInfo(TensorShape(8U, 4U, 6U, 4U), 1, DataType::F32), }), framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(315U, 271U), 1, DataType::F16), - TensorInfo(TensorShape(315U, 271U), 1, DataType::QS8, 3), TensorInfo(TensorShape(192U, 192U), 1, DataType::F32), TensorInfo(TensorShape(192U, 192U), 1, DataType::F32), TensorInfo(TensorShape(217U, 315U), 1, DataType::F32), @@ -135,7 +129,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip( TensorInfo(TensorShape(192U, 192U), 1, DataType::F32), })), framework::dataset::make("BiasInfo",{ TensorInfo(TensorShape(271U), 1, DataType::F32), - TensorInfo(TensorShape(271U), 1, DataType::QS8, 2), TensorInfo(TensorShape(192U), 1, DataType::F32), TensorInfo(TensorShape(192U), 1, DataType::F32), TensorInfo(TensorShape(271U), 1, DataType::F32), @@ -143,16 +136,15 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip( TensorInfo(TensorShape(192U), 1, DataType::F32), })), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(271U, 3U), 1, DataType::F32), - TensorInfo(TensorShape(271U, 3U), 1, DataType::QS8, 3), TensorInfo(TensorShape(192U, 4U), 1, DataType::F32), TensorInfo(TensorShape(192U, 4U), 1, DataType::F32), TensorInfo(TensorShape(271U, 3U), 1, DataType::F32), TensorInfo(TensorShape(271U, 3U), 1, DataType::F32), TensorInfo(TensorShape(192U, 4U), 1, DataType::F32), })), - framework::dataset::make("TransposeWeights",{ true, true, true, false, true, true, true })), - framework::dataset::make("ReshapedWeights",{ false, false, false, false, false, false , false})), - framework::dataset::make("Expected", { false, false, true, true, false, false, true })), + framework::dataset::make("TransposeWeights",{ true, true, false, true, true, true })), + framework::dataset::make("ReshapedWeights",{ false, false, false, false, false , false})), + framework::dataset::make("Expected", { false, true, true, false, false, true })), input_info, weights_info, bias_info, output_info, transpose_weights, reshaped_weights, expected) { Status status = NEFullyConnectedLayer::validate(&input_info.clone()->set_is_resizable(false), &weights_info.clone()->set_is_resizable(false), &bias_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), transpose_weights, reshaped_weights); @@ -203,52 +195,6 @@ TEST_SUITE_END() template using NEFullyConnectedLayerFixedPointFixture = FullyConnectedLayerValidationFixedPointFixture; -TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) -// Testing for fixed point position [1,6) as reciprocal limits the maximum fixed point position to 5 -FIXTURE_DATA_TEST_CASE(RunTiny, NEFullyConnectedLayerFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyFullyConnectedLayerDataset(), - FullyConnectedParameters), - framework::dataset::make("DataType", - DataType::QS8)), - 
framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_fixed_point); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallFullyConnectedLayerDataset(), - FullyConnectedParameters), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_fixed_point); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 14 -FIXTURE_DATA_TEST_CASE(RunTiny, NEFullyConnectedLayerFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyFullyConnectedLayerDataset(), - FullyConnectedParameters), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_fixed_point); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallFullyConnectedLayerDataset(), - FullyConnectedParameters), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_fixed_point); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/NEON/GEMM.cpp b/tests/validation/NEON/GEMM.cpp index bfa2db443a..e0f63a8a2d 100644 --- a/tests/validation/NEON/GEMM.cpp +++ b/tests/validation/NEON/GEMM.cpp @@ -50,7 +50,6 @@ namespace validation namespace { constexpr AbsoluteTolerance tolerance_f(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for floating point data types */ -constexpr AbsoluteTolerance tolerance_q(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */ /** CNN data types */ const auto CNNDataTypes = framework::dataset::make("DataType", @@ -59,8 +58,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType", DataType::F16, #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ DataType::F32, - DataType::QS8, - DataType::QS16, }); const auto data_interleave = framework::dataset::make("M", 8, 12) * framework::dataset::make("N", 8, 12); @@ -82,33 +79,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMTranspose1xWFixture, framework::DatasetMo } TEST_SUITE_END() // FP32 -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -using NEGEMMTranspose1xW = NESynthetizeFunctionWithZeroConstantBorder; -using NEGEMMTranspose1xWFixture = GEMMTranspose1xWValidationFixedPointFixture; -FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMTranspose1xWFixture, framework::DatasetMode::PRECOMMIT, data_transpose * - framework::dataset::make("DataType", DataType::QS8) - * framework::dataset::make("FractionalBits", 1, 7)) -{ - // Validate output - validate(Accessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -using NEGEMMTranspose1xW = NESynthetizeFunctionWithZeroConstantBorder; -using NEGEMMTranspose1xWFixture = GEMMTranspose1xWValidationFixedPointFixture; -FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMTranspose1xWFixture, framework::DatasetMode::PRECOMMIT, data_transpose * - framework::dataset::make("DataType", DataType::QS16) - * 
framework::dataset::make("FractionalBits", 1, 14)) -{ - // Validate output - validate(Accessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE_END() - TEST_SUITE_END() // TRANSPOSE_1XW TEST_SUITE(INTERLEAVE_4X4) @@ -123,31 +93,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMInterleave4x4Fixture, framework::DatasetM } TEST_SUITE_END() // FP32 -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -using NEGEMMInterleave4x4Fixture = GEMMInterleave4x4ValidationFixedPointFixture; -FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMInterleave4x4Fixture, framework::DatasetMode::PRECOMMIT, data_interleave * - framework::dataset::make("DataType", DataType::QS8) - * framework::dataset::make("FractionalBits", 1, 7)) -{ - // Validate output - validate(Accessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -using NEGEMMInterleave4x4Fixture = GEMMInterleave4x4ValidationFixedPointFixture; -FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMInterleave4x4Fixture, framework::DatasetMode::PRECOMMIT, data_interleave * - framework::dataset::make("DataType", DataType::QS16) - * framework::dataset::make("FractionalBits", 1, 14)) -{ - // Validate output - validate(Accessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE_END() - TEST_SUITE_END() // INTERLEAVE_4X4 DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallGEMMDataset(), datasets::LargeGEMMDataset()), CNNDataTypes), @@ -211,46 +156,6 @@ TEST_SUITE_END() template using NEGEMMFixedPointFixture = GEMMValidationFixedPointFixture; -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::TinyGEMMDataset(), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 7))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_q); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SmallGEMMDataset(), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 7))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_q); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::TinyGEMMDataset(), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_q); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SmallGEMMDataset(), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_q); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp index ed24d618f5..eb350e1029 100644 --- a/tests/validation/NEON/GEMMLowp.cpp +++ b/tests/validation/NEON/GEMMLowp.cpp @@ -102,7 +102,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, framework::dataset::c // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8, 
diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp
index ed24d618f5..eb350e1029 100644
--- a/tests/validation/NEON/GEMMLowp.cpp
+++ b/tests/validation/NEON/GEMMLowp.cpp
@@ -102,7 +102,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, framework::dataset::c
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
     framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Input not a multiple of 4
-                                             TensorInfo(TensorShape(21U, 13U), 1, DataType::QS8, 2),                                 // Mismatching data type
+                                             TensorInfo(TensorShape(21U, 13U), 1, DataType::S32, 2),                                 // Mismatching data type
                                              TensorInfo(TensorShape(20U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Invalid dimensions
                                              TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Invalid dimensions
                                              TensorInfo(TensorShape(16U, 32U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)),
diff --git a/tests/validation/NEON/Im2Col.cpp b/tests/validation/NEON/Im2Col.cpp
index 3a45fa7ae4..d2e7875853 100644
--- a/tests/validation/NEON/Im2Col.cpp
+++ b/tests/validation/NEON/Im2Col.cpp
@@ -52,20 +52,18 @@ TEST_SUITE(Im2Col)
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
     framework::dataset::make("InputInfo", { TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::U8),      // Unsupported data type
                                             TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::F32),     // Mismatching data type
-                                            TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::QS8, 2),  // Mismatching fixed point
                                             TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::QASYMM8), // Bias not supported with QASYMM8
                                             TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::QASYMM8), // Mismatching shapes
                                             TensorInfo(TensorShape(10U, 12U, 2U, 2U), 1, DataType::QASYMM8),
                                           }),
     framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::F16),
                                             TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::F16),
-                                            TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::QS8, 3),
                                             TensorInfo(TensorShape(3U, 3U, 10U, 2U), 1, DataType::QASYMM8),
                                             TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::QASYMM8),
                                             TensorInfo(TensorShape(18U, 80U, 1U, 2U), 1, DataType::QASYMM8),
                                           })),
-    framework::dataset::make("HasBias", { true, true, true, true, false, false })),
-    framework::dataset::make("Expected", { false, false, false, false, false, true })),
+    framework::dataset::make("HasBias", { true, true, true, false, false })),
+    framework::dataset::make("Expected", { false, false, false, false, true })),
     input_info, output_info, has_bias, expected)
 {
     bool status = bool(NEIm2Col::validate(&input_info, &output_info, Size2D(3U, 3U), PadStrideInfo(), has_bias, false, false));
diff --git a/tests/validation/NEON/NormalizationLayer.cpp b/tests/validation/NEON/NormalizationLayer.cpp
index cf63675246..8c66611f49 100644
--- a/tests/validation/NEON/NormalizationLayer.cpp
+++ b/tests/validation/NEON/NormalizationLayer.cpp
@@ -48,9 +48,6 @@ namespace
 constexpr AbsoluteTolerance<float> tolerance_f16(0.001f);
 #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
 constexpr AbsoluteTolerance<float> tolerance_f32(0.00001f);
-/** Tolerance for fixed point operations */
-constexpr AbsoluteTolerance<int8_t>  tolerance_qs8(2);
-constexpr AbsoluteTolerance<int16_t> tolerance_qs16(4);
 
 /** Input data set. 
*/ const auto NormalizationDatasetQS = combine(combine(combine(combine(datasets::TinyShapes(), datasets::NormalizationTypes()), framework::dataset::make("NormalizationSize", 3, 9, 2)), @@ -76,7 +73,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching shapes TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Even normalization TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Non implemented IN_MAP_2D - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 4), // Mismatching fixed point position TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Window shrink TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 0), }), @@ -84,7 +80,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32, 0), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 0), })), @@ -93,10 +88,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( NormalizationLayerInfo(NormType::IN_MAP_1D, 4), NormalizationLayerInfo(NormType::IN_MAP_2D, 5), NormalizationLayerInfo(NormType::IN_MAP_1D, 5), - NormalizationLayerInfo(NormType::IN_MAP_1D, 5), NormalizationLayerInfo(NormType::CROSS_MAP, 1), })), - framework::dataset::make("Expected", { false, false, false, false, false, false, true })), + framework::dataset::make("Expected", { false, false, false, false, false, true })), input_info, output_info, norm_info, expected) { bool is_valid = bool(NENormalizationLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), norm_info)); @@ -141,44 +135,6 @@ TEST_SUITE_END() template using NENormalizationLayerFixedPointFixture = NormalizationValidationFixedPointFixture; -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -// Testing for fixed point position [1,6) as reciprocal limits the maximum fixed point position to 5 -FIXTURE_DATA_TEST_CASE(RunTiny, NENormalizationLayerFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(NormalizationDatasetQS, framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_qs8); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NENormalizationLayerFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(NormalizationDataset, framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_qs8); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 14 -FIXTURE_DATA_TEST_CASE(RunTiny, NENormalizationLayerFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(NormalizationDatasetQS, framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_qs16); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NENormalizationLayerFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(NormalizationDataset, framework::dataset::make("DataType", 
- DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_qs16); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/NEON/PixelWiseMultiplication.cpp b/tests/validation/NEON/PixelWiseMultiplication.cpp index 9304c8bad0..77da473e0d 100644 --- a/tests/validation/NEON/PixelWiseMultiplication.cpp +++ b/tests/validation/NEON/PixelWiseMultiplication.cpp @@ -114,10 +114,6 @@ using NEPixelWiseMultiplicationToF16Fixture = PixelWiseMultiplicationValidationF template using NEPixelWiseMultiplicationToF32Fixture = PixelWiseMultiplicationValidationFixture; template -using NEPixelWiseMultiplicationToQS8Fixture = PixelWiseMultiplicationValidationFixture; -template -using NEPixelWiseMultiplicationToQS16Fixture = PixelWiseMultiplicationValidationFixture; -template using NEPixelWiseMultiplicationBroadcastFixture = PixelWiseMultiplicationBroadcastValidationFixture; TEST_SUITE(NEON) @@ -132,9 +128,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip( TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid scale TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching data type - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Invalid scale + TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching data type }), framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), @@ -142,9 +136,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip( TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS16, 2), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), + TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), })), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), @@ -152,12 +144,10 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip( TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), + TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), })), - framework::dataset::make("Scale",{ scale_unity, scale_unity, scale_unity, -1.f, scale_unity, scale_unity, scale_unity, scale_unity, 3.f})), - framework::dataset::make("Expected", { true, true, false, false, false, false, false, false, false })), + framework::dataset::make("Scale",{ scale_unity, scale_unity, scale_unity, -1.f, scale_unity, scale_unity, scale_unity})), + framework::dataset::make("Expected", { true, true, false, false, false, false, false })), input1_info, input2_info, output_info, 
scale, expected) { bool has_error = bool(NEPixelWiseMultiplication::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), scale, ConvertPolicy::WRAP, RoundingPolicy::TO_ZERO)); diff --git a/tests/validation/NEON/PoolingLayer.cpp b/tests/validation/NEON/PoolingLayer.cpp index b44f945eb7..8762f1f7cc 100644 --- a/tests/validation/NEON/PoolingLayer.cpp +++ b/tests/validation/NEON/PoolingLayer.cpp @@ -65,8 +65,6 @@ constexpr AbsoluteTolerance tolerance_f32(0.001f); /**< Tolerance value f #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC constexpr AbsoluteTolerance tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for float types */ #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ -constexpr AbsoluteTolerance tolerance_qs8(0); /**< Tolerance value for comparing reference's output against implementation's output for quantized input */ -constexpr AbsoluteTolerance tolerance_qs16(0); /**< Tolerance value for comparing reference's output against implementation's output for quantized input */ constexpr AbsoluteTolerance tolerance_qasymm8(1); /**< Tolerance value for comparing reference's output against implementation's output for 8-bit asymmetric type */ } // namespace @@ -80,8 +78,6 @@ TEST_SUITE(PoolingLayer) DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Window shrink - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 4), // Mismatching fixed point position - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS16, 11), // Window shrink TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid pad/size combination TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid pad/size combination TensorInfo(TensorShape(15U, 13U, 5U), 1, DataType::F32, 0), // Non-rectangular Global Pooling @@ -90,8 +86,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16, 0), TensorInfo(TensorShape(25U, 10U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::QS8, 5), - TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::QS16, 11), TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32, 0), TensorInfo(TensorShape(25U, 16U, 2U), 1, DataType::F32, 0), TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32, 0), @@ -99,8 +93,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0), })), framework::dataset::make("PoolInfo", { PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)), - PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)), - PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)), PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)), PoolingLayerInfo(PoolingType::AVG, 2, PadStrideInfo(1, 1, 2, 0)), PoolingLayerInfo(PoolingType::AVG, 2, PadStrideInfo(1, 1, 0, 2)), @@ -108,7 +100,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( PoolingLayerInfo(PoolingType::MAX), PoolingLayerInfo(PoolingType::AVG), })), - framework::dataset::make("Expected", { false, false, false, false, false, false, true, false, false, true })), + framework::dataset::make("Expected", { 
false, false, false, false, true, false, false, true })), input_info, output_info, pool_info, expected) { bool is_valid = bool(NEPoolingLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), pool_info)); @@ -170,42 +162,6 @@ TEST_SUITE_END() // Float template using NEPoolingLayerFixedPointFixture = PoolingLayerValidationFixedPointFixture; -TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(RunTiny, NEPoolingLayerFixedPointFixture, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapes(), combine(PoolingLayerDatasetQS, - framework::dataset::make("DataType", DataType::QS8))), - framework::dataset::make("FractionalBits", 1, 5))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_qs8); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEPoolingLayerFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SmallShapes(), combine(PoolingLayerDatasetQS, - framework::dataset::make("DataType", DataType::QS8))), - framework::dataset::make("FractionalBits", 1, 5))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_qs8); -} -TEST_SUITE_END() // QS8 - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(RunTiny, NEPoolingLayerFixedPointFixture, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapes(), combine(PoolingLayerDatasetQS, - framework::dataset::make("DataType", DataType::QS16))), - framework::dataset::make("FractionalBits", 1, 13))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_qs16); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEPoolingLayerFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SmallShapes(), combine(PoolingLayerDatasetQS, - framework::dataset::make("DataType", DataType::QS16))), - framework::dataset::make("FractionalBits", 1, 13))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_qs16); -} -TEST_SUITE_END() // QS16 -TEST_SUITE_END() // FixedPoint - TEST_SUITE(Quantized) template diff --git a/tests/validation/NEON/SoftmaxLayer.cpp b/tests/validation/NEON/SoftmaxLayer.cpp index e091c0268a..4a1aa23ac1 100644 --- a/tests/validation/NEON/SoftmaxLayer.cpp +++ b/tests/validation/NEON/SoftmaxLayer.cpp @@ -47,8 +47,6 @@ constexpr AbsoluteTolerance tolerance_f32(0.000001f); #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC constexpr AbsoluteTolerance tolerance_f16(0.0001f); #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC*/ -/** Tolerance for fixed point operations */ -constexpr AbsoluteTolerance tolerance_fixed_point(2); /** Tolerance for quantized operations */ constexpr AbsoluteTolerance tolerance_qasymm8(1); @@ -60,8 +58,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType", DataType::F16, #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ DataType::F32, - DataType::QS8, - DataType::QS16, }); } // namespace @@ -101,15 +97,13 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(concat(datase DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip( framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data types TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32), }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16), TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32), - 
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3), TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32), })), - framework::dataset::make("Expected", { false, false, false, true })), + framework::dataset::make("Expected", { false, false, true })), input_info, output_info, expected) { bool is_valid = bool(NESoftmaxLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false))); @@ -162,46 +156,6 @@ TEST_SUITE_END() template using NESoftmaxLayerFixedPointFixture = SoftmaxValidationFixedPointFixture; -TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) -// Testing for fixed point position [1,6) as reciprocal limits the maximum fixed point position to 5 -FIXTURE_DATA_TEST_CASE(RunTiny, NESoftmaxLayerFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SoftmaxLayerTinyShapes(), framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_fixed_point); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NESoftmaxLayerFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SoftmaxLayerSmallShapes(), framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_fixed_point); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 14 -FIXTURE_DATA_TEST_CASE(RunTiny, NESoftmaxLayerFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SoftmaxLayerTinyShapes(), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_fixed_point); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NESoftmaxLayerFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SoftmaxLayerSmallShapes(), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_fixed_point); -} -TEST_SUITE_END() -TEST_SUITE_END() - template using NESoftmaxLayerQuantizedFixture = SoftmaxValidationQuantizedFixture; diff --git a/tests/validation/UNIT/FixedPoint.cpp b/tests/validation/UNIT/FixedPoint.cpp deleted file mode 100644 index 66a4b25fc9..0000000000 --- a/tests/validation/UNIT/FixedPoint.cpp +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Copyright (c) 2017 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#include "tests/validation/FixedPoint.h" - -#include "tests/Globals.h" -#include "tests/framework/Asserts.h" -#include "tests/framework/Macros.h" -#include "tests/framework/datasets/Datasets.h" -#include "tests/validation/Validation.h" - -namespace arm_compute -{ -namespace test -{ -namespace validation -{ -namespace -{ -const auto FuncNamesDataset = framework::dataset::make("FunctionNames", { FixedPointOp::ADD, - FixedPointOp::SUB, - FixedPointOp::MUL, - FixedPointOp::EXP, - FixedPointOp::LOG, - FixedPointOp::INV_SQRT - }); - -template -void load_array_from_numpy(const std::string &file, std::vector &shape, std::vector &data) // NOLINT -{ - try - { - npy::LoadArrayFromNumpy(file, shape, data); - } - catch(const std::runtime_error &e) - { - throw framework::FileNotFound("Could not load npy file: " + file + " (" + e.what() + ")"); - } -} -} // namespace - -TEST_SUITE(UNIT) -TEST_SUITE(FixedPoint) - -// *INDENT-OFF* -// clang-format off -DATA_TEST_CASE(FixedPointQS8Inputs, framework::DatasetMode::ALL, combine( - FuncNamesDataset, - framework::dataset::make("FractionalBits", 1, 7)), - func_name, frac_bits) -// clang-format on -// *INDENT-ON* -{ - std::vector data; - std::vector shape; //NOLINT - - std::string func_name_lower = to_string(func_name); - std::transform(func_name_lower.begin(), func_name_lower.end(), func_name_lower.begin(), ::tolower); - - const std::string inputs_file = library->path() - + "fixed_point/" - + func_name_lower - + "_Q8." - + support::cpp11::to_string(frac_bits) - + ".in.npy"; - - load_array_from_numpy(inputs_file, shape, data); - - // Values stored as doubles so reinterpret as floats - const auto *float_val = reinterpret_cast(&data[0]); - const size_t num_elements = data.size() * sizeof(double) / sizeof(float); - - for(unsigned int i = 0; i < num_elements; ++i) - { - // Convert to fixed point - fixed_point_arithmetic::fixed_point in_val(float_val[i], frac_bits); - - // Check that the value didn't change - ARM_COMPUTE_EXPECT(static_cast(in_val) == float_val[i], framework::LogLevel::ERRORS); - } -} - -//FIXME: Figure out how to handle expected failures properly -// The last input argument specifies the expected number of failures for a -// given combination of (function name, number of fractional bits) as defined -// by the first two arguments. - -// *INDENT-OFF* -// clang-format off -DATA_TEST_CASE(FixedPointQS8Outputs, framework::DatasetMode::ALL, zip(combine( - FuncNamesDataset, - framework::dataset::make("FractionalBits", 1, 7)), - framework::dataset::make("ExpectedFailures", { 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, - 7, 8, 13, 2, 0, 0, - 0, 0, 0, 0, 0, 0, - 0, 0, 0, 5, 33, 96 })), - func_name, frac_bits, expected_failures) -// clang-format on -// *INDENT-ON* -{ - std::vector in_data; - std::vector in_shape; //NOLINT - - std::vector out_data; - std::vector out_shape; //NOLINT - - std::string func_name_lower = to_string(func_name); - std::transform(func_name_lower.begin(), func_name_lower.end(), func_name_lower.begin(), ::tolower); - - const std::string base_file_name = library->path() - + "fixed_point/" - + func_name_lower - + "_Q8." 
- + support::cpp11::to_string(frac_bits); - - const std::string inputs_file = base_file_name + ".in.npy"; - const std::string reference_file = base_file_name + ".out.npy"; - - load_array_from_numpy(inputs_file, in_shape, in_data); - load_array_from_numpy(reference_file, out_shape, out_data); - - ARM_COMPUTE_EXPECT(in_shape.front() == out_shape.front(), framework::LogLevel::ERRORS); - - const float step_size = std::pow(2.f, -frac_bits); - int64_t num_mismatches = 0; - - // Values stored as doubles so reinterpret as floats - const auto *float_val = reinterpret_cast(&in_data[0]); - const auto *ref_val = reinterpret_cast(&out_data[0]); - - const size_t num_elements = in_data.size() * sizeof(double) / sizeof(float); - - for(unsigned int i = 0; i < num_elements; ++i) - { - fixed_point_arithmetic::fixed_point in_val(float_val[i], frac_bits); - fixed_point_arithmetic::fixed_point out_val(0.f, frac_bits); - - float tolerance = 0.f; - - if(func_name == FixedPointOp::ADD) - { - out_val = in_val + in_val; - } - else if(func_name == FixedPointOp::SUB) - { - out_val = in_val - in_val; //NOLINT - } - else if(func_name == FixedPointOp::MUL) - { - tolerance = 1.f * step_size; - out_val = in_val * in_val; - } - else if(func_name == FixedPointOp::EXP) - { - tolerance = 2.f * step_size; - out_val = fixed_point_arithmetic::exp(in_val); - } - else if(func_name == FixedPointOp::LOG) - { - tolerance = 4.f * step_size; - out_val = fixed_point_arithmetic::log(in_val); - } - else if(func_name == FixedPointOp::INV_SQRT) - { - tolerance = 5.f * step_size; - out_val = fixed_point_arithmetic::inv_sqrt(in_val); - } - - if(std::abs(static_cast(out_val) - ref_val[i]) > tolerance) - { - ARM_COMPUTE_TEST_INFO("input = " << in_val); - ARM_COMPUTE_TEST_INFO("output = " << out_val); - ARM_COMPUTE_TEST_INFO("reference = " << ref_val[i]); - ARM_COMPUTE_TEST_INFO("tolerance = " << tolerance); - - ARM_COMPUTE_TEST_INFO((std::abs(static_cast(out_val) - ref_val[i]) <= tolerance)); - - ++num_mismatches; - } - } - - ARM_COMPUTE_EXPECT(num_mismatches == expected_failures, framework::LogLevel::ERRORS); -} - -TEST_SUITE_END() -TEST_SUITE_END() -} // namespace validation -} // namespace test -} // namespace arm_compute diff --git a/tests/validation/UNIT/TensorInfo.cpp b/tests/validation/UNIT/TensorInfo.cpp index dd7ae6d18c..b78f656932 100644 --- a/tests/validation/UNIT/TensorInfo.cpp +++ b/tests/validation/UNIT/TensorInfo.cpp @@ -112,12 +112,6 @@ TEST_CASE(TensorInfoBuild, framework::DatasetMode::ALL) ARM_COMPUTE_EXPECT(info.data_type() == DataType::S32, framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(info.num_channels() == 3, framework::LogLevel::ERRORS); - // Update data type channels and set fixed point position - info.set_data_type(DataType::QS8).set_num_channels(1).set_fixed_point_position(3); - ARM_COMPUTE_EXPECT(info.data_type() == DataType::QS8, framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(info.num_channels() == 1, framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(info.fixed_point_position() == 3, framework::LogLevel::ERRORS); - // Update data type and set quantization info info.set_data_type(DataType::QASYMM8).set_quantization_info(QuantizationInfo(0.5f, 15)); ARM_COMPUTE_EXPECT(info.data_type() == DataType::QASYMM8, framework::LogLevel::ERRORS); diff --git a/tests/validation/Validation.cpp b/tests/validation/Validation.cpp index d01ac12f97..0bc1d96306 100644 --- a/tests/validation/Validation.cpp +++ b/tests/validation/Validation.cpp @@ -62,14 +62,10 @@ double get_double_data(const void *ptr, DataType data_type) return 
*reinterpret_cast(ptr); case DataType::S8: return *reinterpret_cast(ptr); - case DataType::QS8: - return *reinterpret_cast(ptr); case DataType::U16: return *reinterpret_cast(ptr); case DataType::S16: return *reinterpret_cast(ptr); - case DataType::QS16: - return *reinterpret_cast(ptr); case DataType::U32: return *reinterpret_cast(ptr); case DataType::S32: diff --git a/tests/validation/fixtures/FixedPointFixture.h b/tests/validation/fixtures/FixedPointFixture.h deleted file mode 100644 index 3f3bb6e0b7..0000000000 --- a/tests/validation/fixtures/FixedPointFixture.h +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright (c) 2017 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef ARM_COMPUTE_TEST_FIXED_POINT_FIXTURE -#define ARM_COMPUTE_TEST_FIXED_POINT_FIXTURE - -#include "arm_compute/core/TensorShape.h" -#include "arm_compute/core/Types.h" -#include "tests/AssetsLibrary.h" -#include "tests/Globals.h" -#include "tests/IAccessor.h" -#include "tests/framework/Asserts.h" -#include "tests/framework/Fixture.h" -#include "tests/validation/Helpers.h" -#include "tests/validation/reference/FixedPoint.h" - -namespace arm_compute -{ -namespace test -{ -namespace validation -{ -template -class FixedPointValidationFixture : public framework::Fixture -{ -public: - template - void setup(TensorShape shape, DataType dt, FixedPointOp op, int fractional_bits) - { - _fractional_bits = fractional_bits; - _target = compute_target(shape, dt, op, fractional_bits); - _reference = compute_reference(shape, dt, op, fractional_bits); - } - -protected: - template - void fill(U &&tensor, int min, int max, int i) - { - std::uniform_int_distribution<> distribution(min, max); - library->fill(tensor, distribution, i); - } - - TensorType compute_target(const TensorShape &shape, DataType dt, FixedPointOp op, int fixed_point_position) - { - // Create tensors - TensorType src = create_tensor(shape, dt, 1, fixed_point_position); - TensorType dst = create_tensor(shape, dt, 1, fixed_point_position); - - // Allocate tensors - src.allocator()->allocate(); - dst.allocator()->allocate(); - - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); - - compute_target_impl(shape, dt, op, fixed_point_position, src, dst); - - return dst; - } - - SimpleTensor compute_reference(const TensorShape &shape, DataType dt, FixedPointOp op, int fixed_point_position) - { - 
// Create reference - SimpleTensor src{ shape, dt, 1, fixed_point_position }; - - // Fill reference - int min = 0; - int max = 0; - switch(op) - { - case FixedPointOp::EXP: - min = -(1 << (fixed_point_position - 1)); - max = (1 << (fixed_point_position - 1)); - break; - case FixedPointOp::INV_SQRT: - min = 1; - max = (dt == DataType::QS8) ? 0x7F : 0x7FFF; - break; - case FixedPointOp::LOG: - min = (1 << (fixed_point_position - 1)); - max = (dt == DataType::QS8) ? 0x3F : 0x3FFF; - break; - case FixedPointOp::RECIPROCAL: - min = 15; - max = (dt == DataType::QS8) ? 0x7F : 0x7FFF; - break; - default: - ARM_COMPUTE_ERROR("Fixed point operation not supported"); - break; - } - fill(src, min, max, 0); - - return reference::fixed_point_operation(src, op); - } - - TensorType _target{}; - SimpleTensor _reference{}; - int _fractional_bits{}; -}; -} // namespace validation -} // namespace test -} // namespace arm_compute -#endif /* ARM_COMPUTE_TEST_FIXED_POINT_FIXTURE */ diff --git a/tests/validation/fixtures/FixedPointPixelWiseMultiplicationFixture.h b/tests/validation/fixtures/FixedPointPixelWiseMultiplicationFixture.h deleted file mode 100644 index 73386dfc97..0000000000 --- a/tests/validation/fixtures/FixedPointPixelWiseMultiplicationFixture.h +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Copyright (c) 2017 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#ifndef ARM_COMPUTE_TEST_FIXED_POINT_PIXEL_WISE_MULTIPLICATION_FIXTURE -#define ARM_COMPUTE_TEST_FIXED_POINT_PIXEL_WISE_MULTIPLICATION_FIXTURE - -#include "arm_compute/core/TensorShape.h" -#include "arm_compute/core/Types.h" -#include "tests/AssetsLibrary.h" -#include "tests/Globals.h" -#include "tests/IAccessor.h" -#include "tests/framework/Asserts.h" -#include "tests/framework/Fixture.h" -#include "tests/validation/reference/FixedPointPixelWiseMultiplication.h" - -namespace arm_compute -{ -namespace test -{ -namespace validation -{ -template -class FixedPointPixelWiseMultiplicationValidationFixture : public framework::Fixture -{ -public: - template - void setup(TensorShape shape, - DataType dt_in1, - DataType dt_in2, - float scale, - ConvertPolicy convert_policy, - RoundingPolicy rounding_policy, - int fixed_point_position) - { - _target = compute_target(shape, dt_in1, dt_in2, scale, convert_policy, rounding_policy, fixed_point_position); - _reference = compute_reference(shape, dt_in1, dt_in2, scale, convert_policy, fixed_point_position); - } - -protected: - template - void fill(U &&tensor, unsigned int seed_offset) - { - library->fill_tensor_uniform(tensor, seed_offset); - } - - TensorType compute_target(const TensorShape &shape, DataType dt_in1, DataType dt_in2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, int fixed_point_position) - { - // Create tensors - TensorType src1 = create_tensor(shape, dt_in1, 1, fixed_point_position); - TensorType src2 = create_tensor(shape, dt_in2, 1, fixed_point_position); - TensorType dst = create_tensor(shape, dt_in2, 1, fixed_point_position); - - // Create and configure function - FunctionType multiply; - multiply.configure(&src1, &src2, &dst, scale, convert_policy, rounding_policy); - - ARM_COMPUTE_EXPECT(src1.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(src2.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); - - // Allocate tensors - src1.allocator()->allocate(); - src2.allocator()->allocate(); - dst.allocator()->allocate(); - - ARM_COMPUTE_EXPECT(!src1.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!src2.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); - - // Fill tensors - fill(AccessorType(src1), 0); - fill(AccessorType(src2), 1); - - // Compute function - multiply.run(); - - return dst; - } - - SimpleTensor compute_reference(const TensorShape &shape, DataType dt_in1, DataType dt_in2, float scale, ConvertPolicy convert_policy, int fixed_point_position) - { - // Create reference - SimpleTensor src1{ shape, dt_in1, 1, fixed_point_position }; - SimpleTensor src2{ shape, dt_in2, 1, fixed_point_position }; - - // Fill reference - fill(src1, 0); - fill(src2, 1); - - return reference::fixed_point_pixel_wise_multiplication(src1, src2, scale, convert_policy); - } - - TensorType _target{}; - SimpleTensor _reference{}; -}; -} // namespace validation -} // namespace test -} // namespace arm_compute -#endif /* ARM_COMPUTE_TEST_FIXED_POINT_PIXEL_WISE_MULTIPLICATION_FIXTURE */ diff --git a/tests/validation/reference/FixedPointPixelWiseMultiplication.cpp b/tests/validation/reference/FixedPointPixelWiseMultiplication.cpp deleted file mode 100644 index 636919b1ff..0000000000 --- a/tests/validation/reference/FixedPointPixelWiseMultiplication.cpp +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright (c) 2017 ARM 
Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * dst OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#include "FixedPointPixelWiseMultiplication.h" - -#include "tests/validation/FixedPoint.h" - -namespace arm_compute -{ -namespace test -{ -namespace validation -{ -namespace reference -{ -template -SimpleTensor fixed_point_pixel_wise_multiplication(const SimpleTensor &src1, const SimpleTensor &src2, float scale, ConvertPolicy convert_policy) -{ - using namespace fixed_point_arithmetic; - - SimpleTensor dst(src2.shape(), src2.data_type(), 1, src2.fixed_point_position()); - - const int fixed_point_position = src1.fixed_point_position(); - - ARM_COMPUTE_ERROR_ON_MSG(src1.data_type() != src2.data_type() || src1.data_type() != dst.data_type(), - "Tensors must all have the same DataType"); - ARM_COMPUTE_ERROR_ON_MSG(fixed_point_position != src2.fixed_point_position() || fixed_point_position != dst.fixed_point_position(), - "Fixed-point position must be the same for both inputs and outputs"); - - // Validate fixed_point_position - ARM_COMPUTE_ERROR_ON((src1.data_type() == DataType::QS8) && (fixed_point_position == 0 || fixed_point_position > 7)); - ARM_COMPUTE_ERROR_ON((src1.data_type() == DataType::QS16) && (fixed_point_position == 0 || fixed_point_position > 15)); - - const fixed_point fp_scale(scale, fixed_point_position); - const bool is_sat = convert_policy == ConvertPolicy::SATURATE; - - for(int i = 0; i < src1.num_elements(); ++i) - { - const fixed_point val1(src1[i], fixed_point_position, true); - fixed_point res(src2[i], fixed_point_position, true); - if(is_sat) - { - res = mul(mul(res, val1), fp_scale); - } - else - { - res = mul(mul(res, val1), fp_scale); - } - dst[i] = res.raw(); - } - - return dst; -} - -// *INDENT-OFF* -// clang-format off -template SimpleTensor fixed_point_pixel_wise_multiplication(const SimpleTensor &src1, const SimpleTensor &src2, float scale, ConvertPolicy convert_policy); -template SimpleTensor fixed_point_pixel_wise_multiplication(const SimpleTensor &src1, const SimpleTensor &src2, float scale, ConvertPolicy convert_policy); -// *INDENT-ON* -// clang-format on - -} // namespace reference -} // namespace validation -} // namespace test -} // namespace arm_compute diff --git a/tests/validation/reference/FixedPointPixelWiseMultiplication.h b/tests/validation/reference/FixedPointPixelWiseMultiplication.h deleted file mode 100644 index 124a33c9da..0000000000 --- a/tests/validation/reference/FixedPointPixelWiseMultiplication.h +++ /dev/null @@ 
-1,43 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_TEST_FIXED_POINT_PIXEL_WISE_MULTIPLICATION_H__
-#define __ARM_COMPUTE_TEST_FIXED_POINT_PIXEL_WISE_MULTIPLICATION_H__
-
-#include "tests/SimpleTensor.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-namespace reference
-{
-template <typename T>
-SimpleTensor<T> fixed_point_pixel_wise_multiplication(const SimpleTensor<T> &src1, const SimpleTensor<T> &src2, float scale, ConvertPolicy convert_policy);
-} // namespace reference
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* __ARM_COMPUTE_TEST_FIXED_POINT_PIXEL_WISE_MULTIPLICATION_H__ */
diff --git a/tests/validation/reference/PixelWiseMultiplication.cpp b/tests/validation/reference/PixelWiseMultiplication.cpp
index 546a886ac9..7304fb0673 100644
--- a/tests/validation/reference/PixelWiseMultiplication.cpp
+++ b/tests/validation/reference/PixelWiseMultiplication.cpp
@@ -45,10 +45,10 @@ namespace
 {
 /** Compute the result of `src1 * src2 * scale`. The result type always matches the type of @p src2.
  *
- * @param[in] src1            An input value. Data types supported: U8/QS8/QS16/S16/F16/F32.
+ * @param[in] src1            An input value. Data types supported: U8/S16/F16/F32.
  * @param[in] src2            An input value. Data types supported: same as @p src1.
  * @param[in] scale           Scale to apply after multiplication.
- *                            Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15. For QS8 and QS16 scale must be 1.
+ *                            Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
 * @param[in] convert_policy  Overflow policy. Supported overflow policies: Wrap, Saturate
 * @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
 */
-- cgit v1.2.1
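With QS8/QS16 gone, quantized coverage in these tests rests on QASYMM8, whose affine parameters travel in the QuantizationInfo(scale, offset) pairs visible in the GEMMLowp hunk above (e.g. QuantizationInfo(1.f/255, 10)). A minimal sketch of that mapping, assuming the usual real = scale * (q - offset) convention; the standalone helper names are illustrative, not ACL API.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Hypothetical helpers: QASYMM8 stores an unsigned byte q meaning
    // scale * (q - offset); with QuantizationInfo(1.f/255, 10), q = 10 is 0.0f.
    float dequantize_qasymm8(uint8_t q, float scale, int offset)
    {
        return scale * (static_cast<int>(q) - offset);
    }

    uint8_t quantize_qasymm8(float value, float scale, int offset)
    {
        const int q = static_cast<int>(std::lround(value / scale)) + offset;
        return static_cast<uint8_t>(std::min(255, std::max(0, q)));
    }

Unlike the removed fixed-point types, where changing FractionalBits changes the representable range by powers of two only, the affine scale and offset let QASYMM8 place its 256 levels over an arbitrary real interval, which is why the remaining quantized suites carry QuantizationInfo datasets instead of a FractionalBits axis.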