| author | Vidhya Sudhan Loganathan <vidhyasudhan.loganathan@arm.com> | 2018-06-18 14:40:56 +0100 |
|---|---|---|
| committer | Anthony Barbier <anthony.barbier@arm.com> | 2018-11-02 16:53:09 +0000 |
| commit | 0fc25454b6ce499b7f89792f91b81a61a42d3182 (patch) | |
| tree | 0c988cbbef3f5f1967cc2f77672d05a702191349 /tests/validation/CL | |
| parent | f5f0ecca39c905bc8c1f90e7469eed2221ca662f (diff) | |
| download | ComputeLibrary-0fc25454b6ce499b7f89792f91b81a61a42d3182.tar.gz | |
COMPMID-970: Remove QS8 / QS16 support
Remove QS8 and QS16 validation and benchmark tests
Change-Id: I566f1474c1fafcb3903115ec2d3a003d73e4c93b
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/133762
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Pablo Tello <pablo.tello@arm.com>
Diffstat (limited to 'tests/validation/CL')
21 files changed, 19 insertions, 1152 deletions
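Every file in the diff below follows the same two-step pattern: QS8/QS16 entries are dropped from the data-type datasets, and tensor creation stops passing a fixed-point position argument. The sketch below illustrates that shape of change with hypothetical stand-ins (`DataType`, `Tensor`, `create_tensor`, the example shape) rather than the real ComputeLibrary test API, so it compiles on its own; the actual calls being edited are the `create_tensor<CLTensor>(...)` and `framework::dataset::make("DataType", ...)` lines in the hunks that follow.

```cpp
// Self-contained illustration of the pattern applied throughout this patch.
// DataType, Tensor and create_tensor() here are hypothetical stand-ins, not
// the ComputeLibrary API; the real tests edit create_tensor<CLTensor>() calls
// and the CNNDataTypes datasets in exactly this way.
#include <iostream>
#include <utility>
#include <vector>

enum class DataType { F16, F32, QASYMM8 }; // QS8 and QS16 are no longer listed

struct Tensor
{
    std::vector<unsigned int> shape;
    DataType                  type;
    int                       num_channels;
};

// After the patch the trailing fixed_point_position argument is gone:
// only shape, data type and number of channels remain.
Tensor create_tensor(std::vector<unsigned int> shape, DataType type, int num_channels = 1)
{
    return Tensor{ std::move(shape), type, num_channels };
}

int main()
{
    // The narrowed dataset (per-file lists vary, e.g. ActivationLayer keeps only F16/F32).
    const std::vector<DataType> cnn_data_types{ DataType::F16, DataType::F32, DataType::QASYMM8 };

    for(DataType dt : cnn_data_types)
    {
        Tensor src = create_tensor({ 32U, 13U, 2U }, dt);
        Tensor dst = create_tensor({ 32U, 13U, 2U }, dt);
        std::cout << "configured tensors with " << src.shape.size()
                  << " dimensions and " << dst.num_channels << " channel(s)\n";
    }
    return 0;
}
```

The Validate data test cases shrink in the same way: the QS8/QS16 TensorInfo rows and their corresponding "Expected" flags are simply deleted, and the dedicated FixedPoint/Quantized QS8 and QS16 test suites are removed outright.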
diff --git a/tests/validation/CL/ActivationLayer.cpp b/tests/validation/CL/ActivationLayer.cpp index 5fd0855ded..f122f6deeb 100644 --- a/tests/validation/CL/ActivationLayer.cpp +++ b/tests/validation/CL/ActivationLayer.cpp @@ -99,9 +99,7 @@ AbsoluteTolerance<float> tolerance(ActivationLayerInfo::ActivationFunction activ const auto CNNDataTypes = framework::dataset::make("DataType", { DataType::F16, - DataType::F32, - DataType::QS8, - DataType::QS16, + DataType::F32 }); /** Input data sets. */ @@ -114,12 +112,9 @@ TEST_SUITE(ActivationLayer) DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(concat(datasets::SmallShapes(), datasets::LargeShapes()), CNNDataTypes), framework::dataset::make("InPlace", { false, true })), shape, data_type, in_place) { - // Set fixed point position data type allowed - const int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0; - // Create tensors - CLTensor src = create_tensor<CLTensor>(shape, data_type, 1, fixed_point_position); - CLTensor dst = create_tensor<CLTensor>(shape, data_type, 1, fixed_point_position); + CLTensor src = create_tensor<CLTensor>(shape, data_type, 1); + CLTensor dst = create_tensor<CLTensor>(shape, data_type, 1); ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); @@ -165,9 +160,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), // Unsupported activation TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), @@ -175,9 +167,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), - TensorInfo(), })), framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), @@ -185,11 +174,8 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), - ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), - ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), - ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), })), - framework::dataset::make("Expected", { false, false, true, true, false, false, false, true, true })), + framework::dataset::make("Expected", { false, false, true, true, false, false })), input_info, output_info, act_info, expected) { ARM_COMPUTE_EXPECT(bool(CLActivationLayer::validate(&input_info.clone()->set_is_resizable(false), (output_info.total_size() == 0) ? 
nullptr : &output_info.clone()->set_is_resizable(false), act_info)) == expected, framework::LogLevel::ERRORS); @@ -237,49 +223,6 @@ TEST_SUITE_END() template <typename T> using CLActivationLayerFixedPointFixture = ActivationValidationFixedPointFixture<CLTensor, CLAccessor, CLActivationLayer, T>; -TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) -// We test for fixed point precision [3,5] because [1,2] and [6,7] ranges cause -// overflowing issues in most of the transcendentals functions. -FIXTURE_DATA_TEST_CASE(RunTiny, CLActivationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), ActivationDataset), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 3, 6))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance(_function, _data_type)); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), ActivationDataset), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 3, 6))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance(_function, _data_type)); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 14 -FIXTURE_DATA_TEST_CASE(RunTiny, CLActivationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), ActivationDataset), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance(_function, _data_type)); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), ActivationDataset), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance(_function, _data_type)); -} -TEST_SUITE_END() -TEST_SUITE_END() - template <typename T> using CLActivationLayerQuantizedFixture = ActivationValidationQuantizedFixture<CLTensor, CLAccessor, CLActivationLayer, T>; diff --git a/tests/validation/CL/ArithmeticAddition.cpp b/tests/validation/CL/ArithmeticAddition.cpp index e9a892d744..9323ed2934 100644 --- a/tests/validation/CL/ArithmeticAddition.cpp +++ b/tests/validation/CL/ArithmeticAddition.cpp @@ -48,10 +48,6 @@ const auto ArithmeticAdditionU8Dataset = combine(combine(framework::dataset::mak DataType::U8)); const auto ArithmeticAdditionS16Dataset = combine(combine(framework::dataset::make("DataType", { DataType::U8, DataType::S16 }), framework::dataset::make("DataType", DataType::S16)), framework::dataset::make("DataType", DataType::S16)); -const auto ArithmeticAdditionQS8Dataset = combine(combine(framework::dataset::make("DataType", DataType::QS8), framework::dataset::make("DataType", DataType::QS8)), - framework::dataset::make("DataType", DataType::QS8)); -const auto ArithmeticAdditionQS16Dataset = combine(combine(framework::dataset::make("DataType", DataType::QS16), framework::dataset::make("DataType", DataType::QS16)), - framework::dataset::make("DataType", DataType::QS16)); const auto ArithmeticAdditionFP16Dataset = combine(combine(framework::dataset::make("DataType", DataType::F16), 
framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("DataType", DataType::F16)); const auto ArithmeticAdditionFP32Dataset = combine(combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::F32)), @@ -69,26 +65,20 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), // Window shrink TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), }), framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), })), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), })), - framework::dataset::make("Expected", { true, true, false, false, false, false, true })), + framework::dataset::make("Expected", { true, true, false, false, false})), input1_info, input2_info, output_info, expected) { ARM_COMPUTE_EXPECT(bool(CLArithmeticAddition::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), ConvertPolicy::WRAP)) == expected, framework::LogLevel::ERRORS); @@ -174,44 +164,6 @@ TEST_SUITE_END() template <typename T> using CLArithmeticAdditionFixedPointFixture = ArithmeticAdditionValidationFixedPointFixture<CLTensor, CLAccessor, CLArithmeticAddition, T>; -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(RunTiny, CLArithmeticAdditionFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), ArithmeticAdditionQS8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 7))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} - -FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionQS8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 7))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(RunTiny, CLArithmeticAdditionFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), ArithmeticAdditionQS16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - 
framework::dataset::make("FractionalBits", 1, 15))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} - -FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionQS16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 15))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE(Float) TEST_SUITE(FP16) FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ArithmeticAdditionFP16Dataset), diff --git a/tests/validation/CL/ArithmeticSubtraction.cpp b/tests/validation/CL/ArithmeticSubtraction.cpp index 43c94a7b9b..4ba5387a61 100644 --- a/tests/validation/CL/ArithmeticSubtraction.cpp +++ b/tests/validation/CL/ArithmeticSubtraction.cpp @@ -55,10 +55,6 @@ const auto ArithmeticSubtractionS16U8S16Dataset = combine(combine(framework::dat framework::dataset::make("DataType", DataType::S16)); const auto ArithmeticSubtractionU8S16S16Dataset = combine(combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S16)), framework::dataset::make("DataType", DataType::S16)); -const auto ArithmeticSubtractionQS8Dataset = combine(combine(framework::dataset::make("DataType", DataType::QS8), framework::dataset::make("DataType", DataType::QS8)), - framework::dataset::make("DataType", DataType::QS8)); -const auto ArithmeticSubtractionQS16Dataset = combine(combine(framework::dataset::make("DataType", DataType::QS16), framework::dataset::make("DataType", DataType::QS16)), - framework::dataset::make("DataType", DataType::QS16)); const auto ArithmeticSubtractionFP16Dataset = combine(combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("DataType", DataType::F16)); const auto ArithmeticSubtractionFP32Dataset = combine(combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::F32)), @@ -76,24 +72,18 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), // Window shrink TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), }), framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), })), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 
2U), 1, DataType::QS8, 3), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), })), framework::dataset::make("Expected", { true, true, false, false, false, false, true })), input1_info, input2_info, output_info, expected) @@ -244,48 +234,6 @@ TEST_SUITE_END() template <typename T1, typename T2 = T1, typename T3 = T1> using CLArithmeticSubtractionFixedPointFixture = ArithmeticSubtractionValidationFixedPointFixture<CLTensor, CLAccessor, CLArithmeticSubtraction, T1, T2, T3>; -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(RunTiny, CLArithmeticSubtractionFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), - ArithmeticSubtractionQS8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 7))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} - -FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), - ArithmeticSubtractionQS8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 7))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(RunTiny, CLArithmeticSubtractionFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), - ArithmeticSubtractionQS16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 15))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} - -FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), - ArithmeticSubtractionQS16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 15))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE(Float) TEST_SUITE(FP16) FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ArithmeticSubtractionFP16Dataset), diff --git a/tests/validation/CL/BatchNormalizationLayer.cpp b/tests/validation/CL/BatchNormalizationLayer.cpp index f6dc6b377c..de775bf807 100644 --- a/tests/validation/CL/BatchNormalizationLayer.cpp +++ b/tests/validation/CL/BatchNormalizationLayer.cpp @@ -46,8 +46,6 @@ namespace { constexpr AbsoluteTolerance<float> tolerance_f32(0.00001f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */ constexpr AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */ -constexpr AbsoluteTolerance<float> tolerance_qs8(3.0f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::QS8 */ -constexpr AbsoluteTolerance<float> tolerance_qs16(6.0f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::QS16 */ const auto act_infos = framework::dataset::make("ActivationInfo", { 
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), @@ -65,7 +63,7 @@ using CLBatchNormalizationLayerFixture = BatchNormalizationLayerValidationFixtur DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(), combine(framework::dataset::make("UseBeta", { false, true }), framework::dataset::make("UseGamma", { false, true }))), - framework::dataset::make("DataType", { DataType::QS8, DataType::QS16, DataType::F16, DataType::F32 })), + framework::dataset::make("DataType", { DataType::F16, DataType::F32 })), framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), shape0, shape1, epsilon, use_gamma, use_beta, dt, data_layout) { @@ -105,50 +103,34 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip( TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching data types TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching data types TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Invalid mean/var/beta/gamma shape - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point position - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Fused activation with fixed point not supported TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Unsupported fused activation TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Fused activation's a < b - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), - TensorInfo(), })), framework::dataset::make("MVBGInfo",{ TensorInfo(TensorShape(2U), 1, DataType::F32), TensorInfo(TensorShape(2U), 1, DataType::F32), TensorInfo(TensorShape(2U), 1, DataType::F16), TensorInfo(TensorShape(2U), 1, DataType::F32), TensorInfo(TensorShape(5U), 1, DataType::F32), - TensorInfo(TensorShape(2U), 1, DataType::QS8, 2), - TensorInfo(TensorShape(2U), 1, DataType::QS8, 2), TensorInfo(TensorShape(2U), 1, DataType::F32), TensorInfo(TensorShape(2U), 1, DataType::F32), - TensorInfo(TensorShape(2U), 1, DataType::QS8, 2), - TensorInfo(TensorShape(2U), 1, DataType::QS8, 2), })), framework::dataset::make("ActivationLayerInfo",{ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f), - ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f, 2.f), - ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f, 2.f), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH), 
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 2.f, 6.f), - ActivationLayerInfo(), - ActivationLayerInfo(), })), - framework::dataset::make("Expected", { true, false, false, false, false, false, false, false, false, true, true})), + framework::dataset::make("Expected", { true, false, false, false, false, false, false})), input_info, output_info, mvbg_info, act_info, expected) { const auto &mean_info = mvbg_info; @@ -189,42 +171,6 @@ FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<half>, framework TEST_SUITE_END() TEST_SUITE_END() -TEST_SUITE(Quantized) -template <typename T> -using CLBatchNormalizationLayerFixedPointFixture = BatchNormalizationLayerValidationFixedPointFixture<CLTensor, CLAccessor, CLBatchNormalizationLayer, T>; - -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, - combine(combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(), - framework::dataset::make("UseBeta", false)), - framework::dataset::make("UseGamma", false)), - framework::dataset::make("ActivationInfo", ActivationLayerInfo())), - framework::dataset::make("DataType", DataType::QS8)), - framework::dataset::make("DataLayout", DataLayout::NCHW)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_qs8, 0); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, - combine(combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(), - framework::dataset::make("UseBeta", false)), - framework::dataset::make("UseGamma", false)), - framework::dataset::make("ActivationInfo", ActivationLayerInfo())), - framework::dataset::make("DataType", DataType::QS16)), - framework::dataset::make("DataLayout", DataLayout::NCHW)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_qs16, 0); -} -TEST_SUITE_END() - -TEST_SUITE_END() - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/CL/ConvolutionLayer.cpp b/tests/validation/CL/ConvolutionLayer.cpp index 0c40953524..242c252015 100644 --- a/tests/validation/CL/ConvolutionLayer.cpp +++ b/tests/validation/CL/ConvolutionLayer.cpp @@ -48,7 +48,6 @@ namespace constexpr AbsoluteTolerance<float> absolute_tolerance_float(0.0001f); /**< Absolute Tolerance value for comparing reference's output against implementation's output for DataType::F32 */ RelativeTolerance<float> tolerance_f32(0.05f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */ RelativeTolerance<half_float::half> tolerance_f16(half_float::half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */ -constexpr AbsoluteTolerance<float> tolerance_fixed(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */ constexpr AbsoluteTolerance<float> tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */ constexpr float tolerance_num = 0.07f; /**< Tolerance number */ @@ -57,8 +56,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType", { DataType::F16, DataType::F32, - DataType::QS8, - 
DataType::QS16, DataType::QASYMM8, }); const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo", @@ -256,58 +253,6 @@ TEST_SUITE_END() template <typename T> using CLGEMMConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>; -TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) -// We test for fixed point precision [4,6] -FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::TinyConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 4, 7)), - ActivationFunctionsDataset)) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed); -} - -FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 4, 7)), - ActivationFunctionsDataset)) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -// Testing for fixed point position [1,14) -FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::TinyConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14)), - ActivationFunctionsDataset)) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed); -} - -FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14)), - ActivationFunctionsDataset)) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed); -} -TEST_SUITE_END() -TEST_SUITE_END() - template <typename T> using CLGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>; diff --git a/tests/validation/CL/DeconvolutionLayer.cpp b/tests/validation/CL/DeconvolutionLayer.cpp index 758cf3698a..269bf1587b 100644 --- a/tests/validation/CL/DeconvolutionLayer.cpp +++ b/tests/validation/CL/DeconvolutionLayer.cpp @@ -105,14 +105,14 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, (combine(datasets::Sm DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip( framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid weights shape - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 4), // Non supported data type + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8, 4), // Non supported data type TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 11), // Invalid bias shape TensorInfo(TensorShape(13U, 11U, 4U, 3U), 1, 
DataType::F32, 0), // Window shrink TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32, 0), }), framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::F16, 0), TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::QS8, 5), + TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::QASYMM8, 5), TensorInfo(TensorShape(3U, 2U, 2U, 2U), 1, DataType::F32, 11), TensorInfo(TensorShape(3U, 3U, 4U), 1, DataType::F32, 0), TensorInfo(TensorShape(1U, 1U, 2U, 4U), 1, DataType::F32, 0), diff --git a/tests/validation/CL/DepthConcatenateLayer.cpp b/tests/validation/CL/DepthConcatenateLayer.cpp index 725af88a17..f5a5c230af 100644 --- a/tests/validation/CL/DepthConcatenateLayer.cpp +++ b/tests/validation/CL/DepthConcatenateLayer.cpp @@ -98,42 +98,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConcatenateLayerFixture<float>, framewor TEST_SUITE_END() TEST_SUITE_END() -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(RunTiny, CLDepthConcatenateLayerFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Tiny2DShapes(), - framework::dataset::make("DataType", - DataType::QS8))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConcatenateLayerFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateLayerShapes(), - framework::dataset::make("DataType", - DataType::QS8))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(RunTiny, CLDepthConcatenateLayerFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Tiny2DShapes(), - framework::dataset::make("DataType", - DataType::QS16))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConcatenateLayerFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateLayerShapes(), - framework::dataset::make("DataType", - DataType::QS16))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/CL/DepthConvertLayer.cpp b/tests/validation/CL/DepthConvertLayer.cpp index 603f4a07d6..c6e9f75a59 100644 --- a/tests/validation/CL/DepthConvertLayer.cpp +++ b/tests/validation/CL/DepthConvertLayer.cpp @@ -51,10 +51,6 @@ const auto DepthConvertLayerU16toU8Dataset = combine(framework::data const auto DepthConvertLayerU16toU32Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U32)); const auto DepthConvertLayerS16toU8Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::U8)); const auto DepthConvertLayerS16toS32Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::S32)); -const auto DepthConvertLayerQS8toFP32Dataset = combine(framework::dataset::make("DataType", DataType::QS8), framework::dataset::make("DataType", DataType::F32)); -const auto DepthConvertLayerQS16toFP32Dataset = combine(framework::dataset::make("DataType", DataType::QS16), framework::dataset::make("DataType", DataType::F32)); -const auto DepthConvertLayerFP32toQS8Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::QS8)); 
-const auto DepthConvertLayerFP32toQS16Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::QS16)); const auto DepthConvertLayerShiftDataset = framework::dataset::make("Shift", 0, 7); const auto DepthConvertLayerFixedPointQuantizedDataset = framework::dataset::make("FractionalBits", 1, 7); } // namespace @@ -73,10 +69,6 @@ template <typename T> using CLDepthConvertLayerToU32Fixture = DepthConvertLayerValidationFixture<CLTensor, CLAccessor, CLDepthConvertLayer, T, uint32_t>; template <typename T> using CLDepthConvertLayerToFP32FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture<CLTensor, CLAccessor, CLDepthConvertLayer, T, float>; -template <typename T> -using CLDepthConvertLayerToQS8FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture<CLTensor, CLAccessor, CLDepthConvertLayer, T, int8_t>; -template <typename T> -using CLDepthConvertLayerToQS16FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture<CLTensor, CLAccessor, CLDepthConvertLayer, T, int16_t>; TEST_SUITE(U8_to_U16) DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), @@ -367,124 +359,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConvertLayerToS32Fixture<int16_t>, frame } TEST_SUITE_END() -TEST_SUITE(Quantized_to_FP32) -DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("DataType", { DataType::QS8, DataType::QS16 })), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset), - shape, dt, policy, fixed_point_position) -{ - int shift = 0; - - // Create tensors - CLTensor src = create_tensor<CLTensor>(shape, dt, 1, fixed_point_position); - CLTensor dst = create_tensor<CLTensor>(shape, DataType::F32, 1, fixed_point_position); - - // Create and Configure function - CLDepthConvertLayer depth_convert; - depth_convert.configure(&src, &dst, policy, shift); - - // Validate valid region - const ValidRegion valid_region = shape_to_valid_region(shape); - validate(dst.info()->valid_region(), valid_region); - - // Validate padding - const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding(); - validate(src.info()->padding(), padding); - validate(dst.info()->padding(), padding); -} -FIXTURE_DATA_TEST_CASE(RunTinyQS8, CLDepthConvertLayerToFP32FixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), - DepthConvertLayerQS8toFP32Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunTinyQS16, CLDepthConvertLayerToFP32FixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), - DepthConvertLayerQS16toFP32Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmallQS8, CLDepthConvertLayerToFP32FixedPointFixture<int8_t>, 
framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), - DepthConvertLayerQS8toFP32Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmallQS16, CLDepthConvertLayerToFP32FixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), - DepthConvertLayerQS16toFP32Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(FP32_to_Quantized) -DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("DataType", { DataType::QS8, DataType::QS16 })), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset), - shape, dt, policy, fixed_point_position) -{ - int shift = 0; - - // Create tensors - CLTensor src = create_tensor<CLTensor>(shape, DataType::F32, 1, fixed_point_position); - CLTensor dst = create_tensor<CLTensor>(shape, dt, 1, fixed_point_position); - - // Create and Configure function - CLDepthConvertLayer depth_convert; - depth_convert.configure(&src, &dst, policy, shift); - - // Validate valid region - const ValidRegion valid_region = shape_to_valid_region(shape); - validate(dst.info()->valid_region(), valid_region); - - // Validate padding - const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding(); - validate(src.info()->padding(), padding); - validate(dst.info()->padding(), padding); -} -FIXTURE_DATA_TEST_CASE(RunTinyQS8, CLDepthConvertLayerToQS8FixedPointFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), - DepthConvertLayerFP32toQS8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunTinyQS16, CLDepthConvertLayerToQS16FixedPointFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), - DepthConvertLayerFP32toQS16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmallQS8, CLDepthConvertLayerToQS8FixedPointFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), - DepthConvertLayerFP32toQS8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmallQS16, CLDepthConvertLayerToQS16FixedPointFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), - DepthConvertLayerFP32toQS16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // 
Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/CL/DilatedConvolutionLayer.cpp b/tests/validation/CL/DilatedConvolutionLayer.cpp index 25931c0f4c..784e2001c1 100644 --- a/tests/validation/CL/DilatedConvolutionLayer.cpp +++ b/tests/validation/CL/DilatedConvolutionLayer.cpp @@ -45,7 +45,6 @@ namespace { RelativeTolerance<float> tolerance_f32(0.05f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */ RelativeTolerance<half_float::half> tolerance_f16(half_float::half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */ -constexpr AbsoluteTolerance<float> tolerance_fixed(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */ constexpr AbsoluteTolerance<float> tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */ constexpr float tolerance_num = 0.07f; /**< Tolerance number */ @@ -54,8 +53,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType", { DataType::F16, DataType::F32, - DataType::QS8, - DataType::QS16, DataType::QASYMM8, }); } // namespace @@ -209,60 +206,6 @@ TEST_SUITE_END() template <typename T> using CLGEMMDilatedConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>; -TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) -// We test for fixed point precision [4,6] -FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMDilatedConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, - combine(combine(combine(combine(datasets::TinyDilatedConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 4, 7)), - framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() }))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMDilatedConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, - combine(combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 4, 7)), - framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() }))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -// Testing for fixed point position [1,14) -FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMDilatedConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, - combine(combine(combine(combine(datasets::TinyDilatedConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14)), - framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() }))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMDilatedConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, - 
combine(combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14)), - framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() }))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed); -} -TEST_SUITE_END() -TEST_SUITE_END() - template <typename T> using CLGEMMDilatedConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>; diff --git a/tests/validation/CL/DirectConvolutionLayer.cpp b/tests/validation/CL/DirectConvolutionLayer.cpp index 19d914e98e..d8b2d7e155 100644 --- a/tests/validation/CL/DirectConvolutionLayer.cpp +++ b/tests/validation/CL/DirectConvolutionLayer.cpp @@ -48,8 +48,6 @@ RelativeTolerance<half> tolerance_fp16(half(0.2)); /**< Tolerance for floating RelativeTolerance<float> tolerance_fp32(0.02f); /**< Tolerance for floating point tests */ constexpr float tolerance_num = 0.07f; /**< Tolerance number */ -constexpr AbsoluteTolerance<int8_t> tolerance_qs8(0); /**< Tolerance for fixed point tests */ -constexpr AbsoluteTolerance<int16_t> tolerance_qs16(0); /**< Tolerance for fixed point tests */ constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1); /**< Tolerance for quantized tests */ /** Direct convolution data set. */ @@ -204,30 +202,6 @@ TEST_SUITE_END() template <typename T> using CLDirectConvolutionLayerFixedPointFixture = DirectConvolutionValidationFixedPointFixture<CLTensor, CLAccessor, CLDirectConvolutionLayer, T>; -TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(data_fixed_point, framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 2, 7)), - ActivationFunctionsDataset)) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_qs8); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(Run, CLDirectConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(combine(data_fixed_point, framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 2, 15)), - ActivationFunctionsDataset)) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_qs16); -} -TEST_SUITE_END() -TEST_SUITE_END() - template <typename T> using CLDirectConvolutionLayerQuantizedFixture = DirectConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLDirectConvolutionLayer, T>; template <typename T> diff --git a/tests/validation/CL/FixedPoint/FixedPointTarget.h b/tests/validation/CL/FixedPoint/FixedPointTarget.h deleted file mode 100644 index 920bd374d3..0000000000 --- a/tests/validation/CL/FixedPoint/FixedPointTarget.h +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Copyright (c) 2017-2018 ARM Limited. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef ARM_COMPUTE_TEST_FIXED_POINT_CL_TARGET -#define ARM_COMPUTE_TEST_FIXED_POINT_CL_TARGET - -#include "arm_compute/runtime/CL/CLScheduler.h" - -#include "tests/Globals.h" -#include "tests/Types.h" - -namespace arm_compute -{ -namespace test -{ -namespace validation -{ -namespace -{ -template <typename TensorType, typename AccessorType, typename T> -void compute_target_impl(const TensorShape &shape, DataType dt, FixedPointOp op, int fixed_point_position, TensorType &src, TensorType &dst) -{ - std::string fixed_point_operation_kernel; -#ifndef EMBEDDED_KERNELS - std::cout << "EMBEDDED_KERNELS NOT DEFINED" << std::endl; - - fixed_point_operation_kernel += "#include \"fixed_point.h\"\n"; -#endif /* EMBEDDED_KERNELS */ - fixed_point_operation_kernel += - "__kernel void fixed_point_operation_qs8( \n" - " __global char* src, \n" - " __global char* dst) \n" - "{ \n" - " char16 in = vload16(0, src + get_global_id(0) * 16); \n" - " if(FIXED_POINT_OP == 0) \n" - " { \n" - " vstore16(EXP_OP_EXPAND(in, DATA_TYPE, 16, FIXED_POINT_POS), 0, dst + get_global_id(0) * 16); \n" - " } \n" - " else if(FIXED_POINT_OP == 1) \n" - " { \n" - " vstore16(INVSQRT_OP_EXPAND(in, DATA_TYPE, 16, FIXED_POINT_POS), 0, dst + get_global_id(0) * 16); \n" - " } \n" - " else \n" - " { \n" - " vstore16(LOG_OP_EXPAND(in, DATA_TYPE, 16, FIXED_POINT_POS), 0, dst + get_global_id(0) * 16); \n" - " } \n" - "} \n" - "\n"; - - // Set build options - std::string build_opts = "-DFIXED_POINT_POS=" + support::cpp11::to_string(fixed_point_position); - build_opts += " -DDATA_TYPE=qs8"; - - // Fill tensors. - int min = 0; - int max = 0; - switch(op) - { - case(FixedPointOp::EXP): - min = -(1 << (fixed_point_position - 1)); - max = (1 << (fixed_point_position - 1)); - build_opts += " -DFIXED_POINT_OP=0"; - break; - case(FixedPointOp::INV_SQRT): - min = 1; - max = (dt == DataType::QS8) ? 0x7F : 0x7FFF; - build_opts += " -DFIXED_POINT_OP=1"; - break; - case(FixedPointOp::LOG): - min = (1 << (fixed_point_position - 1)); - max = (dt == DataType::QS8) ? 
0x3F : 0x3FFF; - build_opts += " -DFIXED_POINT_OP=2"; - break; - default: - ARM_COMPUTE_ERROR("Fixed point operation not supported"); - break; - } - - std::uniform_int_distribution<> distribution(min, max); - library->fill(AccessorType(src), distribution, 0); - - std::vector<std::string> sources; - -#ifndef EMBEDDED_KERNELS - build_opts += " -I" + CLKernelLibrary::get().get_kernel_path(); -#else /* EMBEDDED_KERNELS */ - sources.push_back(CLKernelLibrary::get().get_program_source("fixed_point.h")); -#endif /* EMBEDDED_KERNELS */ - - sources.push_back(fixed_point_operation_kernel); - - // Create program - ::cl::Program program(CLScheduler::get().context(), sources); - - // Build program - program.build(build_opts.c_str()); - - ::cl::Kernel kernel(program, "fixed_point_operation_qs8", nullptr); - - unsigned int idx = 0; - kernel.setArg(idx++, src.cl_buffer()); - kernel.setArg(idx++, dst.cl_buffer()); - - ::cl::NDRange gws(shape[0] / 16, 1, 1); - CLScheduler::get().queue().enqueueNDRangeKernel(kernel, 0, gws); -} -} // namespace -} // namespace validation -} // namespace test -} // namespace arm_compute -#endif /* ARM_COMPUTE_TEST_FIXED_POINT_TARGET */ diff --git a/tests/validation/CL/FixedPoint/FixedPoint_QS8.cpp b/tests/validation/CL/FixedPoint/FixedPoint_QS8.cpp deleted file mode 100644 index 186fc4c673..0000000000 --- a/tests/validation/CL/FixedPoint/FixedPoint_QS8.cpp +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (c) 2017 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#include "FixedPointTarget.h" - -#include "arm_compute/core/Types.h" -#include "arm_compute/runtime/CL/CLTensor.h" -#include "arm_compute/runtime/CL/CLTensorAllocator.h" -#include "tests/CL/CLAccessor.h" -#include "tests/PaddingCalculator.h" -#include "tests/datasets/ShapeDatasets.h" -#include "tests/framework/Asserts.h" -#include "tests/framework/Macros.h" -#include "tests/framework/datasets/Datasets.h" -#include "tests/validation/Validation.h" -#include "tests/validation/fixtures/FixedPointFixture.h" - -namespace arm_compute -{ -namespace test -{ -namespace validation -{ -namespace -{ -constexpr AbsoluteTolerance<float> tolerance_exp(1.0f); /**< Tolerance value for comparing reference's output against implementation's output (exponential)*/ -constexpr AbsoluteTolerance<float> tolerance_invsqrt(4.0f); /**< Tolerance value for comparing reference's output against implementation's output (inverse square-root) */ -constexpr AbsoluteTolerance<float> tolerance_log(5.0f); /**< Tolerance value for comparing reference's output against implementation's output (logarithm) */ -} // namespace - -TEST_SUITE(CL) -TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) - -template <typename T> -using CLFixedPointFixture = FixedPointValidationFixture<CLTensor, CLAccessor, T>; - -TEST_SUITE(Exp) -FIXTURE_DATA_TEST_CASE(RunSmall, CLFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::Small1DShapes(), framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FixedPointOp", FixedPointOp::EXP)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_exp); -} -TEST_SUITE_END() - -TEST_SUITE(Log) -FIXTURE_DATA_TEST_CASE(RunSmall, CLFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::Small1DShapes(), framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FixedPointOp", FixedPointOp::LOG)), - framework::dataset::make("FractionalBits", 3, 6))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_log); -} -TEST_SUITE_END() - -TEST_SUITE(Invsqrt) -FIXTURE_DATA_TEST_CASE(RunSmall, CLFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::Small1DShapes(), framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FixedPointOp", FixedPointOp::INV_SQRT)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_invsqrt); -} -TEST_SUITE_END() - -TEST_SUITE_END() -TEST_SUITE_END() -TEST_SUITE_END() -} // namespace validation -} // namespace test -} // namespace arm_compute diff --git a/tests/validation/CL/Flatten.cpp b/tests/validation/CL/Flatten.cpp index 04fbef6d79..ceaf123f46 100644 --- a/tests/validation/CL/Flatten.cpp +++ b/tests/validation/CL/Flatten.cpp @@ -78,38 +78,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLFlattenLayerFixture<half>, framework::Dataset TEST_SUITE_END() TEST_SUITE_END() -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(RunTiny, CLFlattenLayerFixture<int8_t>, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::Tiny3DShapes(), datasets::Tiny4DShapes()), - framework::dataset::make("DataType", DataType::QS8))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLFlattenLayerFixture<int8_t>, framework::DatasetMode::NIGHTLY, 
combine(framework::dataset::concat(datasets::Small3DShapes(), datasets::Small4DShapes()), - framework::dataset::make("DataType", DataType::QS8))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(RunTiny, CLFlattenLayerFixture<int16_t>, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::Tiny3DShapes(), datasets::Tiny4DShapes()), - framework::dataset::make("DataType", DataType::QS16))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLFlattenLayerFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(framework::dataset::concat(datasets::Small3DShapes(), datasets::Small4DShapes()), - framework::dataset::make("DataType", DataType::QS16))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/CL/FullyConnectedLayer.cpp b/tests/validation/CL/FullyConnectedLayer.cpp index 7db9cf5b70..069d8a73ac 100644 --- a/tests/validation/CL/FullyConnectedLayer.cpp +++ b/tests/validation/CL/FullyConnectedLayer.cpp @@ -47,8 +47,6 @@ RelativeTolerance<float> tolerance_f32(0.05f); RelativeTolerance<half_float::half> tolerance_f16(half(0.2)); constexpr float tolerance_num = 0.07f; /**< Tolerance number */ -/** Tolerance for fixed point operations */ -constexpr AbsoluteTolerance<float> tolerance_fixed_point(1.f); /** Tolerance for quantized asymmetric operations */ constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1); @@ -57,8 +55,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType", { DataType::F16, DataType::F32, - DataType::QS8, - DataType::QS16, DataType::QASYMM8, }); @@ -119,36 +115,32 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip( framework::dataset::make("InputInfo", { TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::F32), // Mismatching data types - TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::QS8, 2), // Mismatching fixed point position TensorInfo(TensorShape(8U, 4U, 6U, 4U), 1, DataType::F32), TensorInfo(TensorShape(8U, 4U, 6U, 4U), 1, DataType::F32), TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::F32), // Invalid weights dimensions TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::F32), // Wrongly reshaped weights }), framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(315U, 271U), 1, DataType::F16), - TensorInfo(TensorShape(315U, 271U), 1, DataType::QS8, 3), TensorInfo(TensorShape(192U, 192U), 1, DataType::F32), TensorInfo(TensorShape(192U, 192U), 1, DataType::F32), TensorInfo(TensorShape(217U, 231U), 1, DataType::F32), TensorInfo(TensorShape(217U, 315U), 1, DataType::F32), })), framework::dataset::make("BiasInfo",{ TensorInfo(TensorShape(271U), 1, DataType::F32), - TensorInfo(TensorShape(271U), 1, DataType::QS8, 2), TensorInfo(TensorShape(192U), 1, DataType::F32), TensorInfo(TensorShape(192U), 1, DataType::F32), TensorInfo(TensorShape(271U), 1, DataType::F32), TensorInfo(TensorShape(271U), 1, DataType::F32), })), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(271U, 3U), 1, DataType::F32), - TensorInfo(TensorShape(271U, 3U), 1, DataType::QS8, 3), TensorInfo(TensorShape(192U, 4U), 1, DataType::F32), TensorInfo(TensorShape(192U, 4U), 1, DataType::F32), TensorInfo(TensorShape(271U, 3U), 1, 
DataType::F32), TensorInfo(TensorShape(271U, 3U), 1, DataType::F32), })), - framework::dataset::make("TransposeWeights",{ true, true, true, false, true, true })), - framework::dataset::make("ReshapedWeights",{ false, false, false, false, false, false})), - framework::dataset::make("Expected", { false, false, true, true, false, false })), + framework::dataset::make("TransposeWeights",{ true, true, false, true, true })), + framework::dataset::make("ReshapedWeights",{ false, false, false, false, false})), + framework::dataset::make("Expected", { false, true, true, false, false })), input_info, weights_info, bias_info, output_info, transpose_weights, reshaped_weights, expected) { Status status = CLFullyConnectedLayer::validate(&input_info.clone()->set_is_resizable(false), @@ -202,52 +194,6 @@ TEST_SUITE_END() template <typename T> using CLFullyConnectedLayerFixedPointFixture = FullyConnectedLayerValidationFixedPointFixture<CLTensor, CLAccessor, CLFullyConnectedLayer, T, false>; -TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) -// Testing for fixed point position [1,6) as reciprocal limits the maximum fixed point position to 5 -FIXTURE_DATA_TEST_CASE(RunTiny, CLFullyConnectedLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyFullyConnectedLayerDataset(), - FullyConnectedParameters), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed_point); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLFullyConnectedLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallFullyConnectedLayerDataset(), - FullyConnectedParameters), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed_point); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 14 -FIXTURE_DATA_TEST_CASE(RunTiny, CLFullyConnectedLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyFullyConnectedLayerDataset(), - FullyConnectedParameters), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed_point); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLFullyConnectedLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallFullyConnectedLayerDataset(), - FullyConnectedParameters), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed_point); -} -TEST_SUITE_END() -TEST_SUITE_END() - template <typename T> using CLFullyConnectedLayerQuantizedFixture = FullyConnectedLayerValidationQuantizedFixture<CLTensor, CLAccessor, CLFullyConnectedLayer, T, false>; diff --git a/tests/validation/CL/GEMM.cpp b/tests/validation/CL/GEMM.cpp index 4e6cf826fa..d066281843 100644 --- a/tests/validation/CL/GEMM.cpp +++ b/tests/validation/CL/GEMM.cpp @@ -53,7 +53,6 @@ RelativeTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for compari constexpr float abs_tolerance_f32( 0.0001f); /**< Absolute tolerance value for comparing reference's output 
against implementation's output for floating point data types in case using relative tolerance fails because of small values */ RelativeTolerance<half_float::half> tolerance_f16(half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for floating point data types */ -constexpr AbsoluteTolerance<float> tolerance_q(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */ constexpr float tolerance_num = 0.02f; /**< Tolerance number */ const auto data_interleave = framework::dataset::make("M", 8, 14) * framework::dataset::make("N", 7, 14); @@ -62,8 +61,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType", { DataType::F16, DataType::F32, - DataType::QS8, - DataType::QS16, }); } // namespace @@ -84,31 +81,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMInterleave4x4Fixture, framework::DatasetM } TEST_SUITE_END() // FP32 -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -using CLGEMMInterleave4x4Fixture = GEMMInterleave4x4ValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMMInterleave4x4, int8_t>; -FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMInterleave4x4Fixture, framework::DatasetMode::PRECOMMIT, data_interleave * - framework::dataset::make("DataType", DataType::QS8) - * framework::dataset::make("FractionalBits", 1, 7)) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -using CLGEMMInterleave4x4Fixture = GEMMInterleave4x4ValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMMInterleave4x4, int16_t>; -FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMInterleave4x4Fixture, framework::DatasetMode::PRECOMMIT, data_interleave * - framework::dataset::make("DataType", DataType::QS16) - * framework::dataset::make("FractionalBits", 1, 14)) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE_END() - TEST_SUITE_END() // INTERLEAVE_4X4 DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallGEMMDataset(), datasets::LargeGEMMDataset()), CNNDataTypes), @@ -149,33 +121,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMTranspose1xWFixture, framework::DatasetMo } TEST_SUITE_END() // FP32 -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -using CLGEMMTranspose1xW = CLSynthetizeFunctionWithZeroConstantBorder<CLGEMMTranspose1xWKernel, 16>; -using CLGEMMTranspose1xWFixture = GEMMTranspose1xWValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMMTranspose1xW, int8_t>; -FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMTranspose1xWFixture, framework::DatasetMode::PRECOMMIT, data_transpose * - framework::dataset::make("DataType", DataType::QS8) - * framework::dataset::make("FractionalBits", 1, 7)) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -using CLGEMMTranspose1xW = CLSynthetizeFunctionWithZeroConstantBorder<CLGEMMTranspose1xWKernel, 8>; -using CLGEMMTranspose1xWFixture = GEMMTranspose1xWValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMMTranspose1xW, int16_t>; -FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMTranspose1xWFixture, framework::DatasetMode::PRECOMMIT, data_transpose * - framework::dataset::make("DataType", DataType::QS16) - * framework::dataset::make("FractionalBits", 1, 14)) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE_END() - TEST_SUITE_END() //TRANSPOSE_1XW TEST_SUITE(Float) @@ -210,46 +155,6 @@ TEST_SUITE_END() template <typename T> using 
CLGEMMFixedPointFixture = GEMMValidationFixedPointFixture<CLTensor, CLAccessor, CLGEMM, T>; -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::TinyGEMMDataset(), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 7))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_q); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SmallGEMMDataset(), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 7))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_q); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(RunTiny, CLGEMMFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::TinyGEMMDataset(), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_q); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SmallGEMMDataset(), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_q); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE(OUTPUT_3D) TEST_SUITE(Float) TEST_SUITE(FP32) diff --git a/tests/validation/CL/Im2Col.cpp b/tests/validation/CL/Im2Col.cpp index bfe0665fa9..ac36267c99 100644 --- a/tests/validation/CL/Im2Col.cpp +++ b/tests/validation/CL/Im2Col.cpp @@ -58,20 +58,18 @@ using CLIm2Col = CLSynthetizeFunction<CLIm2ColKernel>; DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( framework::dataset::make("InputInfo", { TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::U8), // Unsupported data type TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::F32), // Mismatching data type - TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::QASYMM8), // Bias not supported with QASYMM8 TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::QASYMM8), // Mismatching shapes TensorInfo(TensorShape(10U, 12U, 2U, 2U), 1, DataType::QASYMM8), }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::F16), TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::F16), - TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::QS8, 3), TensorInfo(TensorShape(3U, 3U, 10U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(18U, 80U, 1U, 2U), 1, DataType::QASYMM8), })), - framework::dataset::make("HasBias", { true, true, true, true, false, false })), - framework::dataset::make("Expected", { false, false, false, false, true, true })), + framework::dataset::make("HasBias", { true, true, true, false, false })), + framework::dataset::make("Expected", { false, false, false, true, true })), input_info, output_info, has_bias, expected) { diff --git a/tests/validation/CL/NormalizationLayer.cpp b/tests/validation/CL/NormalizationLayer.cpp index a3db6e60ba..f6a8e7a9fb 100644 --- a/tests/validation/CL/NormalizationLayer.cpp +++ b/tests/validation/CL/NormalizationLayer.cpp @@ -47,10 +47,6 @@ 
namespace RelativeTolerance<half> tolerance_f16(half(0.2)); RelativeTolerance<float> tolerance_f32(0.05f); -/** Tolerance for fixed point operations */ -constexpr AbsoluteTolerance<int8_t> tolerance_qs8(2); -constexpr AbsoluteTolerance<int16_t> tolerance_qs16(4); - /** Input data set. */ const auto NormalizationDataset = combine(combine(combine(combine(datasets::SmallShapes(), datasets::NormalizationTypes()), framework::dataset::make("NormalizationSize", 3, 9, 2)), @@ -83,7 +79,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching shapes TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Even normalization TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Non implemented IN_MAP_2D - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 4), // Mismatching fixed point position TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Window shrink TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 0), }), @@ -91,7 +86,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32, 0), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 0), })), @@ -100,10 +94,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( NormalizationLayerInfo(NormType::IN_MAP_1D, 4), NormalizationLayerInfo(NormType::IN_MAP_2D, 5), NormalizationLayerInfo(NormType::IN_MAP_1D, 5), - NormalizationLayerInfo(NormType::IN_MAP_1D, 5), NormalizationLayerInfo(NormType::CROSS_MAP, 5), })), - framework::dataset::make("Expected", { false, false, false, false, false, false, true })), + framework::dataset::make("Expected", { false, false, false, false, false, true })), input_info, output_info, norm_info, expected) { ARM_COMPUTE_EXPECT(bool(CLNormalizationLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), norm_info)) == expected, framework::LogLevel::ERRORS); @@ -145,44 +138,6 @@ TEST_SUITE_END() template <typename T> using CLNormalizationLayerFixedPointFixture = NormalizationValidationFixedPointFixture<CLTensor, CLAccessor, CLNormalizationLayer, T>; -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -// Testing for fixed point position [1,6) as reciprocal limits the maximum fixed point position to 5 -FIXTURE_DATA_TEST_CASE(RunTiny, CLNormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(NormalizationDatasetQS, framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_qs8); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLNormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(NormalizationDataset, framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_qs8); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 5 -FIXTURE_DATA_TEST_CASE(RunTiny, CLNormalizationLayerFixedPointFixture<int16_t>, 
framework::DatasetMode::PRECOMMIT, combine(combine(NormalizationDatasetQS, framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_qs16); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLNormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(NormalizationDataset, framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_qs16); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/CL/PixelWiseMultiplication.cpp b/tests/validation/CL/PixelWiseMultiplication.cpp index 6a71175f51..99b084f6bc 100644 --- a/tests/validation/CL/PixelWiseMultiplication.cpp +++ b/tests/validation/CL/PixelWiseMultiplication.cpp @@ -28,7 +28,6 @@ #include "tests/datasets/ShapeDatasets.h" #include "tests/framework/Macros.h" #include "tests/validation/Validation.h" -#include "tests/validation/fixtures/FixedPointPixelWiseMultiplicationFixture.h" #include "tests/validation/fixtures/PixelWiseMultiplicationFixture.h" namespace arm_compute @@ -39,7 +38,6 @@ namespace validation { namespace { -const float scale_unity = 1.f; const float scale_255 = 1.f / 255.f; // *INDENT-OFF* @@ -81,12 +79,6 @@ using CLPixelWiseMultiplicationToF16Fixture = PixelWiseMultiplicationValidationF template <typename T> using CLPixelWiseMultiplicationToF32Fixture = PixelWiseMultiplicationValidationFixture<CLTensor, CLAccessor, CLPixelWiseMultiplication, T, float>; template <typename T> -using CLPixelWiseMultiplicationToQS8Fixture = PixelWiseMultiplicationValidationFixture<CLTensor, CLAccessor, CLPixelWiseMultiplication, T, qint8_t>; -template <typename T> -using CLPixelWiseMultiplicationToQS16Fixture = PixelWiseMultiplicationValidationFixture<CLTensor, CLAccessor, CLPixelWiseMultiplication, T, qint16_t>; -template <typename T> -using CLFixedPointPixelWiseMultiplicationFixture = FixedPointPixelWiseMultiplicationValidationFixture<CLTensor, CLAccessor, CLPixelWiseMultiplication, T>; -template <typename T> using CLPixelWiseMultiplicationBroadcastFixture = PixelWiseMultiplicationBroadcastValidationFixture<CLTensor, CLAccessor, CLPixelWiseMultiplication, T, float>; TEST_SUITE(CL) @@ -101,9 +93,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip( TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid scale TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching data type - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Invalid scale }), framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), @@ -111,9 +100,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip( TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS16, 2), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), - 
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), })), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), @@ -121,12 +107,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip( TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), })), - framework::dataset::make("Scale",{ 2.f, 2.f, 2.f, -1.f, 1.f, 1.f, 1.f, 1.f, 3.f})), - framework::dataset::make("Expected", { true, true, false, false, false, false, false, false, false })), + framework::dataset::make("Scale",{ 2.f, 2.f, 2.f, -1.f, 1.f, 1.f})), + framework::dataset::make("Expected", { true, true, false, false, false, false})), input1_info, input2_info, output_info, scale, expected) { bool has_error = bool(CLPixelWiseMultiplication::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), scale, ConvertPolicy::WRAP, RoundingPolicy::TO_ZERO)); @@ -155,22 +138,6 @@ TEST_SUITE_END() // PixelWiseMultiplication TEST_SUITE(FixedPointPixelWiseMultiplication) -TEST_SUITE(QS8) - -TEST_SUITE(ScaleUnity) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunTiny, Fixture<qint8_t>, PRECOMMIT, TinyShapes(), QS8, QS8, scale_unity, TO_ZERO, 1, 7) -TEST_SUITE_END() // ScaleUnity - -TEST_SUITE_END() // QS8 - -TEST_SUITE(QS16) - -TEST_SUITE(ScaleUnity) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunTiny, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, scale_unity, TO_ZERO, 1, 15) -TEST_SUITE_END() // ScaleUnity - -TEST_SUITE_END() // QS16 - TEST_SUITE(Broadcast) PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunSmall, BroadcastFixture<float>, PRECOMMIT, SmallShapesBroadcast(), F32, F32, scale_255, TO_NEAREST_UP, VALIDATE(float, 1.f)) TEST_SUITE_END() // Broadcast diff --git a/tests/validation/CL/PoolingLayer.cpp b/tests/validation/CL/PoolingLayer.cpp index 7bd090cb77..b28a5ebca9 100644 --- a/tests/validation/CL/PoolingLayer.cpp +++ b/tests/validation/CL/PoolingLayer.cpp @@ -49,11 +49,6 @@ const auto PoolingLayerDatasetFP = combine(combine(combine(datasets::PoolingType framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })), framework::dataset::make("ExcludePadding", { true, false })); -/** Input data set for fixed-point data types */ -const auto PoolingLayerDatasetQS = combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }), framework::dataset::make("PoolingSize", { Size2D(2, 2), Size2D(3, 3) })), - framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })), - framework::dataset::make("ExcludePadding", { true, false })); - /** Input data set for asymmetric data type */ const auto PoolingLayerDatasetQASYMM8 = combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }), framework::dataset::make("PoolingSize", { Size2D(2, 2), Size2D(3, 3), Size2D(5, 7), Size2D(8, 9) })), framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), 
PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })), @@ -61,8 +56,6 @@ const auto PoolingLayerDatasetQASYMM8 = combine(combine(combine(framework::datas constexpr AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for 32-bit floating-point type */ constexpr AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for 16-bit floating-point type */ -constexpr AbsoluteTolerance<float> tolerance_qs16(6); /**< Tolerance value for comparing reference's output against implementation's output for 16-bit fixed-point type */ -constexpr AbsoluteTolerance<float> tolerance_qs8(3); /**< Tolerance value for comparing reference's output against implementation's output for 8-bit fixed-point type */ constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1); /**< Tolerance value for comparing reference's output against implementation's output for 8-bit asymmetric type */ } // namespace @@ -74,8 +67,6 @@ TEST_SUITE(PoolingLayer) DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Window shrink - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 4), // Mismatching fixed point position - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS16, 11), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid pad/size combination TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid pad/size combination TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8, 0), // Invalid parameters @@ -85,8 +76,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16, 0), TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::QS8, 5), - TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::QS16, 11), TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32, 0), TensorInfo(TensorShape(25U, 16U, 2U), 1, DataType::F32, 0), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8, 0), @@ -96,8 +85,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( })), framework::dataset::make("PoolInfo", { PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)), PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)), - PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)), - PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)), PoolingLayerInfo(PoolingType::AVG, 2, PadStrideInfo(1, 1, 2, 0)), PoolingLayerInfo(PoolingType::AVG, 2, PadStrideInfo(1, 1, 0, 2)), PoolingLayerInfo(PoolingType::L2, 3, PadStrideInfo(1, 1, 0, 0)), @@ -105,7 +92,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( PoolingLayerInfo(PoolingType::MAX), PoolingLayerInfo(PoolingType::AVG), })), - framework::dataset::make("Expected", { false, false, false, true, false, false, false, true, false, true })), + framework::dataset::make("Expected", { false, false, false, false, false, true, false, true })), input_info, output_info, pool_info, expected) { ARM_COMPUTE_EXPECT(bool(CLPoolingLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), pool_info)) == 
expected, framework::LogLevel::ERRORS); @@ -164,42 +151,6 @@ TEST_SUITE_END() // Float template <typename T> using CLPoolingLayerFixedPointFixture = PoolingLayerValidationFixedPointFixture<CLTensor, CLAccessor, CLPoolingLayer, T>; -TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(RunTiny, CLPoolingLayerFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapes(), combine(PoolingLayerDatasetQS, - framework::dataset::make("DataType", DataType::QS8))), - framework::dataset::make("FractionalBits", 1, 4))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_qs8); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SmallShapes(), combine(PoolingLayerDatasetQS, - framework::dataset::make("DataType", DataType::QS8))), - framework::dataset::make("FractionalBits", 1, 4))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_qs8); -} -TEST_SUITE_END() // QS8 - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(RunTiny, CLPoolingLayerFixedPointFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapes(), combine(PoolingLayerDatasetQS, - framework::dataset::make("DataType", DataType::QS16))), - framework::dataset::make("FractionalBits", 1, 12))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_qs16); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SmallShapes(), combine(PoolingLayerDatasetQS, - framework::dataset::make("DataType", DataType::QS16))), - framework::dataset::make("FractionalBits", 1, 12))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_qs16); -} -TEST_SUITE_END() // QS16 -TEST_SUITE_END() // fixedPoint - TEST_SUITE(Quantized) template <typename T> diff --git a/tests/validation/CL/SoftmaxLayer.cpp b/tests/validation/CL/SoftmaxLayer.cpp index f9204f9424..114529edfd 100644 --- a/tests/validation/CL/SoftmaxLayer.cpp +++ b/tests/validation/CL/SoftmaxLayer.cpp @@ -47,9 +47,6 @@ namespace RelativeTolerance<half> tolerance_f16(half(0.2)); RelativeTolerance<float> tolerance_f32(0.001f); -/** Tolerance for fixed point operations */ -constexpr AbsoluteTolerance<int16_t> tolerance_fixed_point(2); - /** Tolerance for quantized operations */ constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1); @@ -59,8 +56,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType", DataType::QASYMM8, DataType::F16, DataType::F32, - DataType::QS8, - DataType::QS16, }); } // namespace @@ -106,27 +101,23 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(concat(datase DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip( framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data types TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8, // Invalid output quantization info QuantizationInfo(1.f/256, 12)), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Window shrink TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 12)), }), 
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16), TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 12)), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 0)), })), - framework::dataset::make("Expected", { false, false, false, false, false, true, true, true })), + framework::dataset::make("Expected", { false, false, false, false, true, true })), input_info, output_info, expected) { ARM_COMPUTE_EXPECT(bool(CLSoftmaxLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false))) == expected, framework::LogLevel::ERRORS); @@ -176,46 +167,6 @@ TEST_SUITE_END() template <typename T> using CLSoftmaxLayerFixedPointFixture = SoftmaxValidationFixedPointFixture<CLTensor, CLAccessor, CLSoftmaxLayer, T>; -TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) -// Testing for fixed point position [1,6) as reciprocal limits the maximum fixed point position to 5 -FIXTURE_DATA_TEST_CASE(RunTiny, CLSoftmaxLayerFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(datasets::SoftmaxLayerTinyShapes(), framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed_point); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLSoftmaxLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SoftmaxLayerSmallShapes(), framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed_point); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 14 -FIXTURE_DATA_TEST_CASE(RunTiny, CLSoftmaxLayerFixedPointFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(datasets::SoftmaxLayerTinyShapes(), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed_point); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLSoftmaxLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SoftmaxLayerSmallShapes(), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(CLAccessor(_target), _reference, tolerance_fixed_point); -} -TEST_SUITE_END() -TEST_SUITE_END() - template <typename T> using CLSoftmaxLayerQuantizedFixture = SoftmaxValidationQuantizedFixture<CLTensor, CLAccessor, CLSoftmaxLayer, T>; diff --git a/tests/validation/CL/WidthConcatenateLayer.cpp b/tests/validation/CL/WidthConcatenateLayer.cpp index 0ff95df957..36a5e6fcfb 100644 --- a/tests/validation/CL/WidthConcatenateLayer.cpp +++ b/tests/validation/CL/WidthConcatenateLayer.cpp @@ -133,42 +133,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLWidthConcatenateLayerFixture<float>, framewor TEST_SUITE_END() TEST_SUITE_END() -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(RunTiny, 
CLWidthConcatenateLayerFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Tiny2DShapes(), - framework::dataset::make("DataType", - DataType::QS8))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLWidthConcatenateLayerFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::WidthConcatenateLayerShapes(), - framework::dataset::make("DataType", - DataType::QS8))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(RunTiny, CLWidthConcatenateLayerFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Tiny2DShapes(), - framework::dataset::make("DataType", - DataType::QS16))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmall, CLWidthConcatenateLayerFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(datasets::WidthConcatenateLayerShapes(), - framework::dataset::make("DataType", - DataType::QS16))) -{ - // Validate output - validate(CLAccessor(_target), _reference); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation
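For reference, every suite touched by this patch keeps the same fixture-based test pattern for the remaining data types (F16/F32/QASYMM8): a TEST_SUITE hierarchy, a FIXTURE_DATA_TEST_CASE driven by a combined dataset, and a validate() call against the reference. The sketch below illustrates that pattern for the Flatten layer with F32 only; it is an illustrative example, not code from this commit, and the fixture header and template name (FlattenLayerFixture.h, FlattenValidationFixture) are assumed from the aliases used in these files rather than quoted from the sources.

// Illustrative sketch only -- not part of the patch above.
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLFlattenLayer.h"
#include "tests/CL/CLAccessor.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/FlattenLayerFixture.h" // assumed header name

namespace arm_compute
{
namespace test
{
namespace validation
{
// Assumed fixture/template names, mirroring the CLFlattenLayerFixture alias used above
template <typename T>
using CLFlattenLayerFixture = FlattenValidationFixture<CLTensor, CLAccessor, CLFlattenLayer, T>;

TEST_SUITE(CL)
TEST_SUITE(FlattenLayer)
TEST_SUITE(Float)
TEST_SUITE(FP32)
// With QS8/QS16 gone there is no FractionalBits dimension: the dataset is just shapes x data type
FIXTURE_DATA_TEST_CASE(RunSmall, CLFlattenLayerFixture<float>, framework::DatasetMode::ALL,
                       combine(framework::dataset::concat(datasets::Small3DShapes(), datasets::Small4DShapes()),
                               framework::dataset::make("DataType", DataType::F32)))
{
    // Validate output against the float reference
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
TEST_SUITE_END() // FlattenLayer
TEST_SUITE_END() // CL
} // namespace validation
} // namespace test
} // namespace arm_compute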