Diffstat (limited to 'tests/validation/NEON')
23 files changed, 31 insertions, 1387 deletions
diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp index 868683259c..289ca4870e 100644 --- a/tests/validation/NEON/ActivationLayer.cpp +++ b/tests/validation/NEON/ActivationLayer.cpp @@ -60,10 +60,6 @@ AbsoluteTolerance<float> tolerance(DataType data_type, ActivationLayerInfo::Acti case ActivationLayerInfo::ActivationFunction::TANH: switch(data_type) { - case DataType::QS8: - return AbsoluteTolerance<float>(5.f); - case DataType::QS16: - return AbsoluteTolerance<float>(11.f); case DataType::F16: return AbsoluteTolerance<float>(0.01f); default: @@ -82,8 +78,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType", DataType::F16, #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ DataType::F32, - DataType::QS8, - DataType::QS16, }); /** Input data sets. */ @@ -143,22 +137,16 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data types TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(), })), framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), - ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), - ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), })), - framework::dataset::make("Expected", { false, true, false, false, true })), + framework::dataset::make("Expected", { false, true, false})), input_info, output_info, act_info, expected) { bool is_valid = bool(NEActivationLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), act_info)); @@ -210,49 +198,6 @@ TEST_SUITE_END() template <typename T> using NEActivationLayerFixedPointFixture = ActivationValidationFixedPointFixture<Tensor, Accessor, NEActivationLayer, T>; -TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) -// We test for fixed point precision [3,5] because [1,2] and [6,7] ranges cause -// overflowing issues in most of the transcendentals functions. 
-FIXTURE_DATA_TEST_CASE(RunTiny, NEActivationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), ActivationDataset), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 3, 6))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance(_data_type, _function)); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), ActivationDataset), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 3, 6))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance(_data_type, _function)); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 14 -FIXTURE_DATA_TEST_CASE(RunTiny, NEActivationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), ActivationDataset), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance(_data_type, _function)); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), ActivationDataset), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance(_data_type, _function)); -} -TEST_SUITE_END() -TEST_SUITE_END() - template <typename T> using NEActivationLayerQuantizedFixture = ActivationValidationQuantizedFixture<Tensor, Accessor, NEActivationLayer, T>; diff --git a/tests/validation/NEON/ArithmeticAddition.cpp b/tests/validation/NEON/ArithmeticAddition.cpp index 98eeaa2f61..b01e5d929d 100644 --- a/tests/validation/NEON/ArithmeticAddition.cpp +++ b/tests/validation/NEON/ArithmeticAddition.cpp @@ -48,10 +48,6 @@ const auto ArithmeticAdditionU8Dataset = combine(combine(framework::dataset::mak DataType::U8)); const auto ArithmeticAdditionS16Dataset = combine(combine(framework::dataset::make("DataType", { DataType::U8, DataType::S16 }), framework::dataset::make("DataType", DataType::S16)), framework::dataset::make("DataType", DataType::S16)); -const auto ArithmeticAdditionQS8Dataset = combine(combine(framework::dataset::make("DataType", DataType::QS8), framework::dataset::make("DataType", DataType::QS8)), - framework::dataset::make("DataType", DataType::QS8)); -const auto ArithmeticAdditionQS16Dataset = combine(combine(framework::dataset::make("DataType", DataType::QS16), framework::dataset::make("DataType", DataType::QS16)), - framework::dataset::make("DataType", DataType::QS16)); #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC const auto ArithmeticAdditionFP16Dataset = combine(combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("DataType", DataType::F16)); @@ -74,26 +70,20 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), // Window shrink TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes - 
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), }), framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), })), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), })), - framework::dataset::make("Expected", { true, true, false, false, false, false, true })), + framework::dataset::make("Expected", { true, true, false, false, false})), input1_info, input2_info, output_info, expected) { ARM_COMPUTE_EXPECT(bool(NEArithmeticAddition::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), ConvertPolicy::WRAP)) == expected, framework::LogLevel::ERRORS); @@ -176,44 +166,6 @@ TEST_SUITE_END() template <typename T> using NEArithmeticAdditionFixedPointFixture = ArithmeticAdditionValidationFixedPointFixture<Tensor, Accessor, NEArithmeticAddition, T>; -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(RunTiny, NEArithmeticAdditionFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), ArithmeticAdditionQS8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 7))) -{ - // Validate output - validate(Accessor(_target), _reference); -} - -FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionQS8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 7))) -{ - // Validate output - validate(Accessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(RunTiny, NEArithmeticAdditionFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), ArithmeticAdditionQS16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 15))) -{ - // Validate output - validate(Accessor(_target), _reference); -} - -FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionQS16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 15))) -{ - // Validate output - validate(Accessor(_target), _reference); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE(Float) #ifdef 
__ARM_FEATURE_FP16_VECTOR_ARITHMETIC TEST_SUITE(F16) diff --git a/tests/validation/NEON/ArithmeticSubtraction.cpp b/tests/validation/NEON/ArithmeticSubtraction.cpp index 01a107c17d..fc25465e6d 100644 --- a/tests/validation/NEON/ArithmeticSubtraction.cpp +++ b/tests/validation/NEON/ArithmeticSubtraction.cpp @@ -55,10 +55,6 @@ const auto ArithmeticSubtractionS16U8S16Dataset = combine(combine(framework::dat framework::dataset::make("DataType", DataType::S16)); const auto ArithmeticSubtractionU8S16S16Dataset = combine(combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S16)), framework::dataset::make("DataType", DataType::S16)); -const auto ArithmeticSubtractionQS8Dataset = combine(combine(framework::dataset::make("DataType", DataType::QS8), framework::dataset::make("DataType", DataType::QS8)), - framework::dataset::make("DataType", DataType::QS8)); -const auto ArithmeticSubtractionQS16Dataset = combine(combine(framework::dataset::make("DataType", DataType::QS16), framework::dataset::make("DataType", DataType::QS16)), - framework::dataset::make("DataType", DataType::QS16)); #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC const auto ArithmeticSubtractionFP16Dataset = combine(combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("DataType", DataType::F16)); @@ -78,26 +74,20 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), // Window shrink TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), }), framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), })), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), })), - framework::dataset::make("Expected", { true, true, false, false, false, false, true })), + framework::dataset::make("Expected", { true, true, false, false, false})), input1_info, input2_info, output_info, expected) { ARM_COMPUTE_EXPECT(bool(NEArithmeticSubtraction::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), ConvertPolicy::WRAP)) == expected, framework::LogLevel::ERRORS); @@ -246,48 +236,6 @@ TEST_SUITE_END() template <typename T1, typename T2 = T1, typename T3 = T1> using NEArithmeticSubtractionFixedPointFixture = ArithmeticSubtractionValidationFixedPointFixture<Tensor, Accessor, NEArithmeticSubtraction, T1, T2, T3>; -TEST_SUITE(Quantized) 
-TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(RunTiny, NEArithmeticSubtractionFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), - ArithmeticSubtractionQS8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 7))) -{ - // Validate output - validate(Accessor(_target), _reference); -} - -FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticSubtractionFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), - ArithmeticSubtractionQS8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 7))) -{ - // Validate output - validate(Accessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(RunTiny, NEArithmeticSubtractionFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), - ArithmeticSubtractionQS16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 15))) -{ - // Validate output - validate(Accessor(_target), _reference); -} - -FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticSubtractionFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), - ArithmeticSubtractionQS16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("FractionalBits", 1, 15))) -{ - // Validate output - validate(Accessor(_target), _reference); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE(Float) #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC TEST_SUITE(FP16) diff --git a/tests/validation/NEON/BatchNormalizationLayer.cpp b/tests/validation/NEON/BatchNormalizationLayer.cpp index 53fd0163ff..3a18a0a93b 100644 --- a/tests/validation/NEON/BatchNormalizationLayer.cpp +++ b/tests/validation/NEON/BatchNormalizationLayer.cpp @@ -48,9 +48,7 @@ constexpr AbsoluteTolerance<float> tolerance_f32(0.00001f); /**< Tolerance value #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC constexpr AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */ #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ -constexpr AbsoluteTolerance<float> tolerance_qs8(3.0f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::QS8 */ -constexpr AbsoluteTolerance<float> tolerance_qs16(6.0f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::QS16 */ -const auto act_infos = framework::dataset::make("ActivationInfo", +const auto act_infos = framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f), @@ -66,7 +64,7 @@ using NEBatchNormalizationLayerFixture = BatchNormalizationLayerValidationFixtur DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(), combine(framework::dataset::make("UseBeta", { false, true }), framework::dataset::make("UseGamma", { false, true }))), - framework::dataset::make("DataType", { DataType::QS8, DataType::QS16, DataType::F32 })), + 
framework::dataset::make("DataType", { DataType::F32 })), framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), shape0, shape1, epsilon, use_beta, use_gamma, dt, data_layout) { @@ -106,46 +104,30 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip( TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching data types TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching data types TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Invalid mean/var/beta/gamma shape - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point position - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Fused activation with fixed point not supported TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Fused activation's a < b - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), - TensorInfo(), })), framework::dataset::make("MVBGInfo",{ TensorInfo(TensorShape(2U), 1, DataType::F32), TensorInfo(TensorShape(2U), 1, DataType::F32), TensorInfo(TensorShape(2U), 1, DataType::F16), TensorInfo(TensorShape(2U), 1, DataType::F32), TensorInfo(TensorShape(5U), 1, DataType::F32), - TensorInfo(TensorShape(2U), 1, DataType::QS8, 2), - TensorInfo(TensorShape(2U), 1, DataType::QS8, 2), TensorInfo(TensorShape(2U), 1, DataType::F32), - TensorInfo(TensorShape(2U), 1, DataType::QS8, 2), - TensorInfo(TensorShape(2U), 1, DataType::QS8, 2), })), framework::dataset::make("ActivationLayerInfo",{ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f), - ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f, 2.f), - ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f, 2.f), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 2.f, 6.f), - ActivationLayerInfo(), - ActivationLayerInfo(), })), - framework::dataset::make("Expected", { true, false, false, false, false, false, false, false, true, true})), + framework::dataset::make("Expected", { true, false, false, false, false, false})), input_info, output_info, mvbg_info, act_info, expected) { const auto &mean_info = mvbg_info; @@ -191,42 +173,6 @@ TEST_SUITE_END() #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ TEST_SUITE_END() -TEST_SUITE(Quantized) -template <typename T> -using NEBatchNormalizationLayerFixedPointFixture = BatchNormalizationLayerValidationFixedPointFixture<Tensor, Accessor, NEBatchNormalizationLayer, T>; - -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture<int8_t>, 
framework::DatasetMode::PRECOMMIT, - combine(combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(), - framework::dataset::make("UseBeta", false)), - framework::dataset::make("UseGamma", false)), - framework::dataset::make("ActivationInfo", ActivationLayerInfo())), - framework::dataset::make("DataType", DataType::QS8)), - framework::dataset::make("DataLayout", DataLayout::NCHW)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_qs8, 0); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, - combine(combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(), - framework::dataset::make("UseBeta", false)), - framework::dataset::make("UseGamma", false)), - framework::dataset::make("ActivationInfo", ActivationLayerInfo())), - framework::dataset::make("DataType", DataType::QS16)), - framework::dataset::make("DataLayout", DataLayout::NCHW)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_qs16, 0); -} -TEST_SUITE_END() - -TEST_SUITE_END() - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/NEON/Col2Im.cpp b/tests/validation/NEON/Col2Im.cpp index 9f2415d628..e4a52f25dd 100644 --- a/tests/validation/NEON/Col2Im.cpp +++ b/tests/validation/NEON/Col2Im.cpp @@ -43,19 +43,17 @@ TEST_SUITE(Col2Im) DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip( framework::dataset::make("InputInfo", { TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::S64), // Unsupported data type TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::F32), // Mismatching data type - TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::F32), // Invalid output shape TensorInfo(TensorShape(10U, 12U, 1U, 2U), 1, DataType::F32), }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::F16), TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::F16), - TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::QS8, 3), TensorInfo(TensorShape(3U, 3U, 10U, 2U), 1, DataType::F32), TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::F32), })), framework::dataset::make("ConvolvedWidth", { 3, 3, 3, 3, 3 })), framework::dataset::make("ConvolvedHeight", { 4, 4, 4, 4, 4 })), - framework::dataset::make("Expected", { false, false, false, false, true })), + framework::dataset::make("Expected", { false, false, false, true })), input_info, output_info, convolved_width, convolved_height, expected) { bool status = bool(NECol2Im::validate(&input_info, &output_info, Size2D(convolved_width, convolved_height))); diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp index 2c17487107..747d8d2f62 100644 --- a/tests/validation/NEON/ConvolutionLayer.cpp +++ b/tests/validation/NEON/ConvolutionLayer.cpp @@ -52,7 +52,6 @@ const AbsoluteTolerance<float> abs_tolerance_f32(0.002f); /**< Absolute toleranc #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC const AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */ #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ -const AbsoluteTolerance<float> tolerance_q(1.0f); 
/**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */ constexpr AbsoluteTolerance<float> tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */ /** CNN data types */ @@ -62,8 +61,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType", DataType::F16, #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ DataType::F32, - DataType::QS8, - DataType::QS16, DataType::QASYMM8, }); const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo", @@ -251,52 +248,6 @@ TEST_SUITE_END() template <typename T> using NEGEMMConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>; -TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) -// We test for fixed point precision [4,6] -FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::TinyConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QS8)), - framework::dataset::make("FractionalBits", 4, 7)), - ActivationFunctionsDataset)) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_q); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QS8)), - framework::dataset::make("FractionalBits", 4, 7)), - ActivationFunctionsDataset)) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_q); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -// Testing for fixed point position [1,14) -FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::TinyConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14)), - ActivationFunctionsDataset)) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_q); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14)), - ActivationFunctionsDataset)) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_q); -} -TEST_SUITE_END() -TEST_SUITE_END() - template <typename T> using NEGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>; diff --git a/tests/validation/NEON/DeconvolutionLayer.cpp b/tests/validation/NEON/DeconvolutionLayer.cpp index 3bb6d6f8fc..87d413f202 100644 --- a/tests/validation/NEON/DeconvolutionLayer.cpp +++ b/tests/validation/NEON/DeconvolutionLayer.cpp @@ -102,14 +102,14 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, (combine(datasets::Sm DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip( framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 
2U), 1, DataType::F32, 0), // Mismatching data type TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid weights shape - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 4), // Non supported data type + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16, 4), // Non supported data type TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 11), // Invalid bias shape TensorInfo(TensorShape(13U, 11U, 4U, 3U), 1, DataType::F32, 0), // Window shrink TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32, 0), }), framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::F16, 0), TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::QS8, 5), + TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::F16, 5), TensorInfo(TensorShape(3U, 2U, 2U, 2U), 1, DataType::F32, 11), TensorInfo(TensorShape(3U, 3U, 4U), 1, DataType::F32, 0), TensorInfo(TensorShape(1U, 1U, 2U, 4U), 1, DataType::F32, 0), diff --git a/tests/validation/NEON/DepthConcatenateLayer.cpp b/tests/validation/NEON/DepthConcatenateLayer.cpp index c3bb8a93b7..0e0b674e41 100644 --- a/tests/validation/NEON/DepthConcatenateLayer.cpp +++ b/tests/validation/NEON/DepthConcatenateLayer.cpp @@ -100,42 +100,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConcatenateLayerFixture<float>, framewor TEST_SUITE_END() TEST_SUITE_END() -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(RunTiny, NEDepthConcatenateLayerFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Tiny2DShapes(), - framework::dataset::make("DataType", - DataType::QS8))) -{ - // Validate output - validate(Accessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConcatenateLayerFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateLayerShapes(), - framework::dataset::make("DataType", - DataType::QS8))) -{ - // Validate output - validate(Accessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(RunTiny, NEDepthConcatenateLayerFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Tiny2DShapes(), - framework::dataset::make("DataType", - DataType::QS16))) -{ - // Validate output - validate(Accessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConcatenateLayerFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateLayerShapes(), - framework::dataset::make("DataType", - DataType::QS16))) -{ - // Validate output - validate(Accessor(_target), _reference); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/NEON/DepthConvertLayer.cpp b/tests/validation/NEON/DepthConvertLayer.cpp index ea63a5f52b..2bd3db7075 100644 --- a/tests/validation/NEON/DepthConvertLayer.cpp +++ b/tests/validation/NEON/DepthConvertLayer.cpp @@ -44,19 +44,14 @@ namespace validation namespace { /** Input data sets **/ -const auto DepthConvertLayerU8toU16Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::U16)); -const auto DepthConvertLayerU8toS16Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S16)); -const auto DepthConvertLayerU8toS32Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S32)); -const auto DepthConvertLayerU16toU8Dataset = 
combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U8)); -const auto DepthConvertLayerU16toU32Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U32)); -const auto DepthConvertLayerS16toU8Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::U8)); -const auto DepthConvertLayerS16toS32Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::S32)); -const auto DepthConvertLayerQS8toFP32Dataset = combine(framework::dataset::make("DataType", DataType::QS8), framework::dataset::make("DataType", DataType::F32)); -const auto DepthConvertLayerQS16toFP32Dataset = combine(framework::dataset::make("DataType", DataType::QS16), framework::dataset::make("DataType", DataType::F32)); -const auto DepthConvertLayerFP32toQS8Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::QS8)); -const auto DepthConvertLayerFP32toQS16Dataset = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::QS16)); -const auto DepthConvertLayerShiftDataset = framework::dataset::make("Shift", 0, 7); -const auto DepthConvertLayerFixedPointQuantizedDataset = framework::dataset::make("FractionalBits", 1, 7); +const auto DepthConvertLayerU8toU16Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::U16)); +const auto DepthConvertLayerU8toS16Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S16)); +const auto DepthConvertLayerU8toS32Dataset = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S32)); +const auto DepthConvertLayerU16toU8Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U8)); +const auto DepthConvertLayerU16toU32Dataset = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U32)); +const auto DepthConvertLayerS16toU8Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::U8)); +const auto DepthConvertLayerS16toS32Dataset = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::S32)); +const auto DepthConvertLayerShiftDataset = framework::dataset::make("Shift", 0, 7); } // namespace TEST_SUITE(NEON) @@ -73,10 +68,6 @@ template <typename T> using NEDepthConvertLayerToU32Fixture = DepthConvertLayerValidationFixture<Tensor, Accessor, NEDepthConvertLayer, T, uint32_t>; template <typename T> using NEDepthConvertLayerToFP32FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture<Tensor, Accessor, NEDepthConvertLayer, T, float>; -template <typename T> -using NEDepthConvertLayerToQS8FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture<Tensor, Accessor, NEDepthConvertLayer, T, int8_t>; -template <typename T> -using NEDepthConvertLayerToQS16FixedPointFixture = DepthConvertLayerValidationFractionalBitsFixture<Tensor, Accessor, NEDepthConvertLayer, T, int16_t>; TEST_SUITE(U8_to_U16) DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), 
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), @@ -367,124 +358,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToS32Fixture<int16_t>, frame } TEST_SUITE_END() -TEST_SUITE(Quantized_to_FP32) -DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("DataType", { DataType::QS8, DataType::QS16 })), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset), - shape, dt, policy, fixed_point_position) -{ - int shift = 0; - - // Create tensors - Tensor src = create_tensor<Tensor>(shape, dt, 1, fixed_point_position); - Tensor dst = create_tensor<Tensor>(shape, DataType::F32, 1, fixed_point_position); - - // Create and Configure function - NEDepthConvertLayer depth_convert; - depth_convert.configure(&src, &dst, policy, shift); - - // Validate valid region - const ValidRegion valid_region = shape_to_valid_region(shape); - validate(dst.info()->valid_region(), valid_region); - - // Validate padding - const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding(); - validate(src.info()->padding(), padding); - validate(dst.info()->padding(), padding); -} -FIXTURE_DATA_TEST_CASE(RunTinyQS8, NEDepthConvertLayerToFP32FixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), - DepthConvertLayerQS8toFP32Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(Accessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunTinyQS16, NEDepthConvertLayerToFP32FixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), - DepthConvertLayerQS16toFP32Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(Accessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmallQS8, NEDepthConvertLayerToFP32FixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), - DepthConvertLayerQS8toFP32Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(Accessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmallQS16, NEDepthConvertLayerToFP32FixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), - DepthConvertLayerQS16toFP32Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(Accessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(FP32_to_Quantized) -DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("DataType", { DataType::QS8, DataType::QS16 })), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset), - shape, dt, policy, fixed_point_position) -{ - int shift = 0; - - // Create 
tensors - Tensor src = create_tensor<Tensor>(shape, DataType::F32, 1, fixed_point_position); - Tensor dst = create_tensor<Tensor>(shape, dt, 1, fixed_point_position); - - // Create and Configure function - NEDepthConvertLayer depth_convert; - depth_convert.configure(&src, &dst, policy, shift); - - // Validate valid region - const ValidRegion valid_region = shape_to_valid_region(shape); - validate(dst.info()->valid_region(), valid_region); - - // Validate padding - const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding(); - validate(src.info()->padding(), padding); - validate(dst.info()->padding(), padding); -} -FIXTURE_DATA_TEST_CASE(RunTinyQS8, NEDepthConvertLayerToQS8FixedPointFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), - DepthConvertLayerFP32toQS8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(Accessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunTinyQS16, NEDepthConvertLayerToQS16FixedPointFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyShapes(), - DepthConvertLayerFP32toQS16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(Accessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmallQS8, NEDepthConvertLayerToQS8FixedPointFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), - DepthConvertLayerFP32toQS8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(Accessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmallQS16, NEDepthConvertLayerToQS16FixedPointFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallShapes(), - DepthConvertLayerFP32toQS16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - DepthConvertLayerFixedPointQuantizedDataset)) -{ - // Validate output - validate(Accessor(_target), _reference); -} -TEST_SUITE_END() - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/NEON/DilatedConvolutionLayer.cpp b/tests/validation/NEON/DilatedConvolutionLayer.cpp index d9fd093c8e..2888a6535e 100644 --- a/tests/validation/NEON/DilatedConvolutionLayer.cpp +++ b/tests/validation/NEON/DilatedConvolutionLayer.cpp @@ -47,7 +47,6 @@ const AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for c #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC const AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */ #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ -const AbsoluteTolerance<float> tolerance_q(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */ constexpr AbsoluteTolerance<float> tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */ /** CNN data types */ @@ -57,8 +56,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType", DataType::F16, #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ DataType::F32, - 
DataType::QS8, - DataType::QS16, DataType::QASYMM8, }); } // namespace @@ -203,56 +200,6 @@ TEST_SUITE_END() template <typename T> using NEGEMMDilatedConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>; -TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) -// We test for fixed point precision [4,6] -FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMDilatedConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, - combine(combine(combine(combine(datasets::TinyDilatedConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QS8)), - framework::dataset::make("FractionalBits", 4, 7)), - framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo()))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_q); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, - combine(combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QS8)), - framework::dataset::make("FractionalBits", 4, 7)), - framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo()))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_q); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -// Testing for fixed point position [1,14) -FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMDilatedConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, - combine(combine(combine(combine(datasets::TinyDilatedConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14)), - framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo()))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_q); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, - combine(combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14)), - framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo()))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_q); -} -TEST_SUITE_END() -TEST_SUITE_END() - template <typename T> using NEGEMMDilatedConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>; diff --git a/tests/validation/NEON/DirectConvolutionLayer.cpp b/tests/validation/NEON/DirectConvolutionLayer.cpp index f700758d21..4995d881cc 100644 --- a/tests/validation/NEON/DirectConvolutionLayer.cpp +++ b/tests/validation/NEON/DirectConvolutionLayer.cpp @@ -42,7 +42,6 @@ namespace validation { namespace { -constexpr AbsoluteTolerance<float> tolerance_qs(1.f); /**< Tolerance for fixed point tests */ #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC constexpr AbsoluteTolerance<float> tolerance_fp16(0.01f); /**< Tolerance for half precision floating point tests */ #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ @@ -59,33 +58,12 @@ const auto data_pad_f32 = concat(concat(combine(framework::dataset::make("PadX", combine(framework::dataset::make("PadY", { 0, 3 }), 
framework::dataset::make("KernelSize", 5)))); -const auto data_pad_qs8 = concat(combine(framework::dataset::make("PadX", 0), - combine(framework::dataset::make("PadY", 0), - framework::dataset::make("KernelSize", 1))), - combine(framework::dataset::make("PadX", { 0, 2 }), - combine(framework::dataset::make("PadY", { 0, 2 }), - framework::dataset::make("KernelSize", 3)))); - const auto data_f32 = combine(datasets::SmallDirectConvolutionShapes(), combine(framework::dataset::make("StrideX", { 1, 3 }), combine(framework::dataset::make("StrideY", { 1, 3 }), combine(data_pad_f32, framework::dataset::make("NumKernels", { 1, 4, 8, 16 }))))); -const auto data_qs8 = combine(datasets::TinyDirectConvolutionShapes(), - combine(framework::dataset::make("StrideX", { 1, 3 }), - combine(framework::dataset::make("StrideY", { 1, 3 }), - combine(data_pad_qs8, - framework::dataset::make("NumKernels", { 1, 4, 8, 16 }))))); - -/** Direct convolution QS16 data set. */ -const auto data_qs16 = combine(datasets::TinyDirectConvolutionShapes(), - combine(framework::dataset::make("StrideX", { 1, 3 }), - combine(framework::dataset::make("StrideY", { 1, 3 }), - combine(framework::dataset::make("PadX", 0), - combine(framework::dataset::make("PadY", 0), - combine(framework::dataset::make("KernelSize", 1), - framework::dataset::make("NumKernels", { 1, 4, 8, 16 }))))))); /** Activation function Dataset*/ const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo", { @@ -205,30 +183,6 @@ const auto QuantizedActivationFunctionsDataset = framework::dataset::make("Activ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f) }); -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -// We test for fixed point precision [4,6] -FIXTURE_DATA_TEST_CASE(Run, NEDirectConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(data_qs8, framework::dataset::make("DataType", DataType::QS8)), - framework::dataset::make("FractionalBits", 4, 7)), - QuantizedActivationFunctionsDataset)) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_qs); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -// We test for fixed point precision [4,13] -FIXTURE_DATA_TEST_CASE(Run, NEDirectConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(combine(data_qs16, framework::dataset::make("DataType", DataType::QS16)), - framework::dataset::make("FractionalBits", 4, 14)), - QuantizedActivationFunctionsDataset)) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_qs); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/NEON/FixedPoint/FixedPoint.cpp b/tests/validation/NEON/FixedPoint/FixedPoint.cpp deleted file mode 100644 index a583b587ae..0000000000 --- a/tests/validation/NEON/FixedPoint/FixedPoint.cpp +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright (c) 2017-2018 ARM Limited. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#include "FixedPointTarget.h" - -#include "arm_compute/core/Types.h" -#include "arm_compute/runtime/Tensor.h" -#include "arm_compute/runtime/TensorAllocator.h" -#include "tests/NEON/Accessor.h" -#include "tests/PaddingCalculator.h" -#include "tests/datasets/ShapeDatasets.h" -#include "tests/framework/Asserts.h" -#include "tests/framework/Macros.h" -#include "tests/framework/datasets/Datasets.h" -#include "tests/validation/Validation.h" -#include "tests/validation/fixtures/FixedPointFixture.h" - -namespace arm_compute -{ -namespace test -{ -namespace validation -{ -namespace -{ -constexpr AbsoluteTolerance<float> tolerance_exp_qs8(0.0f); /**< Tolerance value for comparing reference's output against implementation's output (exponential) for DataType::QS8 */ -constexpr AbsoluteTolerance<float> tolerance_exp_qs16(1.0f); /**< Tolerance value for comparing reference's output against implementation's output (exponential) for DataType::QS16 */ -constexpr AbsoluteTolerance<float> tolerance_invsqrt_qs8(4.0f); /**< Tolerance value for comparing reference's output against implementation's output (inverse square-root) for DataType::QS8 */ -constexpr AbsoluteTolerance<float> tolerance_invsqrt_qs16(5.0f); /**< Tolerance value for comparing reference's output against implementation's output (inverse square-root) for DataType::QS16 */ -constexpr AbsoluteTolerance<float> tolerance_log_qs8(5.0f); /**< Tolerance value for comparing reference's output against implementation's output (logarithm) for DataType::QS8 */ -constexpr AbsoluteTolerance<float> tolerance_log_qs16(7.0f); /**< Tolerance value for comparing reference's output against implementation's output (logarithm) for DataType::QS16 */ -} // namespace - -TEST_SUITE(NEON) -TEST_SUITE(FixedPoint) -template <typename T> -using NEFixedPointFixture = FixedPointValidationFixture<Tensor, Accessor, T>; - -TEST_SUITE(QS8) -TEST_SUITE(Exp) - -FIXTURE_DATA_TEST_CASE(RunSmall, NEFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::Small1DShapes(), framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FixedPointOp", FixedPointOp::EXP)), - framework::dataset::make("FractionalBits", 1, 7))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_exp_qs8, 0); -} -TEST_SUITE_END() - -TEST_SUITE(Invsqrt) -FIXTURE_DATA_TEST_CASE(RunSmall, NEFixedPointFixture<int8_t>, 
framework::DatasetMode::ALL, combine(combine(combine(datasets::Small1DShapes(), framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FixedPointOp", FixedPointOp::INV_SQRT)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_invsqrt_qs8, 0); -} -TEST_SUITE_END() - -TEST_SUITE(Log) -FIXTURE_DATA_TEST_CASE(RunSmall, NEFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::Small1DShapes(), framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FixedPointOp", FixedPointOp::LOG)), - framework::dataset::make("FractionalBits", 3, 6))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_log_qs8, 0); -} -TEST_SUITE_END() -TEST_SUITE_END() - -TEST_SUITE(QS16) -TEST_SUITE(Exp) -FIXTURE_DATA_TEST_CASE(RunSmall, NEFixedPointFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::Small1DShapes(), framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FixedPointOp", FixedPointOp::EXP)), - framework::dataset::make("FractionalBits", 1, 15))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_exp_qs16, 0); -} -TEST_SUITE_END() - -TEST_SUITE(Invsqrt) -FIXTURE_DATA_TEST_CASE(RunSmall, NEFixedPointFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::make("Shape", TensorShape(8192U)), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FixedPointOp", FixedPointOp::INV_SQRT)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_invsqrt_qs16, 0); -} -TEST_SUITE_END() - -TEST_SUITE(Log) -FIXTURE_DATA_TEST_CASE(RunSmall, NEFixedPointFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::Small1DShapes(), framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FixedPointOp", FixedPointOp::LOG)), - framework::dataset::make("FractionalBits", 4, 14))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_log_qs16, 0); -} -TEST_SUITE_END() -TEST_SUITE_END() - -TEST_SUITE_END() -TEST_SUITE_END() -} // namespace validation -} // namespace test -} // namespace arm_compute diff --git a/tests/validation/NEON/FixedPoint/FixedPointTarget.h b/tests/validation/NEON/FixedPoint/FixedPointTarget.h deleted file mode 100644 index 31ccc0817e..0000000000 --- a/tests/validation/NEON/FixedPoint/FixedPointTarget.h +++ /dev/null @@ -1,225 +0,0 @@ -/* - * Copyright (c) 2017 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef ARM_COMPUTE_TEST_FIXED_POINT_NEON_TARGET -#define ARM_COMPUTE_TEST_FIXED_POINT_NEON_TARGET - -#include "arm_compute/core/NEON/NEFixedPoint.h" - -#include "tests/Globals.h" -#include "tests/Types.h" - -namespace arm_compute -{ -namespace test -{ -namespace validation -{ -namespace -{ -template <typename TensorType, typename AccessorType, typename T> -void compute_target_impl(const TensorShape &shape, DataType dt, FixedPointOp op, int fixed_point_position, TensorType &src, TensorType &dst) -{ - Window window; - - switch(dt) - { - case DataType::QS8: - { - constexpr unsigned int num_elems_processed_per_iteration = 16; - window = calculate_max_window(*src.info(), Steps(num_elems_processed_per_iteration)); - AccessWindowHorizontal input_access(src.info(), 0, num_elems_processed_per_iteration); - AccessWindowHorizontal output_access(dst.info(), 0, num_elems_processed_per_iteration); - update_window_and_padding(window, input_access, output_access); - break; - } - case DataType::QS16: - { - constexpr unsigned int num_elems_processed_per_iteration = 8; - window = calculate_max_window(*src.info(), Steps(num_elems_processed_per_iteration)); - AccessWindowHorizontal input_access(src.info(), 0, num_elems_processed_per_iteration); - AccessWindowHorizontal output_access(dst.info(), 0, num_elems_processed_per_iteration); - update_window_and_padding(window, input_access, output_access); - break; - } - default: - ARM_COMPUTE_ERROR("Not Supported"); - break; - } - - int min; - int max; - switch(op) - { - case FixedPointOp::EXP: - { - // Fill tensors. Keep the range between [-1.0, 1.0) so the result won't - // overflow. - min = -(1 << (fixed_point_position - 1)); - max = (1 << (fixed_point_position - 1)); - break; - } - case FixedPointOp::INV_SQRT: - { - if(dt == DataType::QS8) - { - // Fill tensors. Keep the range between [1, 127). - min = 1; - max = 127; - } - else - { - // Fill tensors. Keep the range between [1, 0x7FFF) - min = 1; - max = 0x7FFF; - } - break; - } - case FixedPointOp::LOG: - { - if(dt == DataType::QS8) - { - // Fill tensors. Keep the range between [(1 << (fixed_point_position - 1), 63) so the result won't - // overflow. E.g. for Q2.5 ln(0.001) = -6.9, which cannot be represented. - min = (1 << (fixed_point_position - 1)); - max = 0x3F; - } - else - { - // Fill tensors. Keep the range between [(1 << (fixed_point_position - 1), 0x3FFF) so the result won't - // overflow. - min = (1 << (fixed_point_position - 1)); - max = 0x3FFF; - } - break; - } - case FixedPointOp::RECIPROCAL: - { - if(dt == DataType::QS8) - { - // Fill tensors. Keep the range between [15, 100) so the result won't - // overflow. E.g. for Q2.5 reciprocal(0.001) = 1000, which cannot be represented. - min = 15; - max = 0x7F; - } - else - { - // Fill tensors. Keep the range between [15, 0x7FFF) so the result won't - // overflow. 
- min = 15; - max = 0x7FFF; - } - break; - } - default: - ARM_COMPUTE_ERROR("Not Supported"); - break; - } - - std::uniform_int_distribution<> distribution(min, max); - library->fill(AccessorType(src), distribution, 0); - - Iterator input(&src, window); - Iterator output(&dst, window); - - const auto loop_function = [&](const Coordinates & id) - { - switch(dt) - { - case DataType::QS8: - { - const qint8x16_t qs8in = vld1q_s8(reinterpret_cast<const qint8_t *>(input.ptr())); - switch(op) - { - case FixedPointOp::EXP: - { - // Use saturated exp - vst1q_s8(reinterpret_cast<qint8_t *>(output.ptr()), vqexpq_qs8(qs8in, fixed_point_position)); - break; - } - case FixedPointOp::INV_SQRT: - { - vst1q_s8(reinterpret_cast<qint8_t *>(output.ptr()), vqinvsqrtq_qs8(qs8in, fixed_point_position)); - break; - } - case FixedPointOp::LOG: - { - vst1q_s8(reinterpret_cast<qint8_t *>(output.ptr()), vlogq_qs8(qs8in, fixed_point_position)); - break; - } - case FixedPointOp::RECIPROCAL: - { - vst1q_s8(reinterpret_cast<qint8_t *>(output.ptr()), vrecipq_qs8(qs8in, fixed_point_position)); - break; - } - default: - ARM_COMPUTE_ERROR("Not Supported"); - break; - } - break; - } - case DataType::QS16: - { - const qint16x8_t qs16in = vld1q_qs16(reinterpret_cast<const qint16_t *>(input.ptr())); - switch(op) - { - case FixedPointOp::EXP: - { - // Use saturated exp - vst1q_qs16(reinterpret_cast<qint16_t *>(output.ptr()), vqexpq_qs16(qs16in, fixed_point_position)); - break; - } - case FixedPointOp::INV_SQRT: - { - vst1q_qs16(reinterpret_cast<qint16_t *>(output.ptr()), vqinvsqrtq_qs16(qs16in, fixed_point_position)); - break; - } - case FixedPointOp::LOG: - { - vst1q_qs16(reinterpret_cast<qint16_t *>(output.ptr()), vlogq_qs16(qs16in, fixed_point_position)); - break; - } - case FixedPointOp::RECIPROCAL: - { - vst1q_qs16(reinterpret_cast<qint16_t *>(output.ptr()), vqrecipq_qs16(qs16in, fixed_point_position)); - break; - } - default: - ARM_COMPUTE_ERROR("Not Supported"); - break; - } - break; - } - default: - ARM_COMPUTE_ERROR("Not Supported"); - break; - } - }; - - execute_window_loop(window, loop_function, input, output); -} -} // namespace -} // namespace validation -} // namespace test -} // namespace arm_compute -#endif /* ARM_COMPUTE_TEST_FIXED_POINT_NEON_TARGET */ diff --git a/tests/validation/NEON/FixedPointPixelWiseMultiplication.cpp b/tests/validation/NEON/FixedPointPixelWiseMultiplication.cpp deleted file mode 100644 index d7954dec64..0000000000 --- a/tests/validation/NEON/FixedPointPixelWiseMultiplication.cpp +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Copyright (c) 2017-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#include "arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h" -#include "tests/NEON/Accessor.h" -#include "tests/datasets/ConvertPolicyDataset.h" -#include "tests/datasets/ShapeDatasets.h" -#include "tests/framework/Macros.h" -#include "tests/validation/Validation.h" -#include "tests/validation/fixtures/FixedPointPixelWiseMultiplicationFixture.h" - -namespace arm_compute -{ -namespace test -{ -namespace validation -{ -namespace -{ -const float tolerance = 1.f; -const float scale_255 = 1.f / 255.f; -const float scale_unity = 1.f; - -// *INDENT-OFF* -// clang-format off -#define FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(TEST_NAME, FIXTURE, MODE, SHAPES, DT1, DT2, SCALE, RP, FPP_START, FPP_END) \ - FIXTURE_DATA_TEST_CASE(TEST_NAME, NEFixedPointPixelWiseMultiplication##FIXTURE, framework::DatasetMode::MODE, \ - combine(combine(combine(combine(combine(combine( \ - datasets::SHAPES, \ - framework::dataset::make("DataType1", DataType::DT1)), \ - framework::dataset::make("DataType2", DataType::DT2)), \ - framework::dataset::make("Scale", std::move(SCALE))), \ - datasets::ConvertPolicies()), \ - framework::dataset::make("RoundingPolicy", RoundingPolicy::RP)), \ - framework::dataset::make("FixedPointPosition", FPP_START, FPP_END))) \ - { \ - validate(Accessor(_target), _reference); \ - } - -#define FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(TEST_NAME, FIXTURE, MODE, SHAPES, DT1, DT2, RP, FPP, TOLERANCE) \ - FIXTURE_DATA_TEST_CASE(TEST_NAME, NEFixedPointPixelWiseMultiplication##FIXTURE, framework::DatasetMode::MODE, \ - combine(combine(combine(combine(combine(combine( \ - datasets::SHAPES, \ - framework::dataset::make("DataType1", DataType::DT1)), \ - framework::dataset::make("DataType2", DataType::DT2)), \ - framework::dataset::make("Scale", 1.f / static_cast<float>(1 << (FPP)))), \ - datasets::ConvertPolicies()), \ - framework::dataset::make("RoundingPolicy", RoundingPolicy::RP)), \ - framework::dataset::make("FixedPointPosition", FPP))) \ - { \ - validate(Accessor(_target), _reference, AbsoluteTolerance<float>(TOLERANCE), 0.f); \ - } -// clang-format on -// *INDENT-ON* -} // namespace - -template <typename T> -using NEFixedPointPixelWiseMultiplicationFixture = FixedPointPixelWiseMultiplicationValidationFixture<Tensor, Accessor, NEPixelWiseMultiplication, T>; - -TEST_SUITE(NEON) -TEST_SUITE(FixedPointPixelWiseMultiplication) - -TEST_SUITE(QS8) - -TEST_SUITE(Scale255) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunTiny, Fixture<qint8_t>, PRECOMMIT, TinyShapes(), QS8, QS8, scale_255, TO_NEAREST_UP, 1, 7) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunSmall, Fixture<qint8_t>, NIGHTLY, SmallShapes(), QS8, QS8, scale_255, TO_NEAREST_UP, 1, 7) -TEST_SUITE_END() // Scale255 - -TEST_SUITE(ScaleUnity) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunTiny, Fixture<qint8_t>, PRECOMMIT, TinyShapes(), QS8, QS8, scale_unity, TO_ZERO, 1, 7) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunSmall, Fixture<qint8_t>, NIGHTLY, SmallShapes(), QS8, QS8, scale_unity, TO_ZERO, 1, 7) -TEST_SUITE_END() // ScaleUnity - -TEST_SUITE(ScaleOther) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther1, Fixture<qint8_t>, PRECOMMIT, TinyShapes(), QS8, QS8, TO_ZERO, 1, 
tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther2, Fixture<qint8_t>, PRECOMMIT, TinyShapes(), QS8, QS8, TO_ZERO, 2, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther3, Fixture<qint8_t>, PRECOMMIT, TinyShapes(), QS8, QS8, TO_ZERO, 3, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther4, Fixture<qint8_t>, PRECOMMIT, TinyShapes(), QS8, QS8, TO_ZERO, 4, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther5, Fixture<qint8_t>, PRECOMMIT, TinyShapes(), QS8, QS8, TO_ZERO, 5, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther6, Fixture<qint8_t>, PRECOMMIT, TinyShapes(), QS8, QS8, TO_ZERO, 6, tolerance) - -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunSmallOther1, Fixture<qint8_t>, NIGHTLY, SmallShapes(), QS8, QS8, TO_ZERO, 1, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunSmallOther2, Fixture<qint8_t>, NIGHTLY, SmallShapes(), QS8, QS8, TO_ZERO, 2, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunSmallOther3, Fixture<qint8_t>, NIGHTLY, SmallShapes(), QS8, QS8, TO_ZERO, 3, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunSmallOther4, Fixture<qint8_t>, NIGHTLY, SmallShapes(), QS8, QS8, TO_ZERO, 4, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunSmallOther5, Fixture<qint8_t>, NIGHTLY, SmallShapes(), QS8, QS8, TO_ZERO, 5, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunSmallOther6, Fixture<qint8_t>, NIGHTLY, SmallShapes(), QS8, QS8, TO_ZERO, 6, tolerance) -TEST_SUITE_END() // ScaleOther - -TEST_SUITE_END() // QS8 - -TEST_SUITE(QS16) - -TEST_SUITE(Scale255) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunTiny, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, scale_255, TO_NEAREST_UP, 1, 15) -TEST_SUITE_END() // Scale255 - -TEST_SUITE(ScaleUnity) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunTiny, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, scale_unity, TO_ZERO, 1, 15) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunSmall, Fixture<qint16_t>, NIGHTLY, SmallShapes(), QS16, QS16, scale_unity, TO_ZERO, 1, 15) -TEST_SUITE_END() // ScaleUnity - -TEST_SUITE(ScaleOther) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther1, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 1, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther2, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 2, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther3, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 3, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther4, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 4, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther5, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 5, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther6, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 6, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther7, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 7, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther8, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 8, tolerance) 
-FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther9, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 9, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther10, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 10, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther11, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 11, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther12, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 12, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther13, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 13, tolerance) -FP_PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE_OTHER(RunTinyOther14, Fixture<qint16_t>, PRECOMMIT, TinyShapes(), QS16, QS16, TO_ZERO, 14, tolerance) -TEST_SUITE_END() // ScaleOther - -TEST_SUITE_END() // QS16 - -TEST_SUITE_END() -TEST_SUITE_END() -} // namespace validation -} // namespace test -} // namespace arm_compute diff --git a/tests/validation/NEON/Flatten.cpp b/tests/validation/NEON/Flatten.cpp index 332716925a..86f1a11262 100644 --- a/tests/validation/NEON/Flatten.cpp +++ b/tests/validation/NEON/Flatten.cpp @@ -80,38 +80,6 @@ TEST_SUITE_END() #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ TEST_SUITE_END() -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(RunTiny, NEFlattenLayerFixture<int8_t>, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::Tiny3DShapes(), datasets::Tiny4DShapes()), - framework::dataset::make("DataType", DataType::QS8))) -{ - // Validate output - validate(Accessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEFlattenLayerFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(framework::dataset::concat(datasets::Small3DShapes(), datasets::Small4DShapes()), - framework::dataset::make("DataType", DataType::QS8))) -{ - // Validate output - validate(Accessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(RunTiny, NEFlattenLayerFixture<int16_t>, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::Tiny3DShapes(), datasets::Tiny4DShapes()), - framework::dataset::make("DataType", DataType::QS16))) -{ - // Validate output - validate(Accessor(_target), _reference); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEFlattenLayerFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(framework::dataset::concat(datasets::Small3DShapes(), datasets::Small4DShapes()), - framework::dataset::make("DataType", DataType::QS16))) -{ - // Validate output - validate(Accessor(_target), _reference); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/NEON/FullyConnectedLayer.cpp b/tests/validation/NEON/FullyConnectedLayer.cpp index ed0edcd3be..3adcf61dc9 100644 --- a/tests/validation/NEON/FullyConnectedLayer.cpp +++ b/tests/validation/NEON/FullyConnectedLayer.cpp @@ -47,8 +47,6 @@ constexpr RelativeTolerance<float> tolerance_f32(0.01f); #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC constexpr RelativeTolerance<float> tolerance_f16(0.01f); #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC*/ -/** Tolerance for fixed point operations */ -constexpr AbsoluteTolerance<float> tolerance_fixed_point(1.f); /** CNN data types */ const auto CNNDataTypes = framework::dataset::make("DataType", @@ -57,8 +55,6 @@ const auto CNNDataTypes = 
framework::dataset::make("DataType", DataType::F16, #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ DataType::F32, - DataType::QS8, - DataType::QS16, }); const auto FullyConnectedParameters = combine(framework::dataset::make("TransposeWeights", { false, true }), framework::dataset::make("ReshapeWeights", { false, true })); @@ -119,7 +115,6 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip( framework::dataset::make("InputInfo", { TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::F32), // Mismatching data types - TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::QS8, 2), // Mismatching fixed point position TensorInfo(TensorShape(8U, 4U, 6U, 4U), 1, DataType::F32), TensorInfo(TensorShape(8U, 4U, 6U, 4U), 1, DataType::F32), TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::F32), // Invalid weights dimensions @@ -127,7 +122,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip( TensorInfo(TensorShape(8U, 4U, 6U, 4U), 1, DataType::F32), }), framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(315U, 271U), 1, DataType::F16), - TensorInfo(TensorShape(315U, 271U), 1, DataType::QS8, 3), TensorInfo(TensorShape(192U, 192U), 1, DataType::F32), TensorInfo(TensorShape(192U, 192U), 1, DataType::F32), TensorInfo(TensorShape(217U, 315U), 1, DataType::F32), @@ -135,7 +129,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip( TensorInfo(TensorShape(192U, 192U), 1, DataType::F32), })), framework::dataset::make("BiasInfo",{ TensorInfo(TensorShape(271U), 1, DataType::F32), - TensorInfo(TensorShape(271U), 1, DataType::QS8, 2), TensorInfo(TensorShape(192U), 1, DataType::F32), TensorInfo(TensorShape(192U), 1, DataType::F32), TensorInfo(TensorShape(271U), 1, DataType::F32), @@ -143,16 +136,15 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip( TensorInfo(TensorShape(192U), 1, DataType::F32), })), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(271U, 3U), 1, DataType::F32), - TensorInfo(TensorShape(271U, 3U), 1, DataType::QS8, 3), TensorInfo(TensorShape(192U, 4U), 1, DataType::F32), TensorInfo(TensorShape(192U, 4U), 1, DataType::F32), TensorInfo(TensorShape(271U, 3U), 1, DataType::F32), TensorInfo(TensorShape(271U, 3U), 1, DataType::F32), TensorInfo(TensorShape(192U, 4U), 1, DataType::F32), })), - framework::dataset::make("TransposeWeights",{ true, true, true, false, true, true, true })), - framework::dataset::make("ReshapedWeights",{ false, false, false, false, false, false , false})), - framework::dataset::make("Expected", { false, false, true, true, false, false, true })), + framework::dataset::make("TransposeWeights",{ true, true, false, true, true, true })), + framework::dataset::make("ReshapedWeights",{ false, false, false, false, false , false})), + framework::dataset::make("Expected", { false, true, true, false, false, true })), input_info, weights_info, bias_info, output_info, transpose_weights, reshaped_weights, expected) { Status status = NEFullyConnectedLayer::validate(&input_info.clone()->set_is_resizable(false), &weights_info.clone()->set_is_resizable(false), &bias_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), transpose_weights, reshaped_weights); @@ -203,52 +195,6 @@ TEST_SUITE_END() template <typename T> using NEFullyConnectedLayerFixedPointFixture = FullyConnectedLayerValidationFixedPointFixture<Tensor, 
Accessor, NEFullyConnectedLayer, T, true>; -TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) -// Testing for fixed point position [1,6) as reciprocal limits the maximum fixed point position to 5 -FIXTURE_DATA_TEST_CASE(RunTiny, NEFullyConnectedLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyFullyConnectedLayerDataset(), - FullyConnectedParameters), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_fixed_point); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallFullyConnectedLayerDataset(), - FullyConnectedParameters), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_fixed_point); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 14 -FIXTURE_DATA_TEST_CASE(RunTiny, NEFullyConnectedLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyFullyConnectedLayerDataset(), - FullyConnectedParameters), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_fixed_point); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallFullyConnectedLayerDataset(), - FullyConnectedParameters), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_fixed_point); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/NEON/GEMM.cpp b/tests/validation/NEON/GEMM.cpp index bfa2db443a..e0f63a8a2d 100644 --- a/tests/validation/NEON/GEMM.cpp +++ b/tests/validation/NEON/GEMM.cpp @@ -50,7 +50,6 @@ namespace validation namespace { constexpr AbsoluteTolerance<float> tolerance_f(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for floating point data types */ -constexpr AbsoluteTolerance<float> tolerance_q(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */ /** CNN data types */ const auto CNNDataTypes = framework::dataset::make("DataType", @@ -59,8 +58,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType", DataType::F16, #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ DataType::F32, - DataType::QS8, - DataType::QS16, }); const auto data_interleave = framework::dataset::make("M", 8, 12) * framework::dataset::make("N", 8, 12); @@ -82,33 +79,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMTranspose1xWFixture, framework::DatasetMo } TEST_SUITE_END() // FP32 -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -using NEGEMMTranspose1xW = NESynthetizeFunctionWithZeroConstantBorder<NEGEMMTranspose1xWKernel, 16>; -using NEGEMMTranspose1xWFixture = GEMMTranspose1xWValidationFixedPointFixture<Tensor, Accessor, NEGEMMTranspose1xW, int8_t>; -FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMTranspose1xWFixture, 
framework::DatasetMode::PRECOMMIT, data_transpose * - framework::dataset::make("DataType", DataType::QS8) - * framework::dataset::make("FractionalBits", 1, 7)) -{ - // Validate output - validate(Accessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -using NEGEMMTranspose1xW = NESynthetizeFunctionWithZeroConstantBorder<NEGEMMTranspose1xWKernel, 8>; -using NEGEMMTranspose1xWFixture = GEMMTranspose1xWValidationFixedPointFixture<Tensor, Accessor, NEGEMMTranspose1xW, int16_t>; -FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMTranspose1xWFixture, framework::DatasetMode::PRECOMMIT, data_transpose * - framework::dataset::make("DataType", DataType::QS16) - * framework::dataset::make("FractionalBits", 1, 14)) -{ - // Validate output - validate(Accessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE_END() - TEST_SUITE_END() // TRANSPOSE_1XW TEST_SUITE(INTERLEAVE_4X4) @@ -123,31 +93,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMInterleave4x4Fixture, framework::DatasetM } TEST_SUITE_END() // FP32 -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -using NEGEMMInterleave4x4Fixture = GEMMInterleave4x4ValidationFixedPointFixture<Tensor, Accessor, NEGEMMInterleave4x4, int8_t>; -FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMInterleave4x4Fixture, framework::DatasetMode::PRECOMMIT, data_interleave * - framework::dataset::make("DataType", DataType::QS8) - * framework::dataset::make("FractionalBits", 1, 7)) -{ - // Validate output - validate(Accessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -using NEGEMMInterleave4x4Fixture = GEMMInterleave4x4ValidationFixedPointFixture<Tensor, Accessor, NEGEMMInterleave4x4, int16_t>; -FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMInterleave4x4Fixture, framework::DatasetMode::PRECOMMIT, data_interleave * - framework::dataset::make("DataType", DataType::QS16) - * framework::dataset::make("FractionalBits", 1, 14)) -{ - // Validate output - validate(Accessor(_target), _reference); -} -TEST_SUITE_END() - -TEST_SUITE_END() - TEST_SUITE_END() // INTERLEAVE_4X4 DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallGEMMDataset(), datasets::LargeGEMMDataset()), CNNDataTypes), @@ -211,46 +156,6 @@ TEST_SUITE_END() template <typename T> using NEGEMMFixedPointFixture = GEMMValidationFixedPointFixture<Tensor, Accessor, NEGEMM, T>; -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::TinyGEMMDataset(), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 7))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_q); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SmallGEMMDataset(), - framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 7))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_q); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::TinyGEMMDataset(), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_q); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, 
combine(combine(datasets::SmallGEMMDataset(), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_q); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp index ed24d618f5..eb350e1029 100644 --- a/tests/validation/NEON/GEMMLowp.cpp +++ b/tests/validation/NEON/GEMMLowp.cpp @@ -102,7 +102,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, framework::dataset::c // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Input not a multiple of 4 - TensorInfo(TensorShape(21U, 13U), 1, DataType::QS8, 2), // Mismatching data type + TensorInfo(TensorShape(21U, 13U), 1, DataType::S32, 2), // Mismatching data type TensorInfo(TensorShape(20U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Invalid dimensions TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Invalid dimensions TensorInfo(TensorShape(16U, 32U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), diff --git a/tests/validation/NEON/Im2Col.cpp b/tests/validation/NEON/Im2Col.cpp index 3a45fa7ae4..d2e7875853 100644 --- a/tests/validation/NEON/Im2Col.cpp +++ b/tests/validation/NEON/Im2Col.cpp @@ -52,20 +52,18 @@ TEST_SUITE(Im2Col) DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( framework::dataset::make("InputInfo", { TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::U8), // Unsupported data type TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::F32), // Mismatching data type - TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::QASYMM8), // Bias not supported with QASYMM8 TensorInfo(TensorShape(10U, 12U, 2U), 1, DataType::QASYMM8), // Mismatching shapes TensorInfo(TensorShape(10U, 12U, 2U, 2U), 1, DataType::QASYMM8), }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::F16), TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::F16), - TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::QS8, 3), TensorInfo(TensorShape(3U, 3U, 10U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(3U, 4U, 10U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(18U, 80U, 1U, 2U), 1, DataType::QASYMM8), })), - framework::dataset::make("HasBias", { true, true, true, true, false, false })), - framework::dataset::make("Expected", { false, false, false, false, false, true })), + framework::dataset::make("HasBias", { true, true, true, false, false })), + framework::dataset::make("Expected", { false, false, false, false, true })), input_info, output_info, has_bias, expected) { bool status = bool(NEIm2Col::validate(&input_info, &output_info, Size2D(3U, 3U), PadStrideInfo(), has_bias, false, false)); diff --git a/tests/validation/NEON/NormalizationLayer.cpp b/tests/validation/NEON/NormalizationLayer.cpp index cf63675246..8c66611f49 100644 --- a/tests/validation/NEON/NormalizationLayer.cpp +++ b/tests/validation/NEON/NormalizationLayer.cpp @@ -48,9 +48,6 @@ namespace constexpr AbsoluteTolerance<float> tolerance_f16(0.001f); #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ constexpr AbsoluteTolerance<float> 
tolerance_f32(0.00001f); -/** Tolerance for fixed point operations */ -constexpr AbsoluteTolerance<int8_t> tolerance_qs8(2); -constexpr AbsoluteTolerance<int16_t> tolerance_qs16(4); /** Input data set. */ const auto NormalizationDatasetQS = combine(combine(combine(combine(datasets::TinyShapes(), datasets::NormalizationTypes()), framework::dataset::make("NormalizationSize", 3, 9, 2)), @@ -76,7 +73,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching shapes TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Even normalization TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Non implemented IN_MAP_2D - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 4), // Mismatching fixed point position TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Window shrink TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 0), }), @@ -84,7 +80,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32, 0), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 0), })), @@ -93,10 +88,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( NormalizationLayerInfo(NormType::IN_MAP_1D, 4), NormalizationLayerInfo(NormType::IN_MAP_2D, 5), NormalizationLayerInfo(NormType::IN_MAP_1D, 5), - NormalizationLayerInfo(NormType::IN_MAP_1D, 5), NormalizationLayerInfo(NormType::CROSS_MAP, 1), })), - framework::dataset::make("Expected", { false, false, false, false, false, false, true })), + framework::dataset::make("Expected", { false, false, false, false, false, true })), input_info, output_info, norm_info, expected) { bool is_valid = bool(NENormalizationLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), norm_info)); @@ -141,44 +135,6 @@ TEST_SUITE_END() template <typename T> using NENormalizationLayerFixedPointFixture = NormalizationValidationFixedPointFixture<Tensor, Accessor, NENormalizationLayer, T>; -TEST_SUITE(Quantized) -TEST_SUITE(QS8) -// Testing for fixed point position [1,6) as reciprocal limits the maximum fixed point position to 5 -FIXTURE_DATA_TEST_CASE(RunTiny, NENormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(NormalizationDatasetQS, framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_qs8); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NENormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(NormalizationDataset, framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_qs8); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 14 -FIXTURE_DATA_TEST_CASE(RunTiny, NENormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(NormalizationDatasetQS, framework::dataset::make("DataType", - DataType::QS16)), - 
framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_qs16); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NENormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(NormalizationDataset, framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_qs16); -} -TEST_SUITE_END() -TEST_SUITE_END() - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/NEON/PixelWiseMultiplication.cpp b/tests/validation/NEON/PixelWiseMultiplication.cpp index 9304c8bad0..77da473e0d 100644 --- a/tests/validation/NEON/PixelWiseMultiplication.cpp +++ b/tests/validation/NEON/PixelWiseMultiplication.cpp @@ -114,10 +114,6 @@ using NEPixelWiseMultiplicationToF16Fixture = PixelWiseMultiplicationValidationF template <typename T> using NEPixelWiseMultiplicationToF32Fixture = PixelWiseMultiplicationValidationFixture<Tensor, Accessor, NEPixelWiseMultiplication, T, float>; template <typename T> -using NEPixelWiseMultiplicationToQS8Fixture = PixelWiseMultiplicationValidationFixture<Tensor, Accessor, NEPixelWiseMultiplication, T, qint8_t>; -template <typename T> -using NEPixelWiseMultiplicationToQS16Fixture = PixelWiseMultiplicationValidationFixture<Tensor, Accessor, NEPixelWiseMultiplication, T, qint16_t>; -template <typename T> using NEPixelWiseMultiplicationBroadcastFixture = PixelWiseMultiplicationBroadcastValidationFixture<Tensor, Accessor, NEPixelWiseMultiplication, T, float>; TEST_SUITE(NEON) @@ -132,9 +128,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip( TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid scale TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching data type - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Invalid scale + TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching data type }), framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), @@ -142,9 +136,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip( TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS16, 2), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), + TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), })), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), @@ -152,12 +144,10 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip( TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), - 
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), + TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), })), - framework::dataset::make("Scale",{ scale_unity, scale_unity, scale_unity, -1.f, scale_unity, scale_unity, scale_unity, scale_unity, 3.f})), - framework::dataset::make("Expected", { true, true, false, false, false, false, false, false, false })), + framework::dataset::make("Scale",{ scale_unity, scale_unity, scale_unity, -1.f, scale_unity, scale_unity, scale_unity})), + framework::dataset::make("Expected", { true, true, false, false, false, false, false })), input1_info, input2_info, output_info, scale, expected) { bool has_error = bool(NEPixelWiseMultiplication::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), scale, ConvertPolicy::WRAP, RoundingPolicy::TO_ZERO)); diff --git a/tests/validation/NEON/PoolingLayer.cpp b/tests/validation/NEON/PoolingLayer.cpp index b44f945eb7..8762f1f7cc 100644 --- a/tests/validation/NEON/PoolingLayer.cpp +++ b/tests/validation/NEON/PoolingLayer.cpp @@ -65,8 +65,6 @@ constexpr AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value f #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC constexpr AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for float types */ #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ -constexpr AbsoluteTolerance<float> tolerance_qs8(0); /**< Tolerance value for comparing reference's output against implementation's output for quantized input */ -constexpr AbsoluteTolerance<float> tolerance_qs16(0); /**< Tolerance value for comparing reference's output against implementation's output for quantized input */ constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1); /**< Tolerance value for comparing reference's output against implementation's output for 8-bit asymmetric type */ } // namespace @@ -80,8 +78,6 @@ TEST_SUITE(PoolingLayer) DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Window shrink - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 4), // Mismatching fixed point position - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS16, 11), // Window shrink TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid pad/size combination TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid pad/size combination TensorInfo(TensorShape(15U, 13U, 5U), 1, DataType::F32, 0), // Non-rectangular Global Pooling @@ -90,8 +86,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16, 0), TensorInfo(TensorShape(25U, 10U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::QS8, 5), - TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::QS16, 11), TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32, 0), TensorInfo(TensorShape(25U, 16U, 2U), 1, DataType::F32, 0), TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32, 0), @@ -100,15 +94,13 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( })), framework::dataset::make("PoolInfo", { PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)), PoolingLayerInfo(PoolingType::AVG, 3, 
PadStrideInfo(1, 1, 0, 0)), - PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)), - PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)), PoolingLayerInfo(PoolingType::AVG, 2, PadStrideInfo(1, 1, 2, 0)), PoolingLayerInfo(PoolingType::AVG, 2, PadStrideInfo(1, 1, 0, 2)), PoolingLayerInfo(PoolingType::AVG), PoolingLayerInfo(PoolingType::MAX), PoolingLayerInfo(PoolingType::AVG), })), - framework::dataset::make("Expected", { false, false, false, false, false, false, true, false, false, true })), + framework::dataset::make("Expected", { false, false, false, false, true, false, false, true })), input_info, output_info, pool_info, expected) { bool is_valid = bool(NEPoolingLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), pool_info)); @@ -170,42 +162,6 @@ TEST_SUITE_END() // Float template <typename T> using NEPoolingLayerFixedPointFixture = PoolingLayerValidationFixedPointFixture<Tensor, Accessor, NEPoolingLayer, T>; -TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) -FIXTURE_DATA_TEST_CASE(RunTiny, NEPoolingLayerFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapes(), combine(PoolingLayerDatasetQS, - framework::dataset::make("DataType", DataType::QS8))), - framework::dataset::make("FractionalBits", 1, 5))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_qs8); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEPoolingLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SmallShapes(), combine(PoolingLayerDatasetQS, - framework::dataset::make("DataType", DataType::QS8))), - framework::dataset::make("FractionalBits", 1, 5))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_qs8); -} -TEST_SUITE_END() // QS8 - -TEST_SUITE(QS16) -FIXTURE_DATA_TEST_CASE(RunTiny, NEPoolingLayerFixedPointFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapes(), combine(PoolingLayerDatasetQS, - framework::dataset::make("DataType", DataType::QS16))), - framework::dataset::make("FractionalBits", 1, 13))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_qs16); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEPoolingLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SmallShapes(), combine(PoolingLayerDatasetQS, - framework::dataset::make("DataType", DataType::QS16))), - framework::dataset::make("FractionalBits", 1, 13))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_qs16); -} -TEST_SUITE_END() // QS16 -TEST_SUITE_END() // FixedPoint - TEST_SUITE(Quantized) template <typename T> diff --git a/tests/validation/NEON/SoftmaxLayer.cpp b/tests/validation/NEON/SoftmaxLayer.cpp index e091c0268a..4a1aa23ac1 100644 --- a/tests/validation/NEON/SoftmaxLayer.cpp +++ b/tests/validation/NEON/SoftmaxLayer.cpp @@ -47,8 +47,6 @@ constexpr AbsoluteTolerance<float> tolerance_f32(0.000001f); #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC constexpr AbsoluteTolerance<float> tolerance_f16(0.0001f); #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC*/ -/** Tolerance for fixed point operations */ -constexpr AbsoluteTolerance<int16_t> tolerance_fixed_point(2); /** Tolerance for quantized operations */ constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1); @@ -60,8 +58,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType", DataType::F16, #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ DataType::F32, - DataType::QS8, - DataType::QS16, }); 
} // namespace @@ -101,15 +97,13 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip( framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data types TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32), }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16), TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3), TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32), })), - framework::dataset::make("Expected", { false, false, false, true })), + framework::dataset::make("Expected", { false, false, true })), input_info, output_info, expected) { bool is_valid = bool(NESoftmaxLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false))); @@ -162,46 +156,6 @@ TEST_SUITE_END() template <typename T> using NESoftmaxLayerFixedPointFixture = SoftmaxValidationFixedPointFixture<Tensor, Accessor, NESoftmaxLayer, T>; -TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) -// Testing for fixed point position [1,6) as reciprocal limits the maximum fixed point position to 5 -FIXTURE_DATA_TEST_CASE(RunTiny, NESoftmaxLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SoftmaxLayerTinyShapes(), framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_fixed_point); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NESoftmaxLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SoftmaxLayerSmallShapes(), framework::dataset::make("DataType", - DataType::QS8)), - framework::dataset::make("FractionalBits", 1, 6))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_fixed_point); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 14 -FIXTURE_DATA_TEST_CASE(RunTiny, NESoftmaxLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SoftmaxLayerTinyShapes(), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_fixed_point); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NESoftmaxLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SoftmaxLayerSmallShapes(), - framework::dataset::make("DataType", - DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14))) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_fixed_point); -} -TEST_SUITE_END() -TEST_SUITE_END() - template <typename T> using NESoftmaxLayerQuantizedFixture = SoftmaxValidationQuantizedFixture<Tensor, Accessor, NESoftmaxLayer, T>;