From 0fc25454b6ce499b7f89792f91b81a61a42d3182 Mon Sep 17 00:00:00 2001
From: Vidhya Sudhan Loganathan
Date: Mon, 18 Jun 2018 14:40:56 +0100
Subject: COMPMID-970 : Remove QS8 / QS16 support

Remove QS8 and QS16 validation and benchmark tests

Change-Id: I566f1474c1fafcb3903115ec2d3a003d73e4c93b
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/133762
Tested-by: Jenkins
Reviewed-by: Pablo Tello
---
 tests/validation/NEON/PoolingLayer.cpp | 46 +---------------------------------
 1 file changed, 1 insertion(+), 45 deletions(-)

diff --git a/tests/validation/NEON/PoolingLayer.cpp b/tests/validation/NEON/PoolingLayer.cpp
index b44f945eb7..8762f1f7cc 100644
--- a/tests/validation/NEON/PoolingLayer.cpp
+++ b/tests/validation/NEON/PoolingLayer.cpp
@@ -65,8 +65,6 @@ constexpr AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value f
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 constexpr AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for float types */
 #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-constexpr AbsoluteTolerance<float>   tolerance_qs8(0);     /**< Tolerance value for comparing reference's output against implementation's output for quantized input */
-constexpr AbsoluteTolerance<float>   tolerance_qs16(0);    /**< Tolerance value for comparing reference's output against implementation's output for quantized input */
 constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1); /**< Tolerance value for comparing reference's output against implementation's output for 8-bit asymmetric type */
 } // namespace
 
@@ -80,8 +78,6 @@ TEST_SUITE(PoolingLayer)
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
     framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),   // Mismatching data type
                                             TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),   // Window shrink
-                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 4),   // Mismatching fixed point position
-                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS16, 11), // Window shrink
                                             TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),   // Invalid pad/size combination
                                             TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),   // Invalid pad/size combination
                                             TensorInfo(TensorShape(15U, 13U, 5U), 1, DataType::F32, 0),   // Non-rectangular Global Pooling
@@ -90,8 +86,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
                                           }),
     framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16, 0),
                                             TensorInfo(TensorShape(25U, 10U, 2U), 1, DataType::F32, 0),
-                                            TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::QS8, 5),
-                                            TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::QS16, 11),
                                             TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32, 0),
                                             TensorInfo(TensorShape(25U, 16U, 2U), 1, DataType::F32, 0),
                                             TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32, 0),
@@ -99,8 +93,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
                                             TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0),
                                           })),
     framework::dataset::make("PoolInfo",  { PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)),
-                                            PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)),
-                                            PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)),
                                             PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)),
                                             PoolingLayerInfo(PoolingType::AVG, 2, PadStrideInfo(1, 1, 2, 0)),
                                             PoolingLayerInfo(PoolingType::AVG, 2, PadStrideInfo(1, 1, 0, 2)),
@@ -108,7 +100,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
                                             PoolingLayerInfo(PoolingType::MAX),
                                             PoolingLayerInfo(PoolingType::AVG),
                                           })),
-    framework::dataset::make("Expected", { false, false, false, false, false, false, true, false, false, true })),
+    framework::dataset::make("Expected", { false, false, false, false, true, false, false, true })),
     input_info, output_info, pool_info, expected)
 {
     bool is_valid = bool(NEPoolingLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), pool_info));
@@ -170,42 +162,6 @@ TEST_SUITE_END() // Float
 
 template <typename T>
 using NEPoolingLayerFixedPointFixture = PoolingLayerValidationFixedPointFixture<Tensor, Accessor, NEPoolingLayer, T>;
 
-TEST_SUITE(FixedPoint)
-TEST_SUITE(QS8)
-FIXTURE_DATA_TEST_CASE(RunTiny, NEPoolingLayerFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapes(), combine(PoolingLayerDatasetQS,
-                                                                                                              framework::dataset::make("DataType", DataType::QS8))),
-                                                                                                              framework::dataset::make("FractionalBits", 1, 5)))
-{
-    // Validate output
-    validate(Accessor(_target), _reference, tolerance_qs8);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEPoolingLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SmallShapes(), combine(PoolingLayerDatasetQS,
-                                                                                                                   framework::dataset::make("DataType", DataType::QS8))),
-                                                                                                                   framework::dataset::make("FractionalBits", 1, 5)))
-{
-    // Validate output
-    validate(Accessor(_target), _reference, tolerance_qs8);
-}
-TEST_SUITE_END() // QS8
-
-TEST_SUITE(QS16)
-FIXTURE_DATA_TEST_CASE(RunTiny, NEPoolingLayerFixedPointFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapes(), combine(PoolingLayerDatasetQS,
-                                                                                                               framework::dataset::make("DataType", DataType::QS16))),
-                                                                                                               framework::dataset::make("FractionalBits", 1, 13)))
-{
-    // Validate output
-    validate(Accessor(_target), _reference, tolerance_qs16);
-}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEPoolingLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::SmallShapes(), combine(PoolingLayerDatasetQS,
-                                                                                                                    framework::dataset::make("DataType", DataType::QS16))),
-                                                                                                                    framework::dataset::make("FractionalBits", 1, 13)))
-{
-    // Validate output
-    validate(Accessor(_target), _reference, tolerance_qs16);
-}
-TEST_SUITE_END() // QS16
-TEST_SUITE_END() // FixedPoint
-
 TEST_SUITE(Quantized)
 template <typename T>
-- 
cgit v1.2.1