diff options
author | Vidhya Sudhan Loganathan <vidhyasudhan.loganathan@arm.com> | 2018-06-18 14:40:56 +0100 |
---|---|---|
committer | Anthony Barbier <anthony.barbier@arm.com> | 2018-11-02 16:53:09 +0000 |
commit | 0fc25454b6ce499b7f89792f91b81a61a42d3182 (patch) | |
tree | 0c988cbbef3f5f1967cc2f77672d05a702191349 /tests/validation/NEON/ConvolutionLayer.cpp | |
parent | f5f0ecca39c905bc8c1f90e7469eed2221ca662f (diff) | |
download | ComputeLibrary-0fc25454b6ce499b7f89792f91b81a61a42d3182.tar.gz |
COMPMID-970 : Remove QS8 / QS16 support
Remove QS8 and QS16 validation and benchmark tests
Change-Id: I566f1474c1fafcb3903115ec2d3a003d73e4c93b
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/133762
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Pablo Tello <pablo.tello@arm.com>
Diffstat (limited to 'tests/validation/NEON/ConvolutionLayer.cpp')
-rw-r--r-- | tests/validation/NEON/ConvolutionLayer.cpp | 49 |
1 file changed, 0 insertions, 49 deletions
diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp index 2c17487107..747d8d2f62 100644 --- a/tests/validation/NEON/ConvolutionLayer.cpp +++ b/tests/validation/NEON/ConvolutionLayer.cpp @@ -52,7 +52,6 @@ const AbsoluteTolerance<float> abs_tolerance_f32(0.002f); /**< Absolute toleranc #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC const AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */ #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ -const AbsoluteTolerance<float> tolerance_q(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */ constexpr AbsoluteTolerance<float> tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */ /** CNN data types */ @@ -62,8 +61,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType", DataType::F16, #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ DataType::F32, - DataType::QS8, - DataType::QS16, DataType::QASYMM8, }); const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo", @@ -251,52 +248,6 @@ TEST_SUITE_END() template <typename T> using NEGEMMConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>; -TEST_SUITE(FixedPoint) -TEST_SUITE(QS8) -// We test for fixed point precision [4,6] -FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::TinyConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QS8)), - framework::dataset::make("FractionalBits", 4, 7)), - ActivationFunctionsDataset)) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_q); -} 
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QS8)), - framework::dataset::make("FractionalBits", 4, 7)), - ActivationFunctionsDataset)) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_q); -} -TEST_SUITE_END() - -TEST_SUITE(QS16) -// Testing for fixed point position [1,14) -FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::TinyConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14)), - ActivationFunctionsDataset)) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_q); -} -FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QS16)), - framework::dataset::make("FractionalBits", 1, 14)), - ActivationFunctionsDataset)) -{ - // Validate output - validate(Accessor(_target), _reference, tolerance_q); -} -TEST_SUITE_END() -TEST_SUITE_END() - template <typename T> using NEGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>; |