From 0fc25454b6ce499b7f89792f91b81a61a42d3182 Mon Sep 17 00:00:00 2001
From: Vidhya Sudhan Loganathan
Date: Mon, 18 Jun 2018 14:40:56 +0100
Subject: COMPMID-970 : Remove QS8 / QS16 support

Remove QS8 and QS16 validation and benchmark tests

Change-Id: I566f1474c1fafcb3903115ec2d3a003d73e4c93b
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/133762
Tested-by: Jenkins
Reviewed-by: Pablo Tello
---
 tests/validation/CL/BatchNormalizationLayer.cpp | 58 +------------------------
 1 file changed, 2 insertions(+), 56 deletions(-)

diff --git a/tests/validation/CL/BatchNormalizationLayer.cpp b/tests/validation/CL/BatchNormalizationLayer.cpp
index f6dc6b377c..de775bf807 100644
--- a/tests/validation/CL/BatchNormalizationLayer.cpp
+++ b/tests/validation/CL/BatchNormalizationLayer.cpp
@@ -46,8 +46,6 @@ namespace
 {
 constexpr AbsoluteTolerance<float> tolerance_f32(0.00001f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
 constexpr AbsoluteTolerance<float> tolerance_f16(0.01f);    /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
-constexpr AbsoluteTolerance<float> tolerance_qs8(3.0f);     /**< Tolerance value for comparing reference's output against implementation's output for DataType::QS8 */
-constexpr AbsoluteTolerance<float> tolerance_qs16(6.0f);    /**< Tolerance value for comparing reference's output against implementation's output for DataType::QS16 */
 const auto act_infos = framework::dataset::make("ActivationInfo",
 {
     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
@@ -65,7 +63,7 @@ using CLBatchNormalizationLayerFixture = BatchNormalizationLayerValidationFixtur
 DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
                                                                            combine(framework::dataset::make("UseBeta", { false, true }),
                                                                                    framework::dataset::make("UseGamma", { false, true }))),
-                                                                   framework::dataset::make("DataType", { DataType::QS8, DataType::QS16, DataType::F16, DataType::F32 })),
+                                                                   framework::dataset::make("DataType", { DataType::F16, DataType::F32 })),
                                                            framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                shape0, shape1, epsilon, use_gamma, use_beta, dt, data_layout)
 {
@@ -105,50 +103,34 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching data types
                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching data types
                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Invalid mean/var/beta/gamma shape
-                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point position
-                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Fused activation with fixed point not supported
                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Unsupported fused activation
                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Fused activation's a < b
-                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
-                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
                                                      }),
     framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                             TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
                                             TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                             TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F16),
                                             TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
-                                            TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
                                             TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                             TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
-                                            TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
-                                            TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
-                                            TensorInfo(),
                                           })),
     framework::dataset::make("MVBGInfo",{ TensorInfo(TensorShape(2U), 1, DataType::F32),
                                           TensorInfo(TensorShape(2U), 1, DataType::F32),
                                           TensorInfo(TensorShape(2U), 1, DataType::F16),
                                           TensorInfo(TensorShape(2U), 1, DataType::F32),
                                           TensorInfo(TensorShape(5U), 1, DataType::F32),
-                                          TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
-                                          TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
                                           TensorInfo(TensorShape(2U), 1, DataType::F32),
                                           TensorInfo(TensorShape(2U), 1, DataType::F32),
-                                          TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
-                                          TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
                                         })),
    framework::dataset::make("ActivationLayerInfo",{ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
                                                     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
                                                     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f),
                                                     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f),
                                                     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f),
-                                                    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f, 2.f),
-                                                    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f, 2.f),
                                                     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH),
                                                     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 2.f, 6.f),
-                                                    ActivationLayerInfo(),
-                                                    ActivationLayerInfo(),
                                                   })),
-    framework::dataset::make("Expected", { true, false, false, false, false, false, false, false, false, true, true})),
+    framework::dataset::make("Expected", { true, false, false, false, false, false, false})),
     input_info, output_info, mvbg_info, act_info, expected)
 {
     const auto &mean_info = mvbg_info;
@@ -189,42 +171,6 @@ FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<float>, framework
 TEST_SUITE_END()
 TEST_SUITE_END()
 
-TEST_SUITE(Quantized)
-template <typename T>
-using CLBatchNormalizationLayerFixedPointFixture = BatchNormalizationLayerValidationFixedPointFixture<CLTensor, CLAccessor, CLBatchNormalizationLayer, T>;
-
-TEST_SUITE(QS8)
-FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
-                       combine(combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
-                                                                       framework::dataset::make("UseBeta", false)),
-                                                               framework::dataset::make("UseGamma", false)),
-                                                       framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
-                                               framework::dataset::make("DataType", DataType::QS8)),
-                                       framework::dataset::make("DataLayout", DataLayout::NCHW)),
-                               framework::dataset::make("FractionalBits", 1, 6)))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, tolerance_qs8, 0);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT,
-                       combine(combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
-                                                                       framework::dataset::make("UseBeta", false)),
-                                                               framework::dataset::make("UseGamma", false)),
-                                                       framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
-                                               framework::dataset::make("DataType", DataType::QS16)),
-                                       framework::dataset::make("DataLayout", DataLayout::NCHW)),
-                               framework::dataset::make("FractionalBits", 1, 14)))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, tolerance_qs16, 0);
-}
-TEST_SUITE_END()
-
-TEST_SUITE_END()
-
 TEST_SUITE_END()
 TEST_SUITE_END()
 } // namespace validation
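
Note on the surviving tests: the remaining F16/F32 cases all funnel into the same check as the deleted fixed-point ones, comparing the CL kernel's output element-wise against a scalar reference within the absolute tolerances defined at the top of the file (0.00001f for F32, 0.01f for F16; the removed tolerance_qs8 and tolerance_qs16 were the looser 3.0f and 6.0f bounds for the fixed-point paths). The standalone C++ below is a minimal sketch of that pattern only; reference_batch_norm and within_tolerance are illustrative names, not Compute Library API, and the real fixtures compute the reference over full tensors per channel.

// Minimal sketch (assumed names, not Compute Library API): the per-channel
// batch normalization the fixtures validate,
//   out = gamma * (x - mean) / sqrt(var + epsilon) + beta,
// with the UseBeta / UseGamma toggles mirrored from the datasets above.
#include <cmath>
#include <cstdio>
#include <vector>

std::vector<float> reference_batch_norm(const std::vector<float> &x,
                                        float mean, float var, float beta, float gamma,
                                        float epsilon, bool use_beta, bool use_gamma)
{
    std::vector<float> out(x.size());
    const float inv_std = 1.0f / std::sqrt(var + epsilon); // reciprocal std-dev of the channel
    for(std::size_t i = 0; i < x.size(); ++i)
    {
        float norm = (x[i] - mean) * inv_std;
        if(use_gamma)
        {
            norm *= gamma; // scale, skipped when UseGamma is false
        }
        if(use_beta)
        {
            norm += beta; // shift, skipped when UseBeta is false
        }
        out[i] = norm;
    }
    return out;
}

// Element-wise absolute-tolerance comparison in the spirit of the
// validate(CLAccessor(_target), _reference, tolerance, 0) calls above.
bool within_tolerance(const std::vector<float> &target, const std::vector<float> &reference,
                      float tolerance)
{
    if(target.size() != reference.size())
    {
        return false;
    }
    for(std::size_t i = 0; i < target.size(); ++i)
    {
        if(std::fabs(target[i] - reference[i]) > tolerance)
        {
            return false;
        }
    }
    return true;
}

int main()
{
    const std::vector<float> x   = { 0.5f, -1.0f, 2.0f };
    const auto               ref = reference_batch_norm(x, 0.1f, 0.9f, 0.2f, 1.5f, 0.001f, true, true);
    std::printf("matches reference: %d\n", within_tolerance(ref, ref, 0.00001f)); // tolerance_f32
    return 0;
}

The deleted QS8/QS16 fixtures followed exactly this shape, differing only in the data type, the FractionalBits dimension they added to the dataset, and the wider tolerances, which is why removing them also drops the two tolerance constants in the first hunk.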