From 04a0706dddc6ca24cb80e3e0789c6b0f54c48b28 Mon Sep 17 00:00:00 2001
From: SiCong Li
Date: Tue, 17 Nov 2020 14:09:01 +0000
Subject: COMPMID-3979 Sanitise Padding Removal epic

* Add missing padding immutability asserts in all relevant CL kernels
* Remove unnecessary zero padding validation tests.

Change-Id: If93f9ccbc988e0286f5e7b135f812141476d5da0
Signed-off-by: SiCong Li
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4446
Reviewed-by: Gian Marco Iodice
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
---
 tests/validation/CL/GEMMLowp.cpp | 39 ---------------------------------------
 1 file changed, 39 deletions(-)

(limited to 'tests/validation/CL/GEMMLowp.cpp')

diff --git a/tests/validation/CL/GEMMLowp.cpp b/tests/validation/CL/GEMMLowp.cpp
index 00f831b2e2..5a1971b54c 100644
--- a/tests/validation/CL/GEMMLowp.cpp
+++ b/tests/validation/CL/GEMMLowp.cpp
@@ -47,25 +47,6 @@ namespace validation
 namespace
 {
 constexpr AbsoluteTolerance<float> tolerance_quant(1); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
-
-bool validate_output_stage_zero_padding(const TensorShape shape, const DataType dt, const GEMMLowpOutputStageType type)
-{
-    // Create tensors
-    CLTensor src  = create_tensor<CLTensor>(shape, DataType::S32, 1);
-    CLTensor bias = create_tensor<CLTensor>(TensorShape(shape.x()), DataType::S32, 1);
-    CLTensor dst  = create_tensor<CLTensor>(shape, dt, 1);
-
-    GEMMLowpOutputStageInfo info;
-    info.type             = type;
-    info.output_data_type = dt;
-    std::tie(info.gemmlowp_min_bound, info.gemmlowp_max_bound) = quantization::get_min_max_values_from_quantized_data_type(dt);
-
-    // Create and configure function
-    CLGEMMLowpOutputStage output_stage;
-    output_stage.configure(&src, &bias, &dst, info);
-
-    return src.info()->padding().empty() && bias.info()->padding().empty() && dst.info()->padding().empty();
-}
 }
 TEST_SUITE(CL)
 TEST_SUITE(GEMMLowp)
@@ -147,13 +128,6 @@ TEST_SUITE(OutputStage)
 
 TEST_SUITE(QuantizeDownInt32Scale)
 
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::QASYMM8, DataType::QASYMM8_SIGNED })),
-               shape, data_type)
-{
-    bool status = validate_output_stage_zero_padding(shape, data_type, GEMMLowpOutputStageType::QUANTIZE_DOWN);
-    ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
-
 TEST_SUITE(QASYMM8)
 
 const auto quantize_down_int32_to_uint8_scale_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2) * framework::dataset::make("result_shift", 2,
@@ -212,12 +186,6 @@ TEST_SUITE_END() // QASYMM8_SIGNED
 TEST_SUITE_END() // QuantizeDownInt32Scale
 
 TEST_SUITE(QuantizeDownInt32ScaleByFixedPoint)
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM16 })),
-               shape, data_type)
-{
-    bool status = validate_output_stage_zero_padding(shape, data_type, GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT);
-    ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
 
 TEST_SUITE(QASYMM8)
 
@@ -353,13 +321,6 @@ TEST_SUITE_END() // QuantizeDownInt32ScaleByFixedPoint
 
 TEST_SUITE(QuantizeDownInt32ScaleByFloat)
 
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::QASYMM8, DataType::QASYMM8_SIGNED })),
-               shape, data_type)
-{
-    bool status = validate_output_stage_zero_padding(shape, data_type, GEMMLowpOutputStageType::QUANTIZE_DOWN_FLOAT);
-    ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
-
 TEST_SUITE(QASYMM8)
 using CLGEMMLowpQuantizeDownInt32ScaleByFloatFixture =
     GEMMLowpQuantizeDownInt32ScaleByFloatValidationFixture<CLTensor, CLAccessor, CLGEMMLowpOutputStage, uint8_t>;
-- 
cgit v1.2.1
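For context on the first bullet of the commit message ("Add missing padding immutability asserts in all relevant CL kernels"): those asserts live in the kernels' configure() methods rather than in this test file, which is why only deletions appear here. Once each kernel asserts its own padding immutability at configure time, the per-operator ValidateZeroPadding test cases removed above become redundant. Below is a minimal sketch of that kernel-side pattern, assuming the ComputeLibrary get_padding_info()/has_padding_changed() helpers from around the v20.11 codebase; CLExampleKernel is hypothetical, and the exact header and overloads should be verified against the tree.

// Sketch, not part of this patch: the padding-immutability check pattern.
// get_padding_info()/has_padding_changed() are assumed to be the helpers used
// by this epic; verify their header (e.g. arm_compute/core/Utils.h) locally.
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Utils.h"

void CLExampleKernel::configure(const ICLTensor *input, ICLTensor *output) // hypothetical kernel
{
    // Snapshot the padding of every tensor this kernel touches.
    auto padding_info = get_padding_info({ input->info(), output->info() });

    // ... window and OpenCL kernel setup goes here; with the zero-padding
    // work done, none of it may request extra tensor padding ...

    // Fail at configure time if any snapshotted padding was modified.
    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}

Guarding every configured tensor in the kernel itself checks all operators on every configure path, rather than only the shapes and data types a hand-written test case happens to cover.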