From d175ecea20f5c9e3bff07f7c5d3e4bb5519f7cb9 Mon Sep 17 00:00:00 2001
From: Michalis Spyrou
Date: Thu, 30 Jul 2020 23:39:32 +0100
Subject: COMPMID-3324: Remove configuration tests

Remove configuration tests that use the default data shapes. There is
no need to run them, since configure() will run as part of the actual
validation run.

Signed-off-by: Michalis Spyrou
Change-Id: If6d88a6ba5e9463fa8c615fcf76a5c07d3049d53
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3638
Comments-Addressed: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Reviewed-by: Georgios Pinitas
Tested-by: Arm Jenkins
---
 tests/validation/CL/GEMMLowp.cpp | 103 ---------------------------------------
 1 file changed, 103 deletions(-)

(limited to 'tests/validation/CL/GEMMLowp.cpp')

diff --git a/tests/validation/CL/GEMMLowp.cpp b/tests/validation/CL/GEMMLowp.cpp
index 29649d8c9f..08227078bc 100644
--- a/tests/validation/CL/GEMMLowp.cpp
+++ b/tests/validation/CL/GEMMLowp.cpp
@@ -54,27 +54,6 @@ TEST_SUITE(GEMMLowp)
 TEST_SUITE(MatrixMultiplyCore)
 using CLGEMMLowpMatrixMultiplyCoreFixture = GEMMLowpMatrixMultiplyCoreValidationFixture<CLTensor, CLAccessor, CLGEMMLowpMatrixMultiplyCore>;
 
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, datasets::SmallGEMMLowpDataset(),
-               shape_a, shape_b, shape_c, a_offset, b_offset)
-{
-    // Create tensors
-    CLTensor a = create_tensor<CLTensor>(shape_a, DataType::QASYMM8);
-    CLTensor b = create_tensor<CLTensor>(shape_b, DataType::QASYMM8);
-    CLTensor c = create_tensor<CLTensor>(shape_c, DataType::S32);
-
-    a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, a_offset));
-    b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset));
-
-    ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
-
-    // Create and configure function
-    CLGEMMLowpMatrixMultiplyCore gemmlowp_mm;
-    // TODO (giaiod01) COMPMID-1672 - Extending the test to validate add bias in offset contribution
-    gemmlowp_mm.configure(&a, &b, nullptr, &c);
-}
-
 FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyCoreFixture, framework::DatasetMode::ALL, datasets::SmallGEMMLowpDataset())
 {
     // Validate output
@@ -217,47 +196,6 @@ const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases = framewo
 using CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture =
     GEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointValidationFixture<CLTensor, CLAccessor, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint>;
 
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_by_fixedpoint_cases),
-               shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias)
-{
-    TensorShape shape_bias(shape[0]);
-
-    // Create tensors
-    CLTensor in   = create_tensor<CLTensor>(shape, DataType::S32);
-    CLTensor bias = create_tensor<CLTensor>(shape_bias, DataType::S32);
-    CLTensor out  = create_tensor<CLTensor>(shape, DataType::QASYMM8);
-
-    ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);
-
-    // Create and configure function
-    CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint output_stage;
-    output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
-
-    // Validate valid region input and output
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(in.info()->valid_region(), valid_region);
-    validate(out.info()->valid_region(), valid_region);
-
-    // Validate valid region bias
-    if(add_bias)
-    {
-        const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
-        validate(bias.info()->valid_region(), valid_region_bias);
-    }
-
-    // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 4).required_padding();
-    validate(in.info()->padding(), padding);
-    validate(out.info()->padding(), padding);
-
-    if(add_bias)
-    {
-        validate(bias.info()->padding(), padding);
-    }
-}
-
 FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                        quantize_down_int32_to_uint8_scale_by_fixedpoint_cases))
 {
@@ -297,47 +235,6 @@ const auto quantize_down_int32_to_int8_scale_by_fixedpoint_relu_cases = framewor
 using CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture =
     GEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointValidationFixture<CLTensor, CLAccessor, CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint>;
 
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_int8_scale_by_fixedpoint_cases),
-               shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias)
-{
-    TensorShape shape_bias(shape[0]);
-
-    // Create tensors
-    CLTensor in   = create_tensor<CLTensor>(shape, DataType::S32);
-    CLTensor bias = create_tensor<CLTensor>(shape_bias, DataType::S32);
-    CLTensor out  = create_tensor<CLTensor>(shape, DataType::QASYMM8_SIGNED);
-
-    ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);
-
-    // Create and configure function
-    CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint output_stage;
-    output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
-
-    // Validate valid region input and output
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(in.info()->valid_region(), valid_region);
-    validate(out.info()->valid_region(), valid_region);
-
-    // Validate valid region bias
-    if(add_bias)
-    {
-        const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
-        validate(bias.info()->valid_region(), valid_region_bias);
-    }
-
-    // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 4).required_padding();
-    validate(in.info()->padding(), padding);
-    validate(out.info()->padding(), padding);
-
-    if(add_bias)
-    {
-        validate(bias.info()->padding(), padding);
-    }
-}
-
 FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                        quantize_down_int32_to_int8_scale_by_fixedpoint_cases))
 {
--
cgit v1.2.1
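
For readers unfamiliar with the test framework: the FIXTURE_DATA_TEST_CASEs kept by this patch run through validation fixtures whose compute_target() step creates tensors, calls configure(), allocates, and runs the function before comparing against a reference. The sketch below illustrates that flow for the matrix-multiply case and shows why a standalone Configuration test adds no coverage: every validation run already exercises configure() on freshly created, resizable tensors. This is a simplified assumption of the fixture's structure, not the library's verbatim code; the helper name compute_target_sketch and the exact includes are illustrative, and it assumes the CL runtime has been initialised (the test framework does this via CLScheduler).

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"
#include "tests/Utils.h" // create_tensor<> helper, as used by the deleted test

using namespace arm_compute;
using namespace arm_compute::test;

// Simplified, hypothetical stand-in for the validation fixture's compute_target();
// the real fixture is templated over the tensor, accessor and function types.
CLTensor compute_target_sketch(const TensorShape &shape_a, const TensorShape &shape_b,
                               const TensorShape &shape_c, int32_t a_offset, int32_t b_offset)
{
    // Create tensors - the same step the removed Configuration test performed
    CLTensor a = create_tensor<CLTensor>(shape_a, DataType::QASYMM8);
    CLTensor b = create_tensor<CLTensor>(shape_b, DataType::QASYMM8);
    CLTensor c = create_tensor<CLTensor>(shape_c, DataType::S32);

    a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, a_offset));
    b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset));

    // configure() runs here on every validation run, which is what makes a
    // standalone Configuration-only test redundant
    CLGEMMLowpMatrixMultiplyCore gemmlowp_mm;
    gemmlowp_mm.configure(&a, &b, nullptr, &c);

    // Allocate backing memory and execute; the real fixture fills the inputs
    // from the dataset before run() and validates c against a reference
    a.allocator()->allocate();
    b.allocator()->allocate();
    c.allocator()->allocate();
    gemmlowp_mm.run();

    return c;
}

The remaining RunSmall/RunLarge cases then compare the output of this flow against a reference implementation in validate(), so any misconfiguration surfaces there.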