From 689c968239180eda4263e34c3d450093d4a0450d Mon Sep 17 00:00:00 2001
From: Luca Foschiani
Date: Wed, 26 Feb 2020 14:30:14 +0000
Subject: COMPMID-2967 Add support for QASYMM8_SIGNED in CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel

Signed-off-by: Luca Foschiani
Change-Id: I4f7918630ea95fc28597b3d7b189f3d8fd35aef8
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2890
Tested-by: Arm Jenkins
Reviewed-by: Georgios Pinitas
Comments-Addressed: Arm Jenkins
---
 tests/validation/CL/GEMMLowp.cpp | 79 ----------------------------------------
 1 file changed, 79 deletions(-)

(limited to 'tests')

diff --git a/tests/validation/CL/GEMMLowp.cpp b/tests/validation/CL/GEMMLowp.cpp
index 94621b4393..3d7c76aa2b 100644
--- a/tests/validation/CL/GEMMLowp.cpp
+++ b/tests/validation/CL/GEMMLowp.cpp
@@ -146,86 +146,7 @@ TEST_SUITE_END() // InputOutput3D
 TEST_SUITE_END() // MatrixMultiplyCore
 
 TEST_SUITE(OutputStage)
-TEST_SUITE(QuantizeDownInt32ToUint8Scale)
-const auto quantize_down_int32_to_uint8_scale_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2) * framework::dataset::make("result_shift", 2,
-                                                      3)
-                                                      * framework::dataset::make("min", 0) * framework::dataset::make("max", 255) * framework::dataset::make("addBias", { false, true });
-
-const auto quantize_down_int32_to_uint8_scale_relu_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1,
-                                                           2)
-                                                           * framework::dataset::make("result_shift", 2, 3) * framework::dataset::make("min", 0, 2) * framework::dataset::make("max", 171, 173) * framework::dataset::make("addBias", { false, true });
-
-using CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture = GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture<CLTensor, CLAccessor, CLGEMMLowpQuantizeDownInt32ToUint8Scale>;
-
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_cases),
-               shape, result_offset, result_mult_int, result_shift, min, max, add_bias)
-{
-    TensorShape shape_bias(shape[0]);
-
-    // Create tensors
-    CLTensor in   = create_tensor<CLTensor>(shape, DataType::S32);
-    CLTensor bias = create_tensor<CLTensor>(shape_bias, DataType::S32);
-    CLTensor out  = create_tensor<CLTensor>(shape, DataType::QASYMM8);
-
-    ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);
-
-    // Create and configure function
-    CLGEMMLowpQuantizeDownInt32ToUint8Scale output_stage;
-    output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_offset, result_mult_int, result_shift, min, max);
-
-    // Validate valid region input and output
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(in.info()->valid_region(), valid_region);
-    validate(out.info()->valid_region(), valid_region);
-
-    // Validate valid region bias
-    if(add_bias)
-    {
-        const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
-        validate(bias.info()->valid_region(), valid_region_bias);
-    }
-
-    // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 4).required_padding();
-    validate(in.info()->padding(), padding);
-    validate(out.info()->padding(), padding);
-
-    if(add_bias)
-    {
-        validate(bias.info()->padding(), padding);
-    }
-}
-
-FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_cases))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference);
-}
-
-FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), quantize_down_int32_to_uint8_scale_cases))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference);
-}
-
-TEST_SUITE(BoundedReLu)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_relu_cases))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference);
-}
-
-FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(),
-                       quantize_down_int32_to_uint8_scale_relu_cases))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference);
-}
-TEST_SUITE_END() // BoundedReLu
-TEST_SUITE_END() // QuantizeDownInt32ToUint8Scale
 
 TEST_SUITE(QuantizeDownInt32ToUint8ScaleByFixedPoint)
 const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1,
                                                                     2)
--
cgit v1.2.1
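
Note (reviewer sketch, not part of the patch): the subject states that
CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel now accepts a QASYMM8_SIGNED
destination, while this patch, limited to 'tests', only drops the old
uint8-only QuantizeDownInt32ToUint8Scale suite. The sketch below shows how a
signed 8-bit quantize-down output stage is typically requested. It assumes the
generic CLGEMMLowpOutputStage / GEMMLowpOutputStageInfo interface found in
later releases of the library (field names such as output_data_type may not
exist at this revision), and the quantization parameters are illustrative
placeholders, not values taken from this commit.

    // Hedged sketch, not part of this patch: quantize-down output stage with a
    // signed 8-bit destination. Assumes the CLGEMMLowpOutputStage /
    // GEMMLowpOutputStageInfo API of later library releases; the parameter
    // values below are placeholders.
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"

    using namespace arm_compute;

    void configure_signed_output_stage(CLTensor &acc_s32, CLTensor &bias_s32, CLTensor &dst_s8)
    {
        GEMMLowpOutputStageInfo info{};
        info.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN; // integer scale, as in the removed tests
        info.gemmlowp_offset     = -2;                                     // result_offset (placeholder)
        info.gemmlowp_multiplier = 1;                                      // result_mult_int (placeholder)
        info.gemmlowp_shift      = 2;                                      // result_shift (placeholder)
        info.gemmlowp_min_bound  = -128;                                   // clamp to the signed 8-bit range
        info.gemmlowp_max_bound  = 127;
        info.output_data_type    = DataType::QASYMM8_SIGNED;               // the output type this change targets

        // int32 accumulators in, signed 8-bit quantized output out; the bias
        // pointer may be nullptr when no bias is added.
        CLGEMMLowpOutputStage output_stage;
        output_stage.configure(&acc_s32, &bias_s32, &dst_s8, info);
    }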