From 1f332d4a41393ed30a4e9df841eb4b545fa87486 Mon Sep 17 00:00:00 2001
From: Manuel Bottini
Date: Fri, 29 Nov 2019 17:25:25 +0000
Subject: COMPMID-2794: Add support for QASYMM8_SIGNED in CLGEMMLowpOutputStage

Change-Id: I93ad3e5b9531ce1699214ff6e657a76ffdaacedd
Signed-off-by: Manuel Bottini
Reviewed-on: https://review.mlplatform.org/c/2396
Comments-Addressed: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Tested-by: Arm Jenkins
---
 tests/validation/CL/GEMMLowp.cpp | 73 +++++++++++++++++++++++++++++++++++++---
 1 file changed, 68 insertions(+), 5 deletions(-)

(limited to 'tests/validation/CL/GEMMLowp.cpp')

diff --git a/tests/validation/CL/GEMMLowp.cpp b/tests/validation/CL/GEMMLowp.cpp
index 39543b174c..2890eb161b 100644
--- a/tests/validation/CL/GEMMLowp.cpp
+++ b/tests/validation/CL/GEMMLowp.cpp
@@ -211,9 +211,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture,
 }
 TEST_SUITE_END() // BoundedReLu
 TEST_SUITE_END() // QuantizeDownInt32ToUint8Scale
-
 TEST_SUITE(QuantizeDownInt32ToUint8ScaleByFixedPoint)
-
 const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, 2)
                                                                     * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", 0) * framework::dataset::make("max", 0) * framework::dataset::make("addBias", { false, true });
 
@@ -221,12 +219,10 @@ const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_cases = framework::d
 
 const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, 2)
                                                                          * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", 0, 2) * framework::dataset::make("max", 171, 174) * framework::dataset::make("addBias", { false, true });
-
 using CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture =
     GEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointValidationFixture<CLTensor, CLAccessor, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint>;
 
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
-                                                                   quantize_down_int32_to_uint8_scale_by_fixedpoint_cases),
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_by_fixedpoint_cases),
                shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias)
 {
     TensorShape shape_bias(shape[0]);
@@ -297,6 +293,73 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedP
 }
 TEST_SUITE_END() // BoundedReLu
 TEST_SUITE_END() // QuantizeDownInt32ToUint8ScaleByFixedPoint
+TEST_SUITE(QuantizeDownInt32ToInt8ScaleByFixedPoint)
+const auto quantize_down_int32_to_int8_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, 2)
+                                                                   * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", 0) * framework::dataset::make("max", 0) * framework::dataset::make("addBias", { false, true });
+
+const auto quantize_down_int32_to_int8_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, 2)
+                                                                        * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", -128, -126) * framework::dataset::make("max", 110, 112) * framework::dataset::make("addBias", { false, true });
+using CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture =
+    GEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointValidationFixture<CLTensor, CLAccessor, CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint>;
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_int8_scale_by_fixedpoint_cases),
+               shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias)
+{
+    TensorShape shape_bias(shape[0]);
+
+    // Create tensors
+    CLTensor in   = create_tensor<CLTensor>(shape, DataType::S32);
+    CLTensor bias = create_tensor<CLTensor>(shape_bias, DataType::S32);
+    CLTensor out  = create_tensor<CLTensor>(shape, DataType::QASYMM8_SIGNED);
+
+    ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+    // Create and configure function
+    CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint output_stage;
+    output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
+
+    // Validate valid region input and output
+    const ValidRegion valid_region = shape_to_valid_region(shape);
+    validate(in.info()->valid_region(), valid_region);
+    validate(out.info()->valid_region(), valid_region);
+
+    // Validate valid region bias
+    if(add_bias)
+    {
+        const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
+        validate(bias.info()->valid_region(), valid_region_bias);
+    }
+
+    // Validate padding
+    const PaddingSize padding = PaddingCalculator(shape.x(), 4).required_padding();
+    validate(in.info()->padding(), padding);
+    validate(out.info()->padding(), padding);
+
+    if(add_bias)
+    {
+        validate(bias.info()->padding(), padding);
+    }
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
+                       quantize_down_int32_to_int8_scale_by_fixedpoint_cases))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+TEST_SUITE(BoundedReLu)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
+                       quantize_down_int32_to_int8_scale_by_fixedpoint_relu_cases))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+TEST_SUITE_END() // BoundedReLu
+TEST_SUITE_END() // QuantizeDownInt32ToInt8ScaleByFixedPoint
 TEST_SUITE(QuantizeDownInt32ToInt16ScaleByFixedPoint)
 const auto quantize_down_int32_to_int16_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1,
--
cgit v1.2.1
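
For orientation, here is a minimal standalone sketch (not part of the patch) of how the new CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint output stage might be driven outside the test framework. The configure() signature is taken from the test code above; the tensor shapes, the hard-coded requantization parameters, and the CLGEMMLowpOutputStage.h header path are illustrative assumptions rather than part of this change.

```cpp
// Sketch only: requantize S32 GEMMLowp accumulators to QASYMM8_SIGNED,
// the data type this patch adds test coverage for.
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h" // assumed header path

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init(); // create the CL context and queue

    // S32 accumulators as produced by a GEMMLowp matrix multiplication
    // (shape is an illustrative assumption).
    CLTensor acc;
    acc.allocator()->init(TensorInfo(TensorShape(32U, 16U), 1, DataType::S32));

    // Signed 8-bit quantized output.
    CLTensor out;
    out.allocator()->init(TensorInfo(TensorShape(32U, 16U), 1, DataType::QASYMM8_SIGNED));

    // Fixed-point requantization parameters; the multiplier/shift values
    // mirror the ranges exercised by the tests above. min/max clamp to the
    // full QASYMM8_SIGNED range here.
    CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint output_stage;
    output_stage.configure(&acc, /*bias=*/nullptr, &out,
                           /*result_fixedpoint_multiplier=*/254601600,
                           /*result_shift=*/1,
                           /*result_offset_after_shift=*/2,
                           /*min=*/-128, /*max=*/127);

    acc.allocator()->allocate();
    out.allocator()->allocate();
    // ... fill acc with accumulator values ...

    output_stage.run();        // quantize down: S32 -> QASYMM8_SIGNED
    CLScheduler::get().sync(); // wait for the CL queue to finish
    return 0;
}
```

In a real network the result_fixedpoint_multiplier/result_shift pair would be derived from the input, weight, and output quantization scales rather than hard-coded as above.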