From 58c5794b917dae10ff115dd85ec69e2ca41136c1 Mon Sep 17 00:00:00 2001 From: Gian Marco Date: Tue, 28 Nov 2017 09:10:03 +0000 Subject: COMPMID-706 - Add GEMMLowp output stage for scaling by a fixed point number DoD: - Implement NEON kernel for quantizing down the gemmlowp result. The result should be scaled by a fixedpoint number - Implement OpenCL kernel for quantizing down the gemmlowp result. The result should be scaled by a fixedpoint number - Add test for validating the result Required for: - Integration of GEMMLowp in Android NN - Convolution quantized - Fully connected quantized Change-Id: Ia963d25d695471e963961fb49a5600e78374ac4f Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/110981 Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com Reviewed-by: Georgios Pinitas Reviewed-by: Anthony Barbier --- tests/validation/CL/GEMMLowp.cpp | 98 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 92 insertions(+), 6 deletions(-) (limited to 'tests/validation/CL/GEMMLowp.cpp') diff --git a/tests/validation/CL/GEMMLowp.cpp b/tests/validation/CL/GEMMLowp.cpp index e3c686bebe..5148a31936 100644 --- a/tests/validation/CL/GEMMLowp.cpp +++ b/tests/validation/CL/GEMMLowp.cpp @@ -137,34 +137,120 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da } } -DISABLED_FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_cases)) +FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_cases)) { // Validate output validate(CLAccessor(_target), _reference); } -DISABLED_FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), quantize_down_int32_to_uint8_scale_cases)) +FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), quantize_down_int32_to_uint8_scale_cases)) { // Validate output validate(CLAccessor(_target), _reference); } TEST_SUITE(BoundedReLu) -DISABLED_FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_relu_cases)) +FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_relu_cases)) { // Validate output validate(CLAccessor(_target), _reference); } -DISABLED_FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), - quantize_down_int32_to_uint8_scale_relu_cases)) +FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), + quantize_down_int32_to_uint8_scale_relu_cases)) { // Validate output validate(CLAccessor(_target), _reference); } TEST_SUITE_END() // BoundedReLu - TEST_SUITE_END() // QuantizeDownInt32ToUint8Scale + +TEST_SUITE(QuantizeDownInt32ToUint8ScaleByFixedPoint) + +const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, + 2) + * 
framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", 0) * framework::dataset::make("max", 0) * framework::dataset::make("addBias", { false, true }); + +const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, + 2) + * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", 0, 2) * framework::dataset::make("max", 171, 174) * framework::dataset::make("addBias", { false, true }); + +using CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture = + GEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointValidationFixture; + +DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), + quantize_down_int32_to_uint8_scale_by_fixedpoint_cases), + shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias) +{ + TensorShape shape_bias(shape[0]); + + // Create tensors + CLTensor in = create_tensor(shape, DataType::S32); + CLTensor bias = create_tensor(shape_bias, DataType::S32); + CLTensor out = create_tensor(shape, DataType::QASYMM8); + + ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS); + + // Create and configure function + CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint output_stage; + output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max); + + // Validate valid region input and output + const ValidRegion valid_region = shape_to_valid_region(shape); + validate(in.info()->valid_region(), valid_region); + validate(out.info()->valid_region(), valid_region); + + // Validate valid region bias + if(add_bias) + { + const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias); + validate(bias.info()->valid_region(), valid_region_bias); + } + + // Validate padding + const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding(); + validate(in.info()->padding(), padding); + validate(out.info()->padding(), padding); + + if(add_bias) + { + validate(bias.info()->padding(), padding); + } +} + +FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), + quantize_down_int32_to_uint8_scale_by_fixedpoint_cases)) +{ + // Validate output + validate(CLAccessor(_target), _reference); +} + +FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), + quantize_down_int32_to_uint8_scale_by_fixedpoint_cases)) +{ + // Validate output + validate(CLAccessor(_target), _reference); +} + +TEST_SUITE(BoundedReLu) +FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), + quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases)) +{ + // Validate output + validate(CLAccessor(_target), _reference); +} + +FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), + 
quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases)) +{ + // Validate output + validate(CLAccessor(_target), _reference); +} +TEST_SUITE_END() // BoundedReLu +TEST_SUITE_END() // QuantizeDownInt32ToUint8ScaleByFixedPoint + TEST_SUITE_END() // OutputStage TEST_SUITE_END() // GEMMLowp TEST_SUITE_END() // CL -- cgit v1.2.1
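
For context on what the new test cases above exercise: the quantize-down-by-fixed-point output stage maps each int32 GEMMLowp accumulator to a uint8 value through an optional bias addition, a saturating rounding doubling high multiply by result_fixedpoint_multiplier, a rounding right shift by result_shift, an addition of result_offset_after_shift, and an optional bounded-ReLU clamp to [min, max]. The standalone C++ sketch below restates that per-element arithmetic in the gemmlowp style; the helper names and the "min == max == 0 means no clamp" convention are illustrative assumptions for this sketch, not code taken from the patch or the library.

// Illustrative sketch (not part of the patch): per-element semantics of the
// quantize-down-by-fixed-point output stage exercised by these tests.
#include <algorithm>
#include <cstdint>
#include <limits>

// Multiply two int32 values as Q0.31 fixed point: take the high 32 bits of the
// doubled 64-bit product, rounding to nearest and saturating on overflow.
inline int32_t saturating_rounding_doubling_high_mul(int32_t a, int32_t b)
{
    const bool    overflow = (a == b) && (a == std::numeric_limits<int32_t>::min());
    const int64_t ab       = static_cast<int64_t>(a) * static_cast<int64_t>(b);
    const int32_t nudge    = (ab >= 0) ? (1 << 30) : (1 - (1 << 30));
    const int32_t high     = static_cast<int32_t>((ab + nudge) / (INT64_C(1) << 31));
    return overflow ? std::numeric_limits<int32_t>::max() : high;
}

// Divide by 2^exponent with rounding to nearest.
inline int32_t rounding_divide_by_pow2(int32_t x, int exponent)
{
    const int32_t mask      = (1 << exponent) - 1;
    const int32_t remainder = x & mask;
    const int32_t threshold = (mask >> 1) + ((x < 0) ? 1 : 0);
    return (x >> exponent) + ((remainder > threshold) ? 1 : 0);
}

// One output element: optional bias add, fixed-point scaling, rounding shift,
// offset addition, optional bounded-ReLU clamp (min == max == 0 treated here as
// "no clamp", mirroring the 0/0 values in the non-ReLU dataset), final cast to uint8.
inline uint8_t quantize_down_by_fixedpoint(int32_t acc, int32_t bias, bool add_bias,
                                           int32_t multiplier, int shift,
                                           int32_t offset_after_shift, int32_t min, int32_t max)
{
    if(add_bias)
    {
        acc += bias;
    }
    int32_t res = saturating_rounding_doubling_high_mul(acc, multiplier);
    res         = rounding_divide_by_pow2(res, shift);
    res += offset_after_shift;
    if(min != 0 || max != 0)
    {
        res = std::max(min, std::min(max, res));
    }
    return static_cast<uint8_t>(std::max(0, std::min(255, res)));
}

Under these assumptions, a call such as quantize_down_by_fixedpoint(acc, bias, true, 254601600, 1, 2, 0, 0) corresponds to one element of the non-ReLU datasets above, while passing min/max from quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases corresponds to the BoundedReLu suite.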