| author | Gian Marco <gianmarco.iodice@arm.com> | 2017-11-28 09:10:03 +0000 |
|---|---|---|
| committer | Anthony Barbier <anthony.barbier@arm.com> | 2018-11-02 16:41:58 +0000 |
| commit | 58c5794b917dae10ff115dd85ec69e2ca41136c1 (patch) | |
| tree | f2cea2d94e6566be720256dc6105056798723699 /tests/validation/NEON | |
| parent | 754e9526a7caf50876c2db9563dc72f096093b34 (diff) | |
| download | ComputeLibrary-58c5794b917dae10ff115dd85ec69e2ca41136c1.tar.gz | |
COMPMID-706 - Add GEMMLowp output stage for scaling by a fixed point number
DoD:
- Implement NEON kernel for quantizing down the gemmlowp result. The
result should be scaled by a fixed-point number (a scalar reference
sketch of this scaling is shown after the lists below)
- Implement OpenCL kernel for quantizing down the gemmlowp result. The
result should be scaled by a fixed-point number
- Add test for validating the result
Required for:
- Integration of GEMMLowp in Android NN
- Quantized convolution
- Quantized fully connected layers
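For context, here is a minimal scalar sketch of what "scaling by a fixed-point number" means for a 32-bit GEMMLowp accumulator. It follows the well-known gemmlowp requantization scheme (a saturating rounding doubling high multiply followed by a rounding right shift); the helper names, the exact rounding, and the clamping behaviour are illustrative assumptions, not code taken from this patch.

```cpp
#include <algorithm>
#include <cstdint>

// Saturating "doubling high" multiply: (a * b * 2) >> 32, rounded to nearest.
// This is how a Q0.31 fixed-point multiplier is applied to a 32-bit value.
inline int32_t saturating_rounding_doubling_high_mul(int32_t a, int32_t b)
{
    if(a == INT32_MIN && b == INT32_MIN)
    {
        return INT32_MAX; // the single overflowing case saturates
    }
    const int64_t ab    = static_cast<int64_t>(a) * static_cast<int64_t>(b);
    const int64_t nudge = (ab >= 0) ? (1ll << 30) : (1 - (1ll << 30));
    return static_cast<int32_t>((ab + nudge) / (1ll << 31));
}

// Arithmetic shift right by 'exponent' with round-to-nearest.
inline int32_t rounding_divide_by_pot(int32_t x, int exponent)
{
    const int32_t mask      = (1 << exponent) - 1;
    const int32_t remainder = x & mask;
    const int32_t threshold = (mask >> 1) + ((x < 0) ? 1 : 0);
    return (x >> exponent) + ((remainder > threshold) ? 1 : 0);
}

// Hypothetical scalar reference of the output stage: optional bias add,
// fixed-point scale, shift, offset, clamp, then narrow to QASYMM8.
// Parameter names mirror the test below; the [min, max] clamp stands in
// for the optional bounded ReLU.
inline uint8_t quantize_down_by_fixedpoint(int32_t acc, int32_t bias,
                                           int32_t result_fixedpoint_multiplier, int result_shift,
                                           int32_t result_offset_after_shift, int32_t min, int32_t max)
{
    acc += bias;
    acc = saturating_rounding_doubling_high_mul(acc, result_fixedpoint_multiplier);
    acc = rounding_divide_by_pot(acc, result_shift);
    acc += result_offset_after_shift;
    acc = std::max(min, std::min(max, acc));                      // assumed clamp; skipped when no bounds are set
    return static_cast<uint8_t>(std::max(0, std::min(255, acc))); // saturate to the uint8 range
}
```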
Change-Id: Ia963d25d695471e963961fb49a5600e78374ac4f
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/110981
Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'tests/validation/NEON')
| -rw-r--r-- | tests/validation/NEON/GEMMLowp.cpp | 87 |
1 file changed, 87 insertions, 0 deletions
diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp
index 6d13fdc939..a49ca4670a 100644
--- a/tests/validation/NEON/GEMMLowp.cpp
+++ b/tests/validation/NEON/GEMMLowp.cpp
@@ -255,6 +255,93 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpQuantizeDownInt32ToUint8ScaleFixture,
 TEST_SUITE_END() // BoundedReLu
 TEST_SUITE_END() // QuantizeDownInt32ToUint8Scale
+
+TEST_SUITE(QuantizeDownInt32ToUint8ScaleByFixedPoint)
+
+const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1,
+                                                                    2)
+                                                                    * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", 0) * framework::dataset::make("max", 0) * framework::dataset::make("addBias", { false, true });
+
+const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1,
+                                                                         2)
+                                                                         * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", 0, 2) * framework::dataset::make("max", 171, 174) * framework::dataset::make("addBias", { false, true });
+
+using NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture =
+    GEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint>;
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()),
+                                                                   quantize_down_int32_to_uint8_scale_by_fixedpoint_cases),
+               shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias)
+{
+    TensorShape shape_bias(shape[0]);
+
+    // Create tensors
+    Tensor in   = create_tensor<Tensor>(shape, DataType::S32);
+    Tensor bias = create_tensor<Tensor>(shape_bias, DataType::S32);
+    Tensor out  = create_tensor<Tensor>(shape, DataType::QASYMM8);
+
+    ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+    // Create and configure function
+    NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint output_stage;
+    output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
+
+    // Validate valid region input and output
+    const ValidRegion valid_region = shape_to_valid_region(shape);
+    validate(in.info()->valid_region(), valid_region);
+    validate(out.info()->valid_region(), valid_region);
+
+    // Validate valid region bias
+    if(add_bias)
+    {
+        const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
+        validate(bias.info()->valid_region(), valid_region_bias);
+    }
+
+    // Validate padding
+    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
+    validate(in.info()->padding(), padding);
+    validate(out.info()->padding(), padding);
+
+    if(add_bias)
+    {
+        validate(bias.info()->padding(), padding);
+    }
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
+                       quantize_down_int32_to_uint8_scale_by_fixedpoint_cases))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(),
+                       quantize_down_int32_to_uint8_scale_by_fixedpoint_cases))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+
+TEST_SUITE(BoundedReLu)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
+                       quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(),
+                       quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // BoundedReLu
+
+TEST_SUITE_END() // QuantizeDownInt32ToUint8ScaleByFixedPoint
 TEST_SUITE_END() // OutputStage
 TEST_SUITE_END() // GEMMLowp