From f1109546ae79e56c8f6797248c5a15588a9a10eb Mon Sep 17 00:00:00 2001
From: morgolock
Date: Tue, 15 Sep 2020 14:33:54 +0100
Subject: COMPMID-3184: Added no padding test for NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel

Change-Id: I8c8b499be0a09886b701a4f678b40e57f2c48dd8
Signed-off-by: morgolock
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3990
Tested-by: Arm Jenkins
Reviewed-by: Manuel Bottini
Comments-Addressed: Arm Jenkins
---
 tests/validation/NEON/GEMMLowp.cpp | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp
index 579499dd4e..ca7d50fd40 100644
--- a/tests/validation/NEON/GEMMLowp.cpp
+++ b/tests/validation/NEON/GEMMLowp.cpp
@@ -217,6 +217,29 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
 // clang-format on
 // *INDENT-ON*
 
+TEST_CASE(NoPaddingAdded, framework::DatasetMode::PRECOMMIT)
+{
+    Tensor input1 = create_tensor<Tensor>(TensorShape(21U, 13U), DataType::S32);
+    Tensor input2 = create_tensor<Tensor>(TensorShape(21U, 1U), DataType::S32);
+    Tensor output = create_tensor<Tensor>(TensorShape(21U, 13U), DataType::QASYMM8);
+
+    GEMMLowpOutputStageInfo output_stage = GEMMLowpOutputStageInfo();
+    output_stage.type = GEMMLowpOutputStageType::QUANTIZE_DOWN;
+    output_stage.gemmlowp_min_bound = 0;
+    output_stage.gemmlowp_max_bound = 205;
+    output_stage.output_data_type = DataType::QASYMM8;
+
+
+    NEGEMMLowpOutputStage f;
+    f.configure(&input1, &input2, &output, output_stage);
+
+    // Validate padding is zero
+    validate(input1.info()->padding(), PaddingSize());
+    validate(input2.info()->padding(), PaddingSize());
+    validate(output.info()->padding(), PaddingSize());
+}
+
+
 FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_cases))
 {
     // Validate output
-- 
cgit v1.2.1