From 7f0f790ae7f5dd044a5d7564492583b8df974a11 Mon Sep 17 00:00:00 2001
From: Gian Marco
Date: Thu, 7 Dec 2017 09:26:56 +0000
Subject: COMPMID-731 - Remove padding requirements for NEGEMMLowpOutputStage

Used a left-over for loop in:
- NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel
- NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel

in order to remove the padding requirements for AndroidNN

Change-Id: I8ef529fc3d1adecf15fbe42002d99bc0030f131f
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/112266
Reviewed-by: Anthony Barbier
Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com
---
 tests/validation/NEON/GEMMLowp.cpp | 78 +++++++++++++++++++++++++++++++++++++-
 1 file changed, 76 insertions(+), 2 deletions(-)

(limited to 'tests/validation/NEON/GEMMLowp.cpp')

diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp
index 7616df9eaa..a901b442ab 100644
--- a/tests/validation/NEON/GEMMLowp.cpp
+++ b/tests/validation/NEON/GEMMLowp.cpp
@@ -187,6 +187,43 @@ const auto quantize_down_int32_to_uint8_scale_relu_cases = framework::dataset::m
 
 using NEGEMMLowpQuantizeDownInt32ToUint8ScaleFixture = GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture;
 
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
+    framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Input not a multiple of 16
+                                             TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Invalid min and max
+                                             TensorInfo(TensorShape(20U, 13U), 1, DataType::S32), // Wrong output data type
+                                           }),
+    framework::dataset::make("InputBInfo",{ TensorInfo(TensorShape(21U), 1, DataType::S32),
+                                            TensorInfo(TensorShape(21U), 1, DataType::S32),
+                                            TensorInfo(TensorShape(20U), 1, DataType::S32),
+                                          })),
+    framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8),
+                                            TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8),
+                                            TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
+                                          })),
+    framework::dataset::make("Min",{ 0,
+                                     8,
+                                     13,
+                                   })),
+    framework::dataset::make("Max",{ 205,
+                                     300,
+                                     180,
+                                   })),
+    framework::dataset::make("Expected", { true, false, false })),
+    a_info, b_info, output_info, min, max, expected)
+{
+    // Lock tensors
+    Status status = NEGEMMLowpQuantizeDownInt32ToUint8Scale::validate(&a_info.clone()->set_is_resizable(false),
+                                                                      &b_info.clone()->set_is_resizable(false),
+                                                                      &output_info.clone()->set_is_resizable(false),
+                                                                      min,
+                                                                      max);
+    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
 DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), quantize_down_int32_to_uint8_scale_cases),
                shape, result_offset, result_mult_int, result_shift, min, max, add_bias)
 {
@@ -218,7 +255,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
     }
 
     // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
+    const PaddingSize padding(0);
     validate(in.info()->padding(), padding);
     validate(out.info()->padding(), padding);
 
@@ -269,6 +306,43 @@ const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases = framewo
 using NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture = GEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointValidationFixture;
 
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
+    framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Input not a multiple of 16
+                                             TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Invalid min and max
+                                             TensorInfo(TensorShape(20U, 13U), 1, DataType::S32), // Wrong output data type
+                                           }),
+    framework::dataset::make("InputBInfo",{ TensorInfo(TensorShape(21U), 1, DataType::S32),
+                                            TensorInfo(TensorShape(21U), 1, DataType::S32),
+                                            TensorInfo(TensorShape(20U), 1, DataType::S32),
+                                          })),
+    framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8),
+                                            TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8),
+                                            TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
+                                          })),
+    framework::dataset::make("Min",{ 0,
+                                     8,
+                                     13,
+                                   })),
+    framework::dataset::make("Max",{ 205,
+                                     300,
+                                     180,
+                                   })),
+    framework::dataset::make("Expected", { true, false, false })),
+    a_info, b_info, output_info, min, max, expected)
+{
+    // Lock tensors
+    Status status = NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(&a_info.clone()->set_is_resizable(false),
+                                                                                  &b_info.clone()->set_is_resizable(false),
+                                                                                  &output_info.clone()->set_is_resizable(false),
+                                                                                  min,
+                                                                                  max);
+    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
 DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()),
                                                                    quantize_down_int32_to_uint8_scale_by_fixedpoint_cases),
                shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias)
 {
@@ -301,7 +375,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
     }
 
     // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
+    const PaddingSize padding(0);
     validate(in.info()->padding(), padding);
     validate(out.info()->padding(), padding);
 
-- 
cgit v1.2.1