author    Gian Marco <gianmarco.iodice@arm.com>  2017-11-17 09:27:57 +0000
committer Anthony Barbier <anthony.barbier@arm.com>  2018-11-02 16:35:24 +0000
commit    6b77e917801b4e979796ea75c538eef740482089 (patch)
tree      0e693ecb1eb0b05018901a992b56781a08b9c266 /tests/validation/NEON/GEMMLowp.cpp
parent    b3c81cb4100b3a449db5232364e18e649b26df58 (diff)
download  ComputeLibrary-6b77e917801b4e979796ea75c538eef740482089.tar.gz
COMPMID-665 - NEON: Add QASYMM8 in place Activation layer
- Added min and max arguments for QuantizeDownInt32ToUint8Scale in order to apply bounded relu
- Added support for int32_t biases
- Extended tests

Change-Id: I015dae17faa7284766b5435ca33bcf593c1b2b69
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/96512
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
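The hunks below exercise the extended configure() entry point, which now accepts an optional S32 bias tensor and min/max bounds implementing the bounded relu. What follows is a minimal usage sketch distilled from those hunks; the header path, the tensor shapes, the parameter values, and the quantize_down_example() wrapper are illustrative assumptions, not part of the patch. Only the configure() signature is taken directly from the test code below.

// Sketch only: header path and shapes are assumptions; the configure()
// call mirrors the one exercised in the tests below.
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

void quantize_down_example(bool add_bias)
{
    Tensor in, bias, out;
    in.allocator()->init(TensorInfo(TensorShape(16U, 4U), 1, DataType::S32));
    bias.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::S32)); // one bias value per column, as in the test
    out.allocator()->init(TensorInfo(TensorShape(16U, 4U), 1, DataType::QASYMM8));

    NEGEMMLowpQuantizeDownInt32ToUint8Scale output_stage;
    // New in this patch: an optional bias tensor (pass nullptr to skip it)
    // and min/max clamp bounds. The non-relu dataset below passes
    // min = max = 0, which reads as "no clamping".
    output_stage.configure(&in, add_bias ? &bias : nullptr, &out,
                           /* result_offset   */ -2,
                           /* result_mult_int */ 1,
                           /* result_shift    */ 2,
                           /* min */ 0,
                           /* max */ 171);

    in.allocator()->allocate();
    bias.allocator()->allocate();
    out.allocator()->allocate();
    // ... fill `in` (and `bias`, if used) with int32 accumulators, then:
    output_stage.run();
}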
Diffstat (limited to 'tests/validation/NEON/GEMMLowp.cpp')
-rw-r--r--  tests/validation/NEON/GEMMLowp.cpp | 66
1 file changed, 57 insertions(+), 9 deletions(-)
diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp
index ba91ced443..078096a0dd 100644
--- a/tests/validation/NEON/GEMMLowp.cpp
+++ b/tests/validation/NEON/GEMMLowp.cpp
@@ -131,34 +131,55 @@ TEST_SUITE(OutputStage)
TEST_SUITE(QuantizeDownInt32ToUint8Scale)
-using NEGEMMLowpQuantizeDownInt32ToUint8ScaleFixture = GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToUint8Scale>;
+const auto quantize_down_int32_to_uint8_scale_cases = framework::dataset::make("result_offset", -2, 2) * framework::dataset::make("result_mult_int", 1, 2) * framework::dataset::make("result_shift", 2,
+ 3)
+ * framework::dataset::make("min", 0) * framework::dataset::make("max", 0) * framework::dataset::make("addBias", { false, true });
+
+const auto quantize_down_int32_to_uint8_scale_relu_cases = framework::dataset::make("result_offset", -2, 2) * framework::dataset::make("result_mult_int", 1,
+ 2)
+ * framework::dataset::make("result_shift", 2, 3) * framework::dataset::make("min", 0, 2) * framework::dataset::make("max", 171, 174) * framework::dataset::make("addBias", { false, true });
-const auto quantize_down_int32_to_uint8_scale_cases = framework::dataset::make("result_offset", -4, 4) * framework::dataset::make("result_mult_int", 1, 3) * framework::dataset::make("result_shift", 2,
- 4);
+using NEGEMMLowpQuantizeDownInt32ToUint8ScaleFixture = GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToUint8Scale>;
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), quantize_down_int32_to_uint8_scale_cases),
- shape, result_offset, result_mult_int, result_shift)
+ shape, result_offset, result_mult_int, result_shift, min, max, add_bias)
{
+ TensorShape shape_bias(shape[0]);
+
// Create tensors
- Tensor in = create_tensor<Tensor>(shape, DataType::S32);
- Tensor out = create_tensor<Tensor>(shape, DataType::QASYMM8);
+ Tensor in = create_tensor<Tensor>(shape, DataType::S32);
+ Tensor bias = create_tensor<Tensor>(shape_bias, DataType::S32);
+ Tensor out = create_tensor<Tensor>(shape, DataType::QASYMM8);
ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);
// Create and configure function
NEGEMMLowpQuantizeDownInt32ToUint8Scale output_stage;
- output_stage.configure(&in, &out, result_offset, result_mult_int, result_shift);
+ output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_offset, result_mult_int, result_shift, min, max);
- // Validate valid region
+ // Validate valid region input and output
const ValidRegion valid_region = shape_to_valid_region(shape);
validate(in.info()->valid_region(), valid_region);
validate(out.info()->valid_region(), valid_region);
+ // Validate valid region bias
+ if(add_bias)
+ {
+ const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
+ validate(bias.info()->valid_region(), valid_region_bias);
+ }
+
// Validate padding
const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
validate(in.info()->padding(), padding);
validate(out.info()->padding(), padding);
+
+ if(add_bias)
+ {
+ validate(bias.info()->padding(), padding);
+ }
}
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_cases))
@@ -173,8 +194,35 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpQuantizeDownInt32ToUint8ScaleFixture,
validate(Accessor(_target), _reference);
}
-TEST_SUITE_END() // QuantizeDownInt32ToUint8Scale
+TEST_SUITE(BoundedReLu)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_relu_cases))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), quantize_down_int32_to_uint8_scale_relu_cases))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // BoundedReLu
+
+TEST_SUITE(AddBias)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_relu_cases))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), quantize_down_int32_to_uint8_scale_relu_cases))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // AddBias
+
+TEST_SUITE_END() // QuantizeDownInt32ToUint8Scale
TEST_SUITE_END() // OutputStage
TEST_SUITE_END() // GEMMLowp
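
A note on the datasets in the hunks above: framework::dataset::make("name", lo, hi) generates a range of values and operator* forms the cartesian product, so each Configuration and fixture case runs over every combination of offset, multiplier, shift, bounds, and bias. For context on what the fixtures validate, here is a plausible scalar model of the output stage for one element, including the new bounded-relu clamp. The order of operations (bias and offset added before the multiply and shift) is an assumption drawn from the function's documentation at this revision, not a copy of the kernel's arithmetic.

#include <algorithm>
#include <cstdint>

// Hypothetical scalar reference for one output element. min == max == 0
// is taken to mean "no clamping", matching the non-relu dataset above.
uint8_t quantize_down_scale(int32_t acc, int32_t bias, int32_t result_offset,
                            int32_t result_mult_int, int32_t result_shift,
                            int32_t min, int32_t max)
{
    int32_t v = ((acc + bias + result_offset) * result_mult_int) >> result_shift;
    if(min != max)
    {
        v = std::max(min, std::min(max, v)); // bounded relu
    }
    // Saturating cast to the QASYMM8 range
    return static_cast<uint8_t>(std::max<int32_t>(0, std::min<int32_t>(255, v)));
}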