author    Michalis Spyrou <michalis.spyrou@arm.com>  2020-07-30 23:39:32 +0100
committer Michalis Spyrou <michalis.spyrou@arm.com>  2020-09-28 08:52:42 +0000
commit    d175ecea20f5c9e3bff07f7c5d3e4bb5519f7cb9 (patch)
tree      62b0957e4241acc257685dadb79442ed27f5cb96 /tests/validation/NEON/GEMMLowp.cpp
parent    c6af9db1bb6b7a2093205f6f3088143bad660ff9 (diff)
download  ComputeLibrary-d175ecea20f5c9e3bff07f7c5d3e4bb5519f7cb9.tar.gz
COMPMID-3324: Remove configuration tests
Remove configuration tests that use the default data shapes. There is no need to run them, since configure() already runs as part of the actual validation run.

Signed-off-by: Michalis Spyrou <michalis.spyrou@arm.com>
Change-Id: If6d88a6ba5e9463fa8c615fcf76a5c07d3049d53
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3638
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
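The rationale can be seen in miniature below. This is a simplified sketch, not the library's actual test fixture: run_gemmlowp_case is a hypothetical driver, but it uses only public Arm Compute Library calls, and it shows that configure() necessarily runs on the path to run(), so a configure-only test adds no coverage beyond what every validation case already exercises.

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    // Hypothetical driver mirroring what a validation fixture does per dataset entry.
    void run_gemmlowp_case(const TensorShape &shape_a, const TensorShape &shape_b,
                           const TensorShape &shape_c, int32_t a_offset, int32_t b_offset)
    {
        // Create tensors with the same shapes, types and quantization info
        // that the deleted Configuration test set up.
        Tensor a{}, b{}, c{};
        a.allocator()->init(TensorInfo(shape_a, 1, DataType::QASYMM8, QuantizationInfo(1.f / 255, a_offset)));
        b.allocator()->init(TensorInfo(shape_b, 1, DataType::QASYMM8, QuantizationInfo(1.f / 255, b_offset)));
        c.allocator()->init(TensorInfo(shape_c, 1, DataType::S32));

        // configure() runs here, unconditionally, before any validation can happen...
        NEGEMMLowpMatrixMultiplyCore gemmlowp_mm{};
        gemmlowp_mm.configure(&a, &b, nullptr, &c);

        // ...and only then does the fixture allocate, fill and execute, after which
        // the output is compared against a reference implementation.
        a.allocator()->allocate();
        b.allocator()->allocate();
        c.allocator()->allocate();
        // (a real fixture would fill a and b with test data here)
        gemmlowp_mm.run();
    }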
Diffstat (limited to 'tests/validation/NEON/GEMMLowp.cpp')
-rw-r--r--  tests/validation/NEON/GEMMLowp.cpp | 154
1 file changed, 4 insertions(+), 150 deletions(-)
diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp
index ca7d50fd40..368433201b 100644
--- a/tests/validation/NEON/GEMMLowp.cpp
+++ b/tests/validation/NEON/GEMMLowp.cpp
@@ -79,26 +79,6 @@ TEST_SUITE(GEMMLowp)
TEST_SUITE(MatrixMultiplyCore)
using NEGEMMLowpMatrixMultiplyCoreFixture = GEMMLowpMatrixMultiplyCoreValidationFixture<Tensor, Accessor, NEGEMMLowpMatrixMultiplyCore>;
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, framework::dataset::concat(datasets::SmallGEMMLowpDataset(), datasets::LargeGEMMLowpDataset()),
- shape_a, shape_b, shape_c, a_offset, b_offset)
-{
- // Create tensors
- Tensor a = create_tensor<Tensor>(shape_a, DataType::QASYMM8);
- Tensor b = create_tensor<Tensor>(shape_b, DataType::QASYMM8);
- Tensor c = create_tensor<Tensor>(shape_c, DataType::S32);
-
- a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, a_offset));
- b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset));
-
- ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Create and configure function
- NEGEMMLowpMatrixMultiplyCore gemmlowp_mm;
- gemmlowp_mm.configure(&a, &b, nullptr, &c);
-}
-
// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
@@ -224,11 +204,10 @@ TEST_CASE(NoPaddingAdded, framework::DatasetMode::PRECOMMIT)
Tensor output = create_tensor<Tensor>(TensorShape(21U, 13U), DataType::QASYMM8);
GEMMLowpOutputStageInfo output_stage = GEMMLowpOutputStageInfo();
- output_stage.type = GEMMLowpOutputStageType::QUANTIZE_DOWN;
- output_stage.gemmlowp_min_bound = 0;
- output_stage.gemmlowp_max_bound = 205;
- output_stage.output_data_type = DataType::QASYMM8;
-
+ output_stage.type = GEMMLowpOutputStageType::QUANTIZE_DOWN;
+ output_stage.gemmlowp_min_bound = 0;
+ output_stage.gemmlowp_max_bound = 205;
+ output_stage.output_data_type = DataType::QASYMM8;
NEGEMMLowpOutputStage f;
f.configure(&input1, &input2, &output, output_stage);
@@ -239,7 +218,6 @@ TEST_CASE(NoPaddingAdded, framework::DatasetMode::PRECOMMIT)
validate(output.info()->padding(), PaddingSize());
}
-
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_cases))
{
// Validate output
@@ -378,48 +356,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
// clang-format on
// *INDENT-ON*
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
- quantize_down_int32_to_uint8_scale_by_fixedpoint_cases),
- shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias)
-{
- TensorShape shape_bias(shape[0]);
-
- // Create tensors
- Tensor in = create_tensor<Tensor>(shape, DataType::S32);
- Tensor bias = create_tensor<Tensor>(shape_bias, DataType::S32);
- Tensor out = create_tensor<Tensor>(shape, DataType::QASYMM8);
-
- ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Create and configure function
- NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint output_stage;
- output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
-
- // Validate valid region input and output
- const ValidRegion valid_region = shape_to_valid_region(shape);
- validate(in.info()->valid_region(), valid_region);
- validate(out.info()->valid_region(), valid_region);
-
- // Validate valid region bias
- if(add_bias)
- {
- const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
- validate(bias.info()->valid_region(), valid_region_bias);
- }
-
- // Validate padding
- const PaddingSize padding(0);
- validate(in.info()->padding(), padding);
- validate(out.info()->padding(), padding);
-
- if(add_bias)
- {
- validate(bias.info()->padding(), padding);
- }
-}
-
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
quantize_down_int32_to_uint8_scale_by_fixedpoint_cases))
{
@@ -502,47 +438,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
// clang-format on
// *INDENT-ON*
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
- quantize_down_int32_to_int8_scale_by_fixedpoint_cases),
- shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias)
-{
- TensorShape shape_bias(shape[0]);
-
- // Create tensors
- Tensor in = create_tensor<Tensor>(shape, DataType::S32);
- Tensor bias = create_tensor<Tensor>(shape_bias, DataType::S32);
- Tensor out = create_tensor<Tensor>(shape, DataType::QASYMM8_SIGNED);
-
- ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Create and configure function
- NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint output_stage;
- output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
-
- // Validate valid region input and output
- const ValidRegion valid_region = shape_to_valid_region(shape);
- validate(in.info()->valid_region(), valid_region);
- validate(out.info()->valid_region(), valid_region);
-
- // Validate valid region bias
- if(add_bias)
- {
- const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
- validate(bias.info()->valid_region(), valid_region_bias);
- }
-
- // Validate padding
- const PaddingSize padding(0);
- validate(in.info()->padding(), padding);
- validate(out.info()->padding(), padding);
-
- if(add_bias)
- {
- validate(bias.info()->padding(), padding);
- }
-}
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
quantize_down_int32_to_int8_scale_by_fixedpoint_cases))
{
@@ -616,47 +511,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
// clang-format on
// *INDENT-ON*
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
- quantize_down_int32_to_int16_scale_by_fixedpoint_cases),
- shape, result_fixedpoint_multiplier, result_shift, min, max, add_bias)
-{
- TensorShape shape_bias(shape[0]);
-
- // Create tensors
- Tensor in = create_tensor<Tensor>(shape, DataType::S32);
- Tensor bias = create_tensor<Tensor>(shape_bias, DataType::S32);
- Tensor out = create_tensor<Tensor>(shape, DataType::QSYMM16);
-
- ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Create and configure function
- NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint output_stage;
- output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_fixedpoint_multiplier, result_shift, min, max);
-
- // Validate valid region input and output
- const ValidRegion valid_region = shape_to_valid_region(shape);
- validate(in.info()->valid_region(), valid_region);
- validate(out.info()->valid_region(), valid_region);
-
- // Validate valid region bias
- if(add_bias)
- {
- const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
- validate(bias.info()->valid_region(), valid_region_bias);
- }
-
- // Validate padding
- const PaddingSize padding(0);
- validate(in.info()->padding(), padding);
- validate(out.info()->padding(), padding);
-
- if(add_bias)
- {
- validate(bias.info()->padding(), padding);
- }
-}
TEST_SUITE(NoRelu)
TEST_SUITE(MultSmallerEq1)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),