commit 7e5b7bfc06c0bd8aecd809817866733c4fdf07fe
Author:    SiCong Li <sicong.li@arm.com>  2020-11-17 09:41:13 +0000
Committer: SiCong Li <sicong.li@arm.com>  2021-01-07 10:43:07 +0000
Tree:      5c8cb9445715ccde7ad4dd28f281fe1c852e7e32
Parent:    a8e2aeb7d2d46a7ab0e9523de145af9920fc1fa3
Review all shapes in datasets to account for padding removal Part 3

* Add the following configurations for stressing padding removal:
  * size = 1
  * size = multiple of processing size
  * size = non-multiple of processing size

Resolves COMPMID-3865

Signed-off-by: SiCong Li <sicong.li@arm.com>
Change-Id: I2e0e6d4da129f64ba23cf7b9e0fa1fa1ad93efc3
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4440
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
 tests/datasets/GEMMLowpFusedOffsetOutputDataset.h               |  6
 tests/datasets/SmallGEMMLowpDataset.h                           |  4
 tests/validation/CL/GEMMLowpMatrixMultiplyNative.cpp            | 17
 tests/validation/CL/GEMMLowpMatrixMultiplyReshaped.cpp          | 25
 tests/validation/CL/GEMMMatrixMultiply.cpp                      | 22
 tests/validation/CL/GEMMMatrixMultiplyInterleavedTransposed.cpp | 21
 6 files changed, 60 insertions(+), 35 deletions(-)
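
The three shape categories called out in the commit message reduce to a divisibility check of each GEMM dimension against the size the kernel processes per step. Below is a minimal standalone sketch of that classification, assuming a processing size of 16 purely for illustration (the real value depends on the kernel and the M0/N0/K0 block sizes exercised by the tests in this patch):

```cpp
#include <cstdio>
#include <initializer_list>

enum class PaddingCase { Unit, Multiple, NonMultiple };

// Classify one GEMM dimension against the size the kernel tiles over.
PaddingCase classify(unsigned size, unsigned processing_size)
{
    if(size == 1)                   return PaddingCase::Unit;        // degenerate one-element edge
    if(size % processing_size == 0) return PaddingCase::Multiple;    // tiles evenly, no tail to pad
    return PaddingCase::NonMultiple;                                 // leftover tail stresses padding removal
}

int main()
{
    // M values from the new m_n_values datasets: one per category (processing size of 16 assumed)
    for(unsigned m : { 1u, 16u, 37u })
    {
        std::printf("M=%u -> category %d\n", m, static_cast<int>(classify(m, 16u)));
    }
    return 0;
}
```

With M in {1, 16, 37} the loop prints one dimension per category, matching the new m_n_values datasets below.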
diff --git a/tests/datasets/GEMMLowpFusedOffsetOutputDataset.h b/tests/datasets/GEMMLowpFusedOffsetOutputDataset.h
index fe3ca255f7..7ab068c211 100644
--- a/tests/datasets/GEMMLowpFusedOffsetOutputDataset.h
+++ b/tests/datasets/GEMMLowpFusedOffsetOutputDataset.h
@@ -167,10 +167,10 @@ class SmallGEMMLowpFusedOffsetOutputUint8Dataset final : public GEMMLowpFusedOff
public:
SmallGEMMLowpFusedOffsetOutputUint8Dataset()
{
- add_config(TensorShape(21U, 13U), TensorShape(33U, 21U), TensorShape(33U, 13U), 0, 0, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN, -100, 2, 13, 10, 210));
+ add_config(TensorShape(21U, 13U), TensorShape(1U, 21U), TensorShape(1U, 13U), 0, 0, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN, -100, 2, 13, 10, 210));
add_config(TensorShape(52U, 13U), TensorShape(33U, 52U), TensorShape(33U, 13U), 0, 4, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN, 100, 2, 13, 10, 210));
add_config(TensorShape(31U, 27U), TensorShape(23U, 31U), TensorShape(23U, 27U), 18, 23, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN, 200, 2, 13, 10, 210));
- add_config(TensorShape(32U, 72U), TensorShape(17U, 32U), TensorShape(17U, 72U), -9, 1, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN, -100, 2, 13, 10, 210));
+ add_config(TensorShape(32U, 72U), TensorShape(16U, 32U), TensorShape(16U, 72U), -9, 1, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN, -100, 2, 13, 10, 210));
add_config(TensorShape(21U, 1U), TensorShape(43U, 21U), TensorShape(43U, 1U), 0, 0, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT, -2, 254601600, 10, 10, 210));
add_config(TensorShape(31U, 3U), TensorShape(72U, 31U), TensorShape(72U, 3U), -2, 13, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT, 0, 254601600, 10, 10, 210));
@@ -184,7 +184,7 @@ class SmallGEMMLowpFusedOffsetOutputInt8Dataset final : public GEMMLowpFusedOffs
public:
SmallGEMMLowpFusedOffsetOutputInt8Dataset()
{
- add_config(TensorShape(21U, 1U), TensorShape(43U, 21U), TensorShape(43U, 1U), 0, 0, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN, -50, 2, 13, -10, 110));
+ add_config(TensorShape(21U, 1U), TensorShape(1U, 21U), TensorShape(1U, 1U), 0, 0, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN, -50, 2, 13, -10, 110));
add_config(TensorShape(31U, 3U), TensorShape(72U, 31U), TensorShape(72U, 3U), -2, 13, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN, 0, 2, 13, -10, 110));
add_config(TensorShape(52U, 26U), TensorShape(33U, 52U), TensorShape(33U, 26U), -2, 0, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN, 0, 2, 13, -10, 110));
add_config(TensorShape(38U, 43U), TensorShape(21U, 38U), TensorShape(21U, 43U), -3, -2, OutputStageInfo(GEMMLowpOutputStageType::QUANTIZE_DOWN, -40, 2, 13, -10, 110));
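
A note for reading these configs: TensorShape lists dimensions as (width, height), i.e. (columns, rows), so for C = A * B the three shapes are A = (K, M), B = (N, K) and C = (N, M). Here is a small self-checking sketch (gemm_shapes_ok is a hypothetical helper, not library code) run against two of the shapes above:

```cpp
#include <cassert>
#include <cstdio>

// (width, height) = (columns, rows), mirroring TensorShape's dimension order
struct Shape
{
    unsigned w, h;
};

// For C = A * B the datasets pass A = (K, M), B = (N, K), C = (N, M)
bool gemm_shapes_ok(Shape a, Shape b, Shape c)
{
    return a.w == b.h && c.w == b.w && c.h == a.h;
}

int main()
{
    // New one-column config above: 13x21 LHS times 21x1 RHS gives a 13x1 result
    assert(gemm_shapes_ok({ 21, 13 }, { 1, 21 }, { 1, 13 }));
    // Multiple-of-16 config: 72x32 LHS times 32x16 RHS gives a 72x16 result
    assert(gemm_shapes_ok({ 32, 72 }, { 16, 32 }, { 16, 72 }));
    std::puts("shape convention holds");
    return 0;
}
```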
diff --git a/tests/datasets/SmallGEMMLowpDataset.h b/tests/datasets/SmallGEMMLowpDataset.h
index f16e3fa5f8..1b6c65307b 100644
--- a/tests/datasets/SmallGEMMLowpDataset.h
+++ b/tests/datasets/SmallGEMMLowpDataset.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 Arm Limited.
+ * Copyright (c) 2017-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,7 +42,7 @@ class SmallGEMMLowpDataset final : public GEMMLowpDataset
public:
SmallGEMMLowpDataset()
{
- add_config(TensorShape(21U, 1U), TensorShape(43U, 21U), TensorShape(43U, 1U), 0, 0);
+ add_config(TensorShape(21U, 1U), TensorShape(1U, 21U), TensorShape(1U, 1U), 0, 0);
add_config(TensorShape(21U, 13U), TensorShape(33U, 21U), TensorShape(33U, 13U), 0, 0);
add_config(TensorShape(31U, 3U), TensorShape(72U, 31U), TensorShape(72U, 3U), -2, 13);
add_config(TensorShape(52U, 13U), TensorShape(33U, 52U), TensorShape(33U, 13U), 0, 4);
diff --git a/tests/validation/CL/GEMMLowpMatrixMultiplyNative.cpp b/tests/validation/CL/GEMMLowpMatrixMultiplyNative.cpp
index 4a574d8f1d..1057af95f2 100644
--- a/tests/validation/CL/GEMMLowpMatrixMultiplyNative.cpp
+++ b/tests/validation/CL/GEMMLowpMatrixMultiplyNative.cpp
@@ -53,8 +53,15 @@ namespace
{
// *INDENT-OFF*
// clang-format off
-/** M values to test */
-const auto m_values = framework::dataset::make("M", 37);
+/** M, N combinations to test
+ * 1: Special 1x1 case
+ * 2: Special multiples of processor size in both dimensions
+ * 3: Non-multiples of processor size in both dimensions
+*/
+const auto m_n_values = zip(
+ framework::dataset::make("M", {1, 16, 37}),
+ framework::dataset::make("N", {1, 16, 51})
+ );
/** M_W values to test */
const auto m_w_values = framework::dataset::make("M_W", 5);
@@ -93,8 +100,7 @@ const auto k0_values_nightly = framework::dataset::make("K0", { 1, 2, 3, 4, 8, 1
TEST_SUITE(CL)
TEST_SUITE(GEMMLowpMatrixMultiplyNative)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyNativeFixture, framework::DatasetMode::ALL,
- combine(combine(combine(combine(combine(combine(m_values,
- n_values),
+ combine(combine(combine(combine(combine(m_n_values,
k_values),
b_values),
m0_values_precommit),
@@ -106,8 +112,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyNativeFixture, framewor
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpMatrixMultiplyNativeFixture, framework::DatasetMode::ALL,
- combine(combine(combine(combine(combine(combine(m_values,
- n_values),
+ combine(combine(combine(combine(combine(m_n_values,
k_values),
b_values),
m0_values_nightly),
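
The structural change repeated across all four test files is visible in this hunk: M and N were previously combined as independent axes (a full cross product), and are now zipped into fixed (M, N) pairs. With three values per axis that means 3 curated cases instead of 9, while still covering each padding category in both dimensions. A minimal standalone sketch of the two semantics, not the test framework's actual zip()/combine() implementation:

```cpp
#include <cstdio>
#include <utility>
#include <vector>

// zip(): element-wise pairing -- M and N advance in lock-step
std::vector<std::pair<int, int>> zip_values(const std::vector<int> &m, const std::vector<int> &n)
{
    std::vector<std::pair<int, int>> out;
    for(std::size_t i = 0; i < m.size() && i < n.size(); ++i)
    {
        out.emplace_back(m[i], n[i]);
    }
    return out;
}

// combine(): cartesian product -- every M against every N
std::vector<std::pair<int, int>> combine_values(const std::vector<int> &m, const std::vector<int> &n)
{
    std::vector<std::pair<int, int>> out;
    for(int mv : m)
    {
        for(int nv : n)
        {
            out.emplace_back(mv, nv);
        }
    }
    return out;
}

int main()
{
    const std::vector<int> m{ 1, 16, 37 };
    const std::vector<int> n{ 1, 16, 51 };
    std::printf("zip: %zu cases, combine: %zu cases\n",
                zip_values(m, n).size(), combine_values(m, n).size()); // prints 3 and 9
    return 0;
}
```

Pairing keeps suite growth linear in the number of categories rather than quadratic, which matters since these values are still crossed with K, batch, and block-size axes.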
diff --git a/tests/validation/CL/GEMMLowpMatrixMultiplyReshaped.cpp b/tests/validation/CL/GEMMLowpMatrixMultiplyReshaped.cpp
index 0c651cddc2..4873a291ab 100644
--- a/tests/validation/CL/GEMMLowpMatrixMultiplyReshaped.cpp
+++ b/tests/validation/CL/GEMMLowpMatrixMultiplyReshaped.cpp
@@ -62,8 +62,19 @@ namespace
{
// *INDENT-OFF*
// clang-format off
+
+/** M, N combinations to test
+ * 1: Special 1x1 case
+ * 2: Special multiples of processor size in both dimensions
+ * 3: Non-multiples of processor size in both dimensions
+*/
+const auto m_n_values = zip(
+ framework::dataset::make("M", {1, 16, 37}),
+ framework::dataset::make("N", {1, 16, 51})
+ );
+
/** M values to test */
-const auto m_values = framework::dataset::make("M", 37);
+const auto m_values = framework::dataset::make("M", {1, 37});
/** M_W values to test */
const auto m_w_values = framework::dataset::make("M_W", 5);
@@ -72,7 +83,7 @@ const auto m_w_values = framework::dataset::make("M_W", 5);
const auto m_h_values = framework::dataset::make("M_H", 7);
/** N values to test */
-const auto n_values = framework::dataset::make("N", 51);
+const auto n_values = framework::dataset::make("N", {1, 51});
/** K values to test */
const auto k_values = framework::dataset::make("K", 23);
@@ -125,9 +136,8 @@ TEST_SUITE(QUANTIZED)
TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyReshapedFixture, framework::DatasetMode::ALL,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
- m_values,
- n_values),
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ m_n_values,
k_values),
b_values),
m0_values_precommit_1),
@@ -205,9 +215,8 @@ TEST_SUITE_END() // QASYMM8
TEST_SUITE(QASYMM8_SIGNED)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyReshapedFixture, framework::DatasetMode::ALL,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
- m_values,
- n_values),
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ m_n_values,
k_values),
b_values),
m0_values_precommit_2),
diff --git a/tests/validation/CL/GEMMMatrixMultiply.cpp b/tests/validation/CL/GEMMMatrixMultiply.cpp
index 5d2e211d91..fdf7f503ec 100644
--- a/tests/validation/CL/GEMMMatrixMultiply.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiply.cpp
@@ -72,8 +72,16 @@ const auto alpha_values = framework::dataset::make("alpha", {1.0f, -0.75f} );
/** Beta values to test */
const auto beta_values = framework::dataset::make("beta", {-0.35f, 0.0f} );
-/** M values to test */
-const auto m_values = framework::dataset::make("M", {37, 1});
+/** M, N combinations to test
+ * 1: Special 1x1 case
+ * 2: Special multiples of processor size in both dimensions
+ * 3: Non-multiples of processor size in both dimensions
+ * 4: Special 1x1003 case
+*/
+const auto m_n_values = zip(
+ framework::dataset::make("M", {1, 16, 37, 1}),
+ framework::dataset::make("N", {1, 16, 51, 1003})
+ );
/** N values to test */
const auto n_values = framework::dataset::make("N", {51, 1003});
@@ -247,9 +255,8 @@ TEST_SUITE(Float)
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyNativeFixture<float>, framework::DatasetMode::ALL,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
- m_values,
- n_values),
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ m_n_values,
k_values),
b_values),
alpha_values),
@@ -287,9 +294,8 @@ TEST_SUITE_END() // FP32
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyNativeFixture<half>, framework::DatasetMode::ALL,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
- m_values,
- n_values),
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ m_n_values,
k_values),
b_values),
alpha_values),
diff --git a/tests/validation/CL/GEMMMatrixMultiplyInterleavedTransposed.cpp b/tests/validation/CL/GEMMMatrixMultiplyInterleavedTransposed.cpp
index b2701e7f6c..d6507a06c4 100644
--- a/tests/validation/CL/GEMMMatrixMultiplyInterleavedTransposed.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiplyInterleavedTransposed.cpp
@@ -82,8 +82,15 @@ const auto alpha_values = framework::dataset::make("alpha", {1.0f, -0.75f} );
/** Beta values to test */
const auto beta_values = framework::dataset::make("beta", {-0.35f, 0.0f} );
-/** M values to test */
-const auto m_values = framework::dataset::make("M", {37, 1});
+/** M, N combinations to test
+ * 1: Special 1x1 case
+ * 2: Special multiples of processor size in both dimensions
+ * 3: Non-multiples of processor size in both dimensions
+*/
+const auto m_n_values = zip(
+ framework::dataset::make("M", {1, 16, 37}),
+ framework::dataset::make("N", {1, 16, 51})
+ );
/** N values to test */
const auto n_values = framework::dataset::make("N", 51);
@@ -235,9 +242,8 @@ TEST_CASE(Negative, framework::DatasetMode::ALL)
TEST_SUITE(Float)
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyReshapedFixture<float>, framework::DatasetMode::ALL,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
- m_values,
- n_values),
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ m_n_values,
k_values),
b_values),
alpha_values),
@@ -279,9 +285,8 @@ TEST_SUITE_END() // FP32
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyReshapedFixture<half>, framework::DatasetMode::ALL,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
- m_values,
- n_values),
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ m_n_values,
k_values),
b_values),
alpha_values),