author    Michele Di Giorgio <michele.digiorgio@arm.com>  2019-11-27 16:17:30 +0000
committer Michele Di Giorgio <michele.digiorgio@arm.com>  2019-12-03 11:15:36 +0000
commit    f9179d393a07eb9eed753e315df79d22391906c6
tree      d8a1fd9d984bdd335d3ecac117ec33c4523211ef /tests
parent    b714b1d6a53e6c33df2ea3c1e8340f20480d799b
download  ComputeLibrary-f9179d393a07eb9eed753e315df79d22391906c6.tar.gz
COMPMID-2793: Add support for QASYMM8_SIGNED in CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel
Change-Id: I8abfdd3372cc394b98ec038b9fcb4abfe9216894
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2401
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests')
tests/validation/CL/GEMMLowpMatrixMultiplyReshapedOnlyRHS.cpp |  70
tests/validation/fixtures/GEMMLowpFixture.h                   | 133
2 files changed, 154 insertions(+), 49 deletions(-)
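
The patch threads a new DataType test parameter through both fixtures and splits the precommit M0 dataset in two, pairing M0=4 with QASYMM8 and M0=6 with QASYMM8_SIGNED so both quantized types are exercised without growing the precommit case count. The fill() helpers now also pick a per-type value range; note that the signed range [-127, 126] is exactly the unsigned range [1, 254] shifted by -128. A minimal standalone sketch of those ranges follows, assuming nothing from Compute Library: DataType and sample() here are illustrative stand-ins, not library symbols.

#include <cstdint>
#include <random>
#include <stdexcept>

enum class DataType { QASYMM8, QASYMM8_SIGNED };

// Draw one raw fill value in the range the fixture uses for each type:
// [1, 254] for QASYMM8 (per the patch comment, avoiding -128 and 128 on
// the DOT product path) and the shifted equivalent [-127, 126] for
// QASYMM8_SIGNED.
int sample(DataType dt, std::mt19937 &engine)
{
    switch(dt)
    {
        case DataType::QASYMM8:
            return std::uniform_int_distribution<>(1, 254)(engine);
        case DataType::QASYMM8_SIGNED:
            return std::uniform_int_distribution<>(-127, 126)(engine);
        default:
            throw std::runtime_error("Unsupported data type");
    }
}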
diff --git a/tests/validation/CL/GEMMLowpMatrixMultiplyReshapedOnlyRHS.cpp b/tests/validation/CL/GEMMLowpMatrixMultiplyReshapedOnlyRHS.cpp
index 6ead11ab23..106d650109 100644
--- a/tests/validation/CL/GEMMLowpMatrixMultiplyReshapedOnlyRHS.cpp
+++ b/tests/validation/CL/GEMMLowpMatrixMultiplyReshapedOnlyRHS.cpp
@@ -82,7 +82,8 @@ const auto k_values = framework::dataset::make("K", 23);
const auto b_values = framework::dataset::make("batch_size", 1, 3);
/** M0 values to test - Precommit */
-const auto m0_values_precommit = framework::dataset::make("M0", {4, 6});
+const auto m0_values_precommit_1 = framework::dataset::make("M0", {4});
+const auto m0_values_precommit_2 = framework::dataset::make("M0", {6});
/** N0 values to test - Precommit */
const auto n0_values_precommit = framework::dataset::make("N0", { 4 });
@@ -162,7 +163,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combi
n_values),
k_values),
framework::dataset::make("batch_size", 1)),
- m0_values_precommit),
+ m0_values_precommit_1),
n0_values_precommit),
k0_values_precommit),
h0_values_precommit),
@@ -172,25 +173,44 @@ m_value, n_value, k_value, b_value, m0_value, n0_value, k0_value, h0_value, i_va
validate_configuration(m_value, n_value, k_value, b_value, m0_value, n0_value, k0_value, h0_value, i_value_rhs);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyReshapedOnlyRHSFixture, framework::DatasetMode::ALL,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(
+FIXTURE_DATA_TEST_CASE(RunSmall_1, CLGEMMLowpMatrixMultiplyReshapedOnlyRHSFixture, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ m_values,
+ n_values),
+ k_values),
+ b_values),
+ m0_values_precommit_1),
+ n0_values_precommit),
+ k0_values_precommit),
+ h0_values_precommit),
+ i_values_rhs),
+ t_values_rhs),
+ framework::dataset::make("DataType", { DataType::QASYMM8 })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmall_2, CLGEMMLowpMatrixMultiplyReshapedOnlyRHSFixture, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
m_values,
n_values),
k_values),
b_values),
- m0_values_precommit),
+ m0_values_precommit_2),
n0_values_precommit),
k0_values_precommit),
h0_values_precommit),
i_values_rhs),
- t_values_rhs))
+ t_values_rhs),
+ framework::dataset::make("DataType", { DataType::QASYMM8_SIGNED })))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpMatrixMultiplyReshapedOnlyRHSFixture, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
m_values,
n_values),
k_values),
@@ -200,32 +220,53 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpMatrixMultiplyReshapedOnlyRHSFixture,
k0_values_nightly),
h0_values_nightly),
i_values_rhs),
- t_values_rhs))
+ t_values_rhs),
+ framework::dataset::make("DataType", { DataType::QASYMM8 })))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMLowpMatrixMultiplyReshapedOnlyRHS3DFixture, framework::DatasetMode::ALL,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+FIXTURE_DATA_TEST_CASE(RunSmall3D_1, CLGEMMLowpMatrixMultiplyReshapedOnlyRHS3DFixture, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ m_w_values,
+ m_h_values),
+ n_values),
+ k_values),
+ b_values),
+ m0_values_precommit_1),
+ n0_values_precommit),
+ k0_values_precommit),
+ h0_values_precommit),
+ i_values_rhs),
+ t_values_rhs),
+ framework::dataset::make("DataType", { DataType::QASYMM8 })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmall3D_2, CLGEMMLowpMatrixMultiplyReshapedOnlyRHS3DFixture, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
m_w_values,
m_h_values),
n_values),
k_values),
b_values),
- m0_values_precommit),
+ m0_values_precommit_2),
n0_values_precommit),
k0_values_precommit),
h0_values_precommit),
i_values_rhs),
- t_values_rhs))
+ t_values_rhs),
+ framework::dataset::make("DataType", { DataType::QASYMM8_SIGNED })))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMLowpMatrixMultiplyReshapedOnlyRHS3DFixture, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
m_w_values,
m_h_values),
n_values),
@@ -236,7 +277,8 @@ FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMLowpMatrixMultiplyReshapedOnlyRHS3DFixt
k0_values_nightly),
h0_values_nightly),
i_values_rhs),
- t_values_rhs))
+ t_values_rhs),
+ framework::dataset::make("DataType", { DataType::QASYMM8 })))
{
// Validate output
validate(CLAccessor(_target), _reference);
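
Each nested combine(...) above forms the cartesian product of two datasets, which is why appending the DataType axis adds exactly one more combine wrapper per test case. A hedged plain-C++ sketch of that product semantics, using stand-in values rather than the framework's dataset types:

#include <cstdio>
#include <string>
#include <utility>
#include <vector>

// Cartesian product of two value sets, mimicking what one nested
// combine(...) call contributes to a test-case dataset.
template <typename A, typename B>
std::vector<std::pair<A, B>> combine(const std::vector<A> &lhs, const std::vector<B> &rhs)
{
    std::vector<std::pair<A, B>> out;
    for(const auto &a : lhs)
    {
        for(const auto &b : rhs)
        {
            out.emplace_back(a, b);
        }
    }
    return out;
}

int main()
{
    const std::vector<int>         m0    = { 4, 6 };  // the old m0_values_precommit
    const std::vector<std::string> types = { "QASYMM8", "QASYMM8_SIGNED" };
    // A full product would run 2 x 2 = 4 precommit cases; the patch instead
    // pairs {4} with QASYMM8 and {6} with QASYMM8_SIGNED in separate test
    // cases, keeping the precommit total at 2.
    std::printf("full product: %zu cases\n", combine(m0, types).size());
    return 0;
}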
diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h
index c17105edad..db52be5062 100644
--- a/tests/validation/fixtures/GEMMLowpFixture.h
+++ b/tests/validation/fixtures/GEMMLowpFixture.h
@@ -877,7 +877,8 @@ class GEMMLowpMatrixMultiplyReshapedOnlyRHSValidationFixture : public framework:
{
public:
template <typename...>
- void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int h0, bool interleave_rhs, bool transpose_rhs)
+ void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0,
+ unsigned int k0, unsigned int h0, bool interleave_rhs, bool transpose_rhs, DataType data_type)
{
GEMMLHSMatrixInfo lhs_info;
lhs_info.m0 = m0;
@@ -894,24 +895,40 @@ public:
const TensorShape lhs_shape(k, m, batch_size);
const TensorShape rhs_shape(n, k, batch_size);
- _target = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info);
- _reference = compute_reference(lhs_shape, rhs_shape);
+ _target = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, data_type);
+ _reference = compute_reference(lhs_shape, rhs_shape, data_type);
}
protected:
template <typename U>
void fill(U &&tensor, int i)
{
- // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
- std::uniform_int_distribution<> distribution(1, 254);
- library->fill(tensor, distribution, i);
+ switch(tensor.data_type())
+ {
+ case DataType::QASYMM8:
+ {
+ // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
+ std::uniform_int_distribution<> distribution(1, 254);
+ library->fill(tensor, distribution, i);
+ }
+ break;
+ case DataType::QASYMM8_SIGNED:
+ {
+ std::uniform_int_distribution<> distribution(-127, 126);
+ library->fill(tensor, distribution, i);
+ }
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Unsupported data type");
+ }
}
- TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info)
+ TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info,
+ const GEMMRHSMatrixInfo &rhs_info, DataType data_type)
{
// Create tensors
- TensorType lhs = create_tensor<TensorType>(lhs_shape, DataType::QASYMM8, 1);
- TensorType rhs = create_tensor<TensorType>(rhs_shape, DataType::QASYMM8, 1);
+ TensorType lhs = create_tensor<TensorType>(lhs_shape, data_type, 1);
+ TensorType rhs = create_tensor<TensorType>(rhs_shape, data_type, 1);
TensorType rhs_reshaped;
TensorType dst;
@@ -952,21 +969,36 @@ protected:
return dst;
}
- SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape)
+ SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type)
{
TensorShape dst_shape = lhs_shape;
dst_shape[0] = rhs_shape[0];
dst_shape[1] = lhs_shape[1];
- // Create reference
- SimpleTensor<uint8_t> lhs{ lhs_shape, DataType::QASYMM8, 1 };
- SimpleTensor<uint8_t> rhs{ rhs_shape, DataType::QASYMM8, 1 };
+ if(data_type == DataType::QASYMM8)
+ {
+ // Create reference
+ SimpleTensor<uint8_t> lhs{ lhs_shape, data_type, 1 };
+ SimpleTensor<uint8_t> rhs{ rhs_shape, data_type, 1 };
- // Fill reference
- fill(lhs, 0);
- fill(rhs, 1);
+ // Fill reference
+ fill(lhs, 0);
+ fill(rhs, 1);
- return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
+ return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
+ }
+ else
+ {
+ // Create reference
+ SimpleTensor<int8_t> lhs{ lhs_shape, data_type, 1 };
+ SimpleTensor<int8_t> rhs{ rhs_shape, data_type, 1 };
+
+ // Fill reference
+ fill(lhs, 0);
+ fill(rhs, 1);
+
+ return reference::gemmlowp_matrix_multiply_core<int32_t, int8_t>(lhs, rhs, dst_shape, 0, 0);
+ }
}
TensorType _target{};
@@ -978,8 +1010,8 @@ class GEMMLowpMatrixMultiplyReshapedOnlyRHS3DValidationFixture : public framewor
{
public:
template <typename...>
- void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int h0,
- bool interleave_rhs, bool transpose_rhs)
+ void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0,
+ unsigned int k0, unsigned int h0, bool interleave_rhs, bool transpose_rhs, DataType data_type)
{
GEMMLHSMatrixInfo lhs_info;
lhs_info.m0 = m0;
@@ -999,24 +1031,40 @@ public:
const TensorShape lhs_shape(k, m, batch_size);
const TensorShape rhs_shape(n, k, batch_size);
- _target = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, m_h);
- _reference = compute_reference(lhs_shape, rhs_shape, m_h);
+ _target = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, m_h, data_type);
+ _reference = compute_reference(lhs_shape, rhs_shape, m_h, data_type);
}
protected:
template <typename U>
void fill(U &&tensor, int i)
{
- // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
- std::uniform_int_distribution<> distribution(1, 254);
- library->fill(tensor, distribution, i);
+ switch(tensor.data_type())
+ {
+ case DataType::QASYMM8:
+ {
+ // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path
+ std::uniform_int_distribution<> distribution(1, 254);
+ library->fill(tensor, distribution, i);
+ }
+ break;
+ case DataType::QASYMM8_SIGNED:
+ {
+ std::uniform_int_distribution<> distribution(-127, 126);
+ library->fill(tensor, distribution, i);
+ }
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Unsupported data type");
+ }
}
- TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, unsigned int m_h)
+ TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info,
+ const GEMMRHSMatrixInfo &rhs_info, unsigned int m_h, DataType data_type)
{
// Create tensors
- TensorType lhs = create_tensor<TensorType>(lhs_shape, DataType::QASYMM8, 1);
- TensorType rhs = create_tensor<TensorType>(rhs_shape, DataType::QASYMM8, 1);
+ TensorType lhs = create_tensor<TensorType>(lhs_shape, data_type, 1);
+ TensorType rhs = create_tensor<TensorType>(rhs_shape, data_type, 1);
TensorType rhs_reshaped;
TensorType dst;
@@ -1057,7 +1105,7 @@ protected:
return dst;
}
- SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, unsigned int m_h)
+ SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, unsigned int m_h, DataType data_type)
{
TensorShape dst_shape = lhs_shape;
dst_shape.set(0, rhs_shape[0]);
@@ -1065,15 +1113,30 @@ protected:
dst_shape.set(2, m_h);
dst_shape.set(3, lhs_shape[2]);
- // Create reference
- SimpleTensor<uint8_t> lhs{ lhs_shape, DataType::QASYMM8, 1 };
- SimpleTensor<uint8_t> rhs{ rhs_shape, DataType::QASYMM8, 1 };
+ if(data_type == DataType::QASYMM8)
+ {
+ // Create reference
+ SimpleTensor<uint8_t> lhs{ lhs_shape, data_type, 1 };
+ SimpleTensor<uint8_t> rhs{ rhs_shape, data_type, 1 };
- // Fill reference
- fill(lhs, 0);
- fill(rhs, 1);
+ // Fill reference
+ fill(lhs, 0);
+ fill(rhs, 1);
- return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
+ return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0);
+ }
+ else
+ {
+ // Create reference
+ SimpleTensor<int8_t> lhs{ lhs_shape, data_type, 1 };
+ SimpleTensor<int8_t> rhs{ rhs_shape, data_type, 1 };
+
+ // Fill reference
+ fill(lhs, 0);
+ fill(rhs, 1);
+
+ return reference::gemmlowp_matrix_multiply_core<int32_t, int8_t>(lhs, rhs, dst_shape, 0, 0);
+ }
}
TensorType _target{};
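
The two compute_reference bodies above duplicate the fill-and-multiply sequence and differ only in the SimpleTensor element type: uint8_t for QASYMM8, int8_t for QASYMM8_SIGNED. The patch keeps a runtime if/else because data_type arrives as a runtime dataset argument; when the type is known at compile time, the same mapping can be written once, as in this hedged sketch (element_t is a hypothetical alias, not a Compute Library type):

#include <cstdint>
#include <type_traits>

enum class DataType { QASYMM8, QASYMM8_SIGNED };

// Compile-time mapping from the quantized data type to the element type
// the reference tensor holds.
template <DataType dt>
using element_t = std::conditional_t<dt == DataType::QASYMM8, uint8_t, int8_t>;

static_assert(std::is_same_v<element_t<DataType::QASYMM8>, uint8_t>, "QASYMM8 maps to uint8_t");
static_assert(std::is_same_v<element_t<DataType::QASYMM8_SIGNED>, int8_t>, "QASYMM8_SIGNED maps to int8_t");

int main()
{
    return 0;
}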