author    Gunes Bayir <gunes.bayir@arm.com>    2023-09-20 10:09:43 +0100
committer Gunes Bayir <gunes.bayir@arm.com>    2023-09-29 11:07:32 +0000
commit    a396da19ee6e5c36ae07c11e4f16a6787e9bc143 (patch)
tree      d181f4185ce241667950dce52be87177af611262
parent    6e56bf3b58719772111236d3b0030fbb5e8d2e16 (diff)
download  ComputeLibrary-a396da19ee6e5c36ae07c11e4f16a6787e9bc143.tar.gz
Implement Quantized Matmul T/T and T/Nt kernels using MMUL extension
Resolves: COMPMID-6476, COMPMID-6477

Change-Id: Ied37c269d5a108ff72f70e3ad932cf372bda5562
Signed-off-by: Gunes Bayir <gunes.bayir@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10346
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Jakub Sujak <jakub.sujak@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r-- src/core/CL/cl_kernels/common/mat_mul_quantized_mmul.cl | 287
-rw-r--r-- tests/validation/CL/MatMulLowpNativeMMULKernel.cpp      |  60
2 files changed, 335 insertions(+), 12 deletions(-)
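Note on the offset handling in the kernels below: each accumulator is seeded with K * LHS_OFFSET * RHS_OFFSET, the MMUL loop accumulates the raw products, and at the end RHS_OFFSET times the row sums of the LHS plus LHS_OFFSET times the column sums of the RHS is subtracted. Algebraically this equals accumulating (a - LHS_OFFSET) * (b - RHS_OFFSET) over K. The following is a minimal host-side sketch (not part of the patch) that checks this identity; all names and values in it are purely illustrative.

#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    const int M = 3, N = 2, K = 8;
    const int lhs_offset = 3, rhs_offset = -5; // example zero points

    std::vector<int8_t> a(M * K), b(K * N);
    for(int i = 0; i < M * K; ++i) a[i] = int8_t((i * 7) % 17 - 8);
    for(int i = 0; i < K * N; ++i) b[i] = int8_t((i * 5) % 13 - 6);

    for(int m = 0; m < M; ++m)
    {
        for(int n = 0; n < N; ++n)
        {
            // Reference: accumulate the de-offset products directly
            int ref = 0;
            for(int k = 0; k < K; ++k)
                ref += (int(a[m * K + k]) - lhs_offset) * (int(b[k * N + n]) - rhs_offset);

            // Kernel-style: raw dot product plus the correction terms
            int acc = K * lhs_offset * rhs_offset; // accumulator initialisation
            int a_sum = 0, b_sum = 0;              // row sum of A / column sum of B
            for(int k = 0; k < K; ++k)
            {
                acc += int(a[m * K + k]) * int(b[k * N + n]);
                a_sum += a[m * K + k];
                b_sum += b[k * N + n];
            }
            acc -= rhs_offset * a_sum + lhs_offset * b_sum;

            std::printf("m=%d n=%d ref=%d acc=%d %s\n", m, n, ref, acc, ref == acc ? "OK" : "MISMATCH");
        }
    }
    return 0;
}

The factored form lets the kernels compute the row/column sums once per tile alongside the MMUL accumulation (and skip them entirely when the corresponding offset is zero), instead of subtracting the offsets from every loaded element.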
diff --git a/src/core/CL/cl_kernels/common/mat_mul_quantized_mmul.cl b/src/core/CL/cl_kernels/common/mat_mul_quantized_mmul.cl
index 4ab81d13cc..fdfb75d39c 100644
--- a/src/core/CL/cl_kernels/common/mat_mul_quantized_mmul.cl
+++ b/src/core/CL/cl_kernels/common/mat_mul_quantized_mmul.cl
@@ -514,7 +514,9 @@ __kernel void mat_mul_native_quantized_mmul_nt_t(
/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS transposed, RHS non-transposed
*
* Supported block configurations:
- * TODO: Report supported M0, N0, K0
+ * - M0 = 1, 2, 3, 4, 8, 16
+ * - N0 = 1, 2, 3, 4, 8, 16
+ * - K0 = 4
*
* Similar to mat_mul_native_quantized_mmul_nt_nt()
*/
@@ -526,6 +528,149 @@ __kernel void mat_mul_native_quantized_mmul_t_nt(
#endif // defined(BIAS)
TENSOR3D_T(dst, BUFFER))
{
+ const uint x0 = get_global_id(0); // [0, (N / N0) * MMUL_M0)
+ // The upper limit is a simplified version of ((N / N0) / MMUL_N0) * MMUL_BLOCK_SIZE
+ const uint y0 = get_global_id(1); // [0, (M / M0) / MMUL_M0)
+ const uint z = get_global_id(2); // Batch
+
+ // Get section coordinates
+ const uint section_x = (x0 / MMUL_BLOCK_SIZE);
+ const uint section_y = y0;
+
+ // Get thread coordinates within an mmul block
+ const uint thread_id = (x0 % MMUL_BLOCK_SIZE);
+ const uint thread_x = thread_id % MMUL_N0;
+ const uint thread_y = (thread_id / MMUL_N0);
+
+ // Calculate dst coordinates
+ const uint dst_x_unclamped = thread_x * N0 + section_x * N0 * MMUL_N0;
+ const uint dst_y_unclamped = thread_y * M0 + section_y * M0 * MMUL_M0;
+ const uint dst_x = min(dst_x_unclamped, (uint)(N - N0));
+ const uint dst_y = min(dst_y_unclamped, (uint)(M - M0));
+
+ // Starting LHS coordinates
+ const uint lhs_x = dst_y;
+ const uint lhs_y = K0 * thread_x;
+
+ // Starting RHS coordinates
+ const uint rhs_x = dst_x;
+ const uint rhs_y = K0 * thread_y;
+
+ // Compute LHS/RHS/DST matrix address
+ lhs_offset_first_element_in_bytes += lhs_x * sizeof(DATA_TYPE) + lhs_y * lhs_stride_y + z * lhs_stride_z;
+ rhs_offset_first_element_in_bytes += rhs_x * sizeof(DATA_TYPE) + rhs_y * rhs_stride_y + z * rhs_stride_z;
+ dst_offset_first_element_in_bytes += dst_x * sizeof(DATA_TYPE) + dst_y * dst_stride_y + z * dst_stride_z;
+
+ // Initialize the accumulators
+ TILE(int, M0, N0, c);
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c[i].v = K * ((int)LHS_OFFSET) * ((int)RHS_OFFSET);
+ })
+
+ // Calculate row and column sums
+ TILE(int, 1, N0, b_sum);
+ b_sum[0].v = 0;
+
+ TILE(int, 1, M0, a_sum);
+ a_sum[0].v = 0;
+
+ VEC_DATA_TYPE(DATA_TYPE, K0)
+ vec_1 = (VEC_DATA_TYPE(DATA_TYPE, K0))(1, 1, 1, 1);
+
+ for(int k = 0; k < lhs_h; k += MMUL_K0)
+ {
+ TILE(DATA_TYPE, K0, M0, a);
+ TILE(DATA_TYPE, K0, N0, b);
+
+ // Load tile from the lhs/rhs tensors
+ T_LOAD(DATA_TYPE, K0, M0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+ T_LOAD(DATA_TYPE, K0, N0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b);
+
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ VEC_DATA_TYPE(DATA_TYPE, K0)
+ vec_a = (VEC_DATA_TYPE(DATA_TYPE, K0))(a[0].s[m0], a[1].s[m0], a[2].s[m0], a[3].s[m0]);
+
+ LOOP_UNROLLING(int, n0, 0, 1, N0,
+ {
+ VEC_DATA_TYPE(DATA_TYPE, K0)
+ vec_b = (VEC_DATA_TYPE(DATA_TYPE, K0))(b[0].s[n0], b[1].s[n0], b[2].s[n0], b[3].s[n0]);
+
+ c[m0].s[n0] = arm_matrix_multiply(vec_a, vec_b, c[m0].s[n0]);
+ })
+
+#if RHS_OFFSET != 0
+ // Row Sum of A: Calculate the sum of rows by multiplying A with
+ // a matrix of 1's from Right
+ a_sum[0].s[m0] = arm_matrix_multiply(vec_a, vec_1, a_sum[0].s[m0]);
+#endif // RHS_OFFSET != 0
+ })
+
+#if LHS_OFFSET != 0
+ // Column Sum of B: Calculate the sum of columns by multiplying B
+ // with a matrix of 1's from Left
+ LOOP_UNROLLING(int, n0, 0, 1, N0,
+ {
+ VEC_DATA_TYPE(DATA_TYPE, K0)
+ vec_b = (VEC_DATA_TYPE(DATA_TYPE, K0))(b[0].s[n0], b[1].s[n0], b[2].s[n0], b[3].s[n0]);
+
+ b_sum[0].s[n0] = arm_matrix_multiply(vec_1, vec_b, b_sum[0].s[n0]);
+ })
+#endif // LHS_OFFSET != 0
+
+ lhs_offset_first_element_in_bytes += MMUL_K0 * lhs_stride_y;
+ rhs_offset_first_element_in_bytes += MMUL_K0 * rhs_stride_y;
+ }
+
+ // Do not write if the coordinates are out of bounds
+ // However, the reads above must still happen, as arm_matrix_multiply() expects the same number of calls from every work-item in the block
+ if(dst_x_unclamped >= N || dst_y_unclamped >= M)
+ {
+ return;
+ }
+
+#if RHS_OFFSET != 0 || LHS_OFFSET != 0
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ const int A = ((int)RHS_OFFSET) * a_sum[0].s[i];
+ LOOP_UNROLLING(int, j, 0, 1, N0,
+ {
+ c[i].s[j] -= A + ((int)(LHS_OFFSET)) * b_sum[0].s[j];
+ })
+ })
+#endif // RHS_OFFSET != 0 || LHS_OFFSET != 0
+
+#ifdef BIAS
+ perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, c, dst_x);
+#endif // defined(BIAS)
+
+ // Quantize the tile
+ TILE(DATA_TYPE, M0, N0, cq);
+ T_QUANTIZE8_ASYMMETRIC(int, DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, c, cq);
+
+ if(dst_x + N0 <= N || N0_LEFTOVER == 0)
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VSTORE(N0)
+ (cq[m0].v, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + m0 * dst_stride_y));
+ }
+ })
+ }
+ else
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VSTORE_PARTIAL(N0, N0_LEFTOVER)
+ (cq[m0].v, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + m0 * dst_stride_y));
+ }
+ })
+ }
}
#endif // defined(MAT_MUL_NATIVE_QUANTIZED_MMUL_T_NT)
@@ -533,7 +678,9 @@ __kernel void mat_mul_native_quantized_mmul_t_nt(
/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS transposed, RHS transposed
*
* Supported block configurations:
- * TODO: Report supported M0, N0, K0
+ * - M0 = 1, 2, 3, 4, 8, 16
+ * - N0 = 1, 2, 3, 4, 8, 16
+ * - K0 = 4
*
* Similar to mat_mul_native_quantized_mmul_nt_nt()
*/
@@ -545,5 +692,141 @@ __kernel void mat_mul_native_quantized_mmul_t_t(
#endif // defined(BIAS)
TENSOR3D_T(dst, BUFFER))
{
+ const uint x0 = get_global_id(0); // [0, (N / N0) * MMUL_M0)
+ // The upper limit is a simplified version of ((N / N0) / MMUL_N0) * MMUL_BLOCK_SIZE
+ const uint y0 = get_global_id(1); // [0, (M / M0) / MMUL_M0)
+ const uint z = get_global_id(2); // Batch
+
+ // Get section coordinates
+ const uint section_x = (x0 / MMUL_BLOCK_SIZE);
+ const uint section_y = y0;
+
+ // Get thread coordinates within an mmul block
+ const uint thread_id = (x0 % MMUL_BLOCK_SIZE);
+ const uint thread_x = thread_id % MMUL_N0;
+ const uint thread_y = (thread_id / MMUL_N0);
+
+ // Calculate dst coordinates
+ const uint dst_x_unclamped = thread_x * N0 + section_x * N0 * MMUL_N0;
+ const uint dst_y_unclamped = thread_y * M0 + section_y * M0 * MMUL_M0;
+ const uint dst_x = min(dst_x_unclamped, (uint)(N - N0));
+ const uint dst_y = min(dst_y_unclamped, (uint)(M - M0));
+
+ // Starting LHS coordinates
+ const uint lhs_x = dst_y;
+ const uint lhs_y = K0 * thread_x;
+
+ // Starting RHS coordinates
+ const uint rhs_x = K0 * thread_y;
+ const uint rhs_y = dst_x;
+
+ // Compute LHS/RHS/DST matrix address
+ lhs_offset_first_element_in_bytes += lhs_x * sizeof(DATA_TYPE) + lhs_y * lhs_stride_y + z * lhs_stride_z;
+ rhs_offset_first_element_in_bytes += rhs_x * sizeof(DATA_TYPE) + rhs_y * rhs_stride_y + z * rhs_stride_z;
+ dst_offset_first_element_in_bytes += dst_x * sizeof(DATA_TYPE) + dst_y * dst_stride_y + z * dst_stride_z;
+
+ // Initialize the accumulators
+ TILE(int, M0, N0, c);
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c[i].v = K * ((int)LHS_OFFSET) * ((int)RHS_OFFSET);
+ })
+
+ // Calculate row and column sums
+ TILE(int, 1, N0, b_sum);
+ b_sum[0].v = 0;
+
+ TILE(int, 1, M0, a_sum);
+ a_sum[0].v = 0;
+
+ VEC_DATA_TYPE(DATA_TYPE, K0)
+ vec_1 = (VEC_DATA_TYPE(DATA_TYPE, K0))(1, 1, 1, 1);
+
+ for(int k = 0; k < lhs_h; k += MMUL_K0)
+ {
+ TILE(DATA_TYPE, K0, M0, a);
+ TILE(DATA_TYPE, N0, K0, b);
+
+ // Load tile from the lhs/rhs tensors
+ T_LOAD(DATA_TYPE, K0, M0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+ T_LOAD(DATA_TYPE, N0, K0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b);
+
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ VEC_DATA_TYPE(DATA_TYPE, K0)
+ vec_a = (VEC_DATA_TYPE(DATA_TYPE, K0))(a[0].s[m0], a[1].s[m0], a[2].s[m0], a[3].s[m0]);
+
+ LOOP_UNROLLING(int, n0, 0, 1, N0,
+ {
+ c[m0].s[n0] = arm_matrix_multiply(vec_a, b[n0].v, c[m0].s[n0]);
+ })
+#if RHS_OFFSET != 0
+ // Row Sum of A: Calculate the sum of rows by multiplying A with
+ // a matrix of 1's from Right
+ a_sum[0].s[m0] = arm_matrix_multiply(vec_a, vec_1, a_sum[0].s[m0]);
+#endif // RHS_OFFSET != 0
+ })
+
+#if LHS_OFFSET != 0
+ // Column Sum of B: Calculate the sum of columns by multiplying B
+ // with a matrix of 1's from Left
+ LOOP_UNROLLING(int, n0, 0, 1, N0,
+ {
+ b_sum[0].s[n0] = arm_matrix_multiply(vec_1, b[n0].v, b_sum[0].s[n0]);
+ })
+#endif // LHS_OFFSET != 0
+
+ lhs_offset_first_element_in_bytes += MMUL_K0 * lhs_stride_y;
+ rhs_offset_first_element_in_bytes += MMUL_K0 * sizeof(DATA_TYPE);
+ }
+
+ // Do not write if the coordinates are out of bounds
+ // However, the reads above must still happen, as arm_matrix_multiply() expects the same number of calls from every work-item in the block
+ if(dst_x_unclamped >= N || dst_y_unclamped >= M)
+ {
+ return;
+ }
+
+#if RHS_OFFSET != 0 || LHS_OFFSET != 0
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ const int A = ((int)RHS_OFFSET) * a_sum[0].s[i];
+ LOOP_UNROLLING(int, j, 0, 1, N0,
+ {
+ c[i].s[j] -= A + ((int)(LHS_OFFSET)) * b_sum[0].s[j];
+ })
+ })
+#endif // RHS_OFFSET != 0 || LHS_OFFSET != 0
+
+#ifdef BIAS
+ perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, c, dst_x);
+#endif // defined(BIAS)
+
+ // Quantize the tile
+ TILE(DATA_TYPE, M0, N0, cq);
+ T_QUANTIZE8_ASYMMETRIC(int, DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, c, cq);
+
+ if(dst_x + N0 <= N || N0_LEFTOVER == 0)
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VSTORE(N0)
+ (cq[m0].v, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + m0 * dst_stride_y));
+ }
+ })
+ }
+ else
+ {
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ if(dst_y + m0 < M || M0_LEFTOVER == 0)
+ {
+ VSTORE_PARTIAL(N0, N0_LEFTOVER)
+ (cq[m0].v, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + m0 * dst_stride_y));
+ }
+ })
+ }
}
#endif // defined(MAT_MUL_NATIVE_QUANTIZED_MMUL_T_T)
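For reference, both kernels above map work-items onto output tiles the same way: the x dimension of the NDRange interleaves MMUL blocks of MMUL_BLOCK_SIZE = MMUL_M0 * MMUL_N0 work-items, and each work-item owns an M0 x N0 destination tile whose origin is clamped so edge tiles stay in bounds. Below is a small host-side model of that index arithmetic (not part of the patch), assuming MMUL_M0 = MMUL_N0 = 4 so MMUL_BLOCK_SIZE = 16; the shapes and block sizes are illustrative only.

#include <algorithm>
#include <cstdio>

int main()
{
    const int MMUL_M0 = 4, MMUL_N0 = 4;                 // assumed MMUL geometry
    const int MMUL_BLOCK_SIZE = MMUL_M0 * MMUL_N0;      // work-items per MMUL block
    const int M0 = 2, N0 = 4;                           // tile owned by each work-item
    const int M = 17, N = 21;                           // output matrix shape

    for(int y0 = 0; y0 < 2; ++y0)                       // section row (get_global_id(1))
    {
        for(int x0 = 0; x0 < 2 * MMUL_BLOCK_SIZE; ++x0) // get_global_id(0)
        {
            const int section_x = x0 / MMUL_BLOCK_SIZE;
            const int section_y = y0;

            const int thread_id = x0 % MMUL_BLOCK_SIZE; // position inside the MMUL block
            const int thread_x  = thread_id % MMUL_N0;
            const int thread_y  = thread_id / MMUL_N0;

            // Unclamped tile origin, then clamped so edge tiles stay inside the output
            const int dst_x = std::min(thread_x * N0 + section_x * N0 * MMUL_N0, N - N0);
            const int dst_y = std::min(thread_y * M0 + section_y * M0 * MMUL_M0, M - M0);

            std::printf("x0=%2d y0=%d -> tile origin (%2d, %2d)\n", x0, y0, dst_x, dst_y);
        }
    }
    return 0;
}

The two kernels differ only in how the RHS is walked along K: the T/NT variant advances the RHS pointer by MMUL_K0 * rhs_stride_y per iteration, while the T/T variant advances it by MMUL_K0 * sizeof(DATA_TYPE) because its RHS K dimension runs along x.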
diff --git a/tests/validation/CL/MatMulLowpNativeMMULKernel.cpp b/tests/validation/CL/MatMulLowpNativeMMULKernel.cpp
index 561c455a00..ac46b67c9e 100644
--- a/tests/validation/CL/MatMulLowpNativeMMULKernel.cpp
+++ b/tests/validation/CL/MatMulLowpNativeMMULKernel.cpp
@@ -61,9 +61,10 @@ const auto n0_values_precommit = framework::dataset::make("N0", { 2, 4 });
/** M0 values to test --nightly*/
const auto m0_values_nightly_lhs_nt = framework::dataset::make("M0", { 2, 4, 5, 8 });
+const auto m0_values_nightly_lhs_t = framework::dataset::make("M0", { 2, 4, 8 });
/** N0 values to test --nightly*/
-const auto n0_values_nightly_rhs_nt = framework::dataset::make("N0", { 1, 3, 8, 16 });
+const auto n0_values_nightly = framework::dataset::make("N0", { 1, 3, 8, 16 });
TEST_SUITE(CL)
TEST_SUITE(MatMulLowpNativeMMULKernel)
@@ -80,9 +81,12 @@ TEST_CASE(SupportedKernelConfigurations, framework::DatasetMode::ALL)
{ MatMulKernelInfo(false, true, 3, 5, 4), false }, // N0 not in {1, 2, 3, 4, 8, 16}
{ MatMulKernelInfo(false, false, 3, 6, 4), false }, // N0 not in {1, 2, 3, 4, 8, 16}
{ MatMulKernelInfo(false, false, 3, 3, 8), false }, // K0 not in 4
+ { MatMulKernelInfo(true, false, 5, 3, 4), false }, // M0 not in {1, 2, 3, 4, 8, 16} when Lhs is transposed
{ MatMulKernelInfo(false, false, 9, 1, 4), true },
{ MatMulKernelInfo(false, true, 3, 16, 4), true },
{ MatMulKernelInfo(false, false, 7, 3, 4), true },
+ { MatMulKernelInfo(true, false, 8, 3, 4), true },
+ { MatMulKernelInfo(true, true, 4, 3, 4), true },
{ MatMulKernelInfo(false, false, 7, 3, 4, true), false }, // export to CLImage is unsupported for quantized types
};
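The positive and negative cases above imply the block-configuration rules for the lowp MMUL kernel: K0 must be 4, N0 must be one of {1, 2, 3, 4, 8, 16}, and M0 must also be one of {1, 2, 3, 4, 8, 16} when the LHS is transposed (with a non-transposed LHS, values such as 7 and 9 are accepted). A hedged sketch of that predicate follows; it mirrors only what these test expectations show, the names are hypothetical, and the authoritative check is the kernel's validate().

#include <cstdio>
#include <initializer_list>

static bool in_set(int v, std::initializer_list<int> s)
{
    for(int x : s) if(v == x) return true;
    return false;
}

// Hypothetical helper reproducing the rules implied by the test cases above
static bool is_config_supported(bool adj_lhs, int m0, int n0, int k0)
{
    if(k0 != 4) return false;                              // K0 not in 4
    if(!in_set(n0, {1, 2, 3, 4, 8, 16})) return false;     // N0 not in {1, 2, 3, 4, 8, 16}
    if(adj_lhs) return in_set(m0, {1, 2, 3, 4, 8, 16});    // M0 restricted when LHS transposed
    return m0 >= 1;                                        // e.g. M0 = 7, 9 accepted otherwise
}

int main()
{
    std::printf("%d\n", is_config_supported(true, 5, 3, 4));  // 0: M0 = 5 with LHS transposed
    std::printf("%d\n", is_config_supported(true, 8, 3, 4));  // 1
    std::printf("%d\n", is_config_supported(false, 7, 3, 4)); // 1
    return 0;
}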
@@ -216,7 +220,7 @@ TEST_SUITE(QASYMM8_SIGNED)
FIXTURE_DATA_TEST_CASE(RunSmall, CLMatMulLowpNativeMMULKernelFixture<int8_t>,
framework::DatasetMode::ALL,
combine(datasets::SmallMatMulLowpMMULDataset(),
- make("TransposeA", { false }),
+ make("TransposeA", { false, true }),
make("TransposeB", { false, true }),
m0_values_precommit,
n0_values_precommit,
@@ -234,7 +238,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLMatMulLowpNativeMMULKernelFixture<int8_t>,
FIXTURE_DATA_TEST_CASE(RunWithBias, CLMatMulLowpNativeMMULKernelWithBiasFixture<int8_t>,
framework::DatasetMode::ALL,
combine(datasets::SmallMatMulLowpMMULWithBiasDataset(),
- make("TransposeA", { false }),
+ make("TransposeA", { false, true }),
make("TransposeB", { false, true }),
m0_values_precommit,
n0_values_precommit,
@@ -249,13 +253,31 @@ FIXTURE_DATA_TEST_CASE(RunWithBias, CLMatMulLowpNativeMMULKernelWithBiasFixture<
}
}
-FIXTURE_DATA_TEST_CASE(RunLargeNoTranspose, CLMatMulLowpNativeMMULKernelFixture<int8_t>,
+FIXTURE_DATA_TEST_CASE(RunLargeLhsNotTransposed, CLMatMulLowpNativeMMULKernelFixture<int8_t>,
framework::DatasetMode::NIGHTLY,
combine(datasets::LargeMatMulLowpMMULDataset(),
make("TransposeA", { false }),
make("TransposeB", { false, true }),
m0_values_nightly_lhs_nt,
- n0_values_nightly_rhs_nt,
+ n0_values_nightly,
+ make("K0", { 4 }),
+ make("ExportRhsToCLImage", { false }),
+ make("DataType", DataType::QASYMM8_SIGNED)))
+{
+ if(_device_supports_mmul)
+ {
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_quant);
+ }
+}
+
+FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposed, CLMatMulLowpNativeMMULKernelFixture<int8_t>,
+ framework::DatasetMode::NIGHTLY,
+ combine(datasets::LargeMatMulLowpMMULDataset(),
+ make("TransposeA", { true }),
+ make("TransposeB", { false, true }),
+ m0_values_nightly_lhs_t,
+ n0_values_nightly,
make("K0", { 4 }),
make("ExportRhsToCLImage", { false }),
make("DataType", DataType::QASYMM8_SIGNED)))
@@ -272,7 +294,7 @@ FIXTURE_DATA_TEST_CASE(RunLargeNoTranspose, CLMatMulLowpNativeMMULKernelFixture<
FIXTURE_DATA_TEST_CASE(RunHighDimensional, CLMatMulLowpNativeMMULKernelFixture<int8_t>,
framework::DatasetMode::ALL,
combine(datasets::HighDimensionalMatMulLowpMMULDataset(),
- make("TransposeA", { false }),
+ make("TransposeA", { false, true }),
make("TransposeB", { false, true }),
make("M0", { 2 }),
make("N0", { 2 }),
@@ -294,7 +316,7 @@ TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunSmall, CLMatMulLowpNativeMMULKernelFixture<uint8_t>,
framework::DatasetMode::ALL,
combine(datasets::SmallMatMulLowpMMULDatasetSubset(),
- make("TransposeA", { false }),
+ make("TransposeA", { false, true }),
make("TransposeB", { false, true }),
m0_values_precommit,
n0_values_precommit,
@@ -312,7 +334,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLMatMulLowpNativeMMULKernelFixture<uint8_t>,
FIXTURE_DATA_TEST_CASE(RunWithBias, CLMatMulLowpNativeMMULKernelWithBiasFixture<uint8_t>,
framework::DatasetMode::ALL,
combine(datasets::SmallMatMulLowpMMULWithBiasDataset(),
- make("TransposeA", { false }),
+ make("TransposeA", { false, true }),
make("TransposeB", { false, true }),
m0_values_precommit,
n0_values_precommit,
@@ -327,13 +349,31 @@ FIXTURE_DATA_TEST_CASE(RunWithBias, CLMatMulLowpNativeMMULKernelWithBiasFixture<
}
}
-FIXTURE_DATA_TEST_CASE(RunLargeNoTranspose, CLMatMulLowpNativeMMULKernelFixture<uint8_t>,
+FIXTURE_DATA_TEST_CASE(RunLargeLhsNotTransposed, CLMatMulLowpNativeMMULKernelFixture<uint8_t>,
framework::DatasetMode::NIGHTLY,
combine(datasets::LargeMatMulLowpMMULDataset(),
make("TransposeA", { false }),
make("TransposeB", { false, true }),
m0_values_nightly_lhs_nt,
- n0_values_nightly_rhs_nt,
+ n0_values_nightly,
+ make("K0", { 4 }),
+ make("ExportRhsToCLImage", { false }),
+ make("DataType", DataType::QASYMM8)))
+{
+ if(_device_supports_mmul)
+ {
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_quant);
+ }
+}
+
+FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposed, CLMatMulLowpNativeMMULKernelFixture<uint8_t>,
+ framework::DatasetMode::NIGHTLY,
+ combine(datasets::LargeMatMulLowpMMULDataset(),
+ make("TransposeA", { true }),
+ make("TransposeB", { false, true }),
+ m0_values_nightly_lhs_t,
+ n0_values_nightly,
make("K0", { 4 }),
make("ExportRhsToCLImage", { false }),
make("DataType", DataType::QASYMM8)))