-rw-r--r--  Android.bp                                                 3
-rw-r--r--  SConscript                                                 1
-rw-r--r--  docs/user_guide/release_version_and_change_log.dox         2
-rw-r--r--  filelist.json                                              2
-rw-r--r--  src/core/CL/cl_kernels/common/mat_mul_quantized_mmul.cl  143
-rw-r--r--  src/gpu/cl/ClKernelLibrary.cpp                             8
-rw-r--r--  src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp           28
-rw-r--r--  src/gpu/cl/kernels/ClMatMulLowpNativeMMULKernel.cpp      165
-rw-r--r--  src/gpu/cl/kernels/ClMatMulLowpNativeMMULKernel.h         69
-rw-r--r--  src/gpu/cl/kernels/ClMatMulNativeKernel.cpp               25
-rw-r--r--  src/gpu/cl/kernels/ClMatMulNativeMMULKernel.cpp           90
-rw-r--r--  src/gpu/cl/kernels/helpers/MatMulKernelHelpers.cpp       103
-rw-r--r--  src/gpu/cl/kernels/helpers/MatMulKernelHelpers.h          68
-rw-r--r--  tests/validation/CL/MatMulLowpNativeMMULKernel.cpp       208
14 files changed, 795 insertions, 120 deletions
diff --git a/Android.bp b/Android.bp
index 547f11d0d2..da69d4fe87 100644
--- a/Android.bp
+++ b/Android.bp
@@ -47,6 +47,7 @@ opencl_srcs = [
"src/core/CL/cl_kernels/common/mat_mul.cl",
"src/core/CL/cl_kernels/common/mat_mul_mmul.cl",
"src/core/CL/cl_kernels/common/mat_mul_quantized.cl",
+ "src/core/CL/cl_kernels/common/mat_mul_quantized_mmul.cl",
"src/core/CL/cl_kernels/common/mean_stddev_normalization.cl",
"src/core/CL/cl_kernels/common/memset.cl",
"src/core/CL/cl_kernels/common/minmax_layer.cl",
@@ -721,6 +722,7 @@ cc_library_static {
"src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.cpp",
"src/gpu/cl/kernels/ClIndirectConv2dKernel.cpp",
"src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp",
+ "src/gpu/cl/kernels/ClMatMulLowpNativeMMULKernel.cpp",
"src/gpu/cl/kernels/ClMatMulNativeKernel.cpp",
"src/gpu/cl/kernels/ClMatMulNativeMMULKernel.cpp",
"src/gpu/cl/kernels/ClMulKernel.cpp",
@@ -748,6 +750,7 @@ cc_library_static {
"src/gpu/cl/kernels/gemm/reshaped/ClGemmDefaultConfigReshapedValhall.cpp",
"src/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultConfigReshapedRhsOnlyBifrost.cpp",
"src/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultConfigReshapedRhsOnlyValhall.cpp",
+ "src/gpu/cl/kernels/helpers/MatMulKernelHelpers.cpp",
"src/gpu/cl/operators/ClActivation.cpp",
"src/gpu/cl/operators/ClAdd.cpp",
"src/gpu/cl/operators/ClCast.cpp",
diff --git a/SConscript b/SConscript
index 7bc5affde1..fab9b65acc 100644
--- a/SConscript
+++ b/SConscript
@@ -430,6 +430,7 @@ if env['opencl'] and env['embed_kernels']:
'src/core/CL/cl_kernels/common/mat_mul.cl',
'src/core/CL/cl_kernels/common/mat_mul_mmul.cl',
'src/core/CL/cl_kernels/common/mat_mul_quantized.cl',
+ 'src/core/CL/cl_kernels/common/mat_mul_quantized_mmul.cl',
'src/core/CL/cl_kernels/common/mean_stddev_normalization.cl',
'src/core/CL/cl_kernels/common/memset.cl',
'src/core/CL/cl_kernels/common/minmax_layer.cl',
diff --git a/docs/user_guide/release_version_and_change_log.dox b/docs/user_guide/release_version_and_change_log.dox
index 0142497adc..1a5add02fc 100644
--- a/docs/user_guide/release_version_and_change_log.dox
+++ b/docs/user_guide/release_version_and_change_log.dox
@@ -53,6 +53,8 @@ v23.11 Public major release
- Optimize @ref cpu::CpuReshape
- Port the following kernels in the experimental Dynamic Fusion interface to use the new Compute Kernel Writer interface with support for FP16/FP32 only:
- @ref experimental::dynamic_fusion::GpuCkwPool2d
+ - Add new OpenCL™ kernels:
+ - @ref opencl::kernels::ClMatMulLowpNativeMMULKernel supporting QASYMM8 and QASYMM8_SIGNED, with batch support
v23.08 Public major release
- Deprecate the legacy 'libarm_compute_core' library. This library is an artifact of Compute Library's legacy library architecture and no longer serves any purpose.
diff --git a/filelist.json b/filelist.json
index 393ce933b6..d3cf984d8b 100644
--- a/filelist.json
+++ b/filelist.json
@@ -521,8 +521,10 @@
"files": {
"common": [
"src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp",
+ "src/gpu/cl/kernels/ClMatMulLowpNativeMMULKernel.cpp",
"src/gpu/cl/kernels/ClMatMulNativeKernel.cpp",
"src/gpu/cl/kernels/ClMatMulNativeMMULKernel.cpp",
+ "src/gpu/cl/kernels/helpers/MatMulKernelHelpers.cpp",
"src/gpu/cl/operators/ClMatMul.cpp",
"src/runtime/CL/functions/CLMatMul.cpp",
"src/runtime/heuristics/matmul_native/ClMatMulNativeDefaultConfigValhall.cpp",
diff --git a/src/core/CL/cl_kernels/common/mat_mul_quantized_mmul.cl b/src/core/CL/cl_kernels/common/mat_mul_quantized_mmul.cl
new file mode 100644
index 0000000000..56e278c584
--- /dev/null
+++ b/src/core/CL/cl_kernels/common/mat_mul_quantized_mmul.cl
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "activation_float_helpers.h"
+#include "helpers.h"
+#include "tile_helpers.h"
+
+#ifdef BIAS
+// This function performs in-place bias addition for the integer data types when bias is enabled.
+// Note: The tile dimensions used for the LHS and RHS matrices (M0, N0) must be passed at compile time using -DN0, -DM0 (e.g. -DN0=8, -DM0=4).
+inline void perform_bias_addition(uchar *bias_ptr, uint bias_offset_first_element_in_bytes, TILE(int, M0, N0, acc), uint x)
+{
+ TILE(int, 1, N0, bias_tile);
+
+ // below expands to use bias_ptr and bias_offset_first_element_in_bytes
+ T_LOAD(int, 1, N0, BUFFER, bias, x, 0, 1, 0, bias_tile);
+
+ // c = c + bias[broadcasted]
+ T_ELTWISE_BROADCAST_ADD_X(int, M0, N0, acc, bias_tile, acc);
+}
+#endif // defined(BIAS)
+
+#if defined(MAT_MUL_NATIVE_QUANTIZED_MMUL_NT_NT)
+/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS non-transposed, RHS non-transposed - buffer only
+ *
+ * TODO: report build configuration
+ *
+ * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: QASYMM8_SIGNED/QASYMM8
+ * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in] lhs_w The width of the lhs tensor
+ * @param[in] lhs_h The height of the lhs tensor
+ * @param[in] lhs_n Number of the matrices (buffers) in the batch
+ * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix
+ * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr
+ * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in] rhs_w The width of the rhs tensor
+ * @param[in] rhs_h The height of the rhs tensor
+ * @param[in] rhs_n Number of the matrices (buffers) in the batch
+ * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix
+ * @param[in] bias_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p lhs_ptr
+ * @param[in] bias_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes)
+ * @param[in] bias_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes)
+ * @param[in] bias_w (Optional) The size of the width dimension of the bias tensor
+ * @param[in] bias_h (Optional) The size of the height dimension of the bias tensor
+ * @param[in] bias_n (Optional) The size of the depth dimension of the bias tensor
+ * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor
+ * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr
+ * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes)
+ * @param[in] dst_w The width of the dst tensor
+ * @param[in] dst_h The height of the dst tensor
+ * @param[in] dst_n Number of the matrices (buffers) in the batch
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix
+ */
+__kernel void mat_mul_native_quantized_mmul_nt_nt(
+ TENSOR3D_T(lhs, BUFFER),
+ TENSOR3D_T(rhs, BUFFER),
+#ifdef BIAS
+ TENSOR3D_T(bias, BUFFER),
+#endif // defined(BIAS)
+ TENSOR3D_T(dst, BUFFER))
+{
+}
+#endif // defined(MAT_MUL_NATIVE_QUANTIZED_MMUL_NT_NT)
+
+#if defined(MAT_MUL_NATIVE_QUANTIZED_MMUL_NT_T)
+/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS non-transposed, RHS transposed - buffer only
+ *
+ * Supported block configurations:
+ * TODO: Report supported M0, N0, K0
+ *
+ * Similar to mat_mul_native_quantized_mmul_nt_nt()
+ */
+__kernel void mat_mul_native_quantized_mmul_nt_t(
+ TENSOR3D_T(lhs, BUFFER),
+ TENSOR3D_T(rhs, BUFFER),
+#ifdef BIAS
+ TENSOR3D_T(bias, BUFFER),
+#endif // defined(BIAS)
+ TENSOR3D_T(dst, BUFFER))
+{
+}
+#endif // defined(MAT_MUL_NATIVE_QUANTIZED_MMUL_NT_T)
+
+#if defined(MAT_MUL_NATIVE_QUANTIZED_MMUL_T_NT)
+/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS transposed, RHS non-transposed - buffer only
+ *
+ * Supported block configurations:
+ * TODO: Report supported M0, N0, K0
+ *
+ * Similar to mat_mul_native_quantized_mmul_nt_nt()
+ */
+__kernel void mat_mul_native_quantized_mmul_t_nt(
+ TENSOR3D_T(lhs, BUFFER),
+ TENSOR3D_T(rhs, BUFFER),
+#ifdef BIAS
+ TENSOR3D_T(bias, BUFFER),
+#endif // defined(BIAS)
+ TENSOR3D_T(dst, BUFFER))
+{
+}
+#endif // defined(MAT_MUL_NATIVE_QUANTIZED_MMUL_T_NT)
+
+#if defined(MAT_MUL_NATIVE_QUANTIZED_MMUL_T_T)
+/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS transposed, RHS transposed - buffer only
+ *
+ * Supported block configurations:
+ * TODO: Report supported M0, N0, K0
+ *
+ * Similar to mat_mul_native_quantized_mmul_nt_nt()
+ */
+__kernel void mat_mul_native_quantized_mmul_t_t(
+ TENSOR3D_T(lhs, BUFFER),
+ TENSOR3D_T(rhs, BUFFER),
+#ifdef BIAS
+ TENSOR3D_T(bias, BUFFER),
+#endif // defined(BIAS)
+ TENSOR3D_T(dst, BUFFER))
+{
+}
+#endif // defined(MAT_MUL_NATIVE_QUANTIZED_MMUL_T_T)
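The perform_bias_addition() helper above loads a 1xN0 bias row and broadcasts it across every row of the M0xN0 accumulator tile. The following is a minimal scalar C++ sketch of that broadcast-add semantics only; the fixed M0/N0 values and the function name are illustrative stand-ins for the -DM0/-DN0 build options and the TILE/T_LOAD/T_ELTWISE_BROADCAST_ADD_X macros, not the actual implementation:

```cpp
#include <array>
#include <cstdint>
#include <cstdio>

// Illustrative tile dimensions; in the kernel these arrive via -DM0/-DN0.
constexpr int M0 = 4;
constexpr int N0 = 8;

// Scalar equivalent of the broadcast add: add bias[j] to every row of acc.
void bias_broadcast_add(std::array<std::array<int32_t, N0>, M0> &acc,
                        const std::array<int32_t, N0> &bias)
{
    for(int i = 0; i < M0; ++i)
    {
        for(int j = 0; j < N0; ++j)
        {
            acc[i][j] += bias[j]; // c = c + bias[broadcasted]
        }
    }
}

int main()
{
    std::array<std::array<int32_t, N0>, M0> acc{}; // zero-initialized accumulator tile
    std::array<int32_t, N0> bias{};
    bias.fill(5);
    bias_broadcast_add(acc, bias);
    std::printf("acc[0][0] = %d\n", acc[0][0]); // prints 5
    return 0;
}
```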
diff --git a/src/gpu/cl/ClKernelLibrary.cpp b/src/gpu/cl/ClKernelLibrary.cpp
index e4a3d30b6d..73bb96298e 100644
--- a/src/gpu/cl/ClKernelLibrary.cpp
+++ b/src/gpu/cl/ClKernelLibrary.cpp
@@ -322,6 +322,10 @@ const std::map<std::string, std::string> ClKernelLibrary::_kernel_program_map =
{ "mat_mul_native_quantized_nt_t", "common/mat_mul_quantized.cl" },
{ "mat_mul_native_quantized_t_nt", "common/mat_mul_quantized.cl" },
{ "mat_mul_native_quantized_t_t", "common/mat_mul_quantized.cl" },
+ { "mat_mul_native_quantized_mmul_nt_nt", "common/mat_mul_quantized_mmul.cl" },
+ { "mat_mul_native_quantized_mmul_nt_t", "common/mat_mul_quantized_mmul.cl" },
+ { "mat_mul_native_quantized_mmul_t_nt", "common/mat_mul_quantized_mmul.cl" },
+ { "mat_mul_native_quantized_mmul_t_t", "common/mat_mul_quantized_mmul.cl" },
{ "max_unpooling_layer_2", "common/unpooling_layer.cl" },
{ "mean_stddev_normalization", "common/mean_stddev_normalization.cl" },
{ "memset", "common/memset.cl" },
@@ -781,6 +785,10 @@ const std::map<std::string, std::string> ClKernelLibrary::_program_source_map =
"common/mat_mul_quantized.cl",
#include "./cl_kernels/common/mat_mul_quantized.clembed"
},
+ {
+ "common/mat_mul_quantized_mmul.cl",
+#include "./cl_kernels/common/mat_mul_quantized_mmul.clembed"
+ },
#ifdef ENABLE_NCHW_KERNELS
{
"nchw/batch_to_space.cl",
diff --git a/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp b/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp
index f7fdbe2c23..66331bc818 100644
--- a/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp
+++ b/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp
@@ -23,22 +23,22 @@
*/
#include "src/gpu/cl/kernels/ClMatMulLowpNativeKernel.h"
-#include "arm_compute/core/utils/ActivationFunctionUtils.h"
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/ITensorPack.h"
+#include "arm_compute/core/QuantizationInfo.h"
#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/utils/ActivationFunctionUtils.h"
+#include "arm_compute/core/utils/StringUtils.h"
#include "arm_compute/core/utils/helpers/AdjustVecSize.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
-#include "arm_compute/core/utils/StringUtils.h"
#include "src/common/utils/Log.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "src/gpu/cl/ClCompileContext.h"
-
-#include "arm_compute/core/QuantizationInfo.h"
+#include "src/gpu/cl/kernels/helpers/MatMulKernelHelpers.h"
#include "support/Cast.h"
#include "support/StringSupport.h"
@@ -80,24 +80,6 @@ Status validate_matmul_kernel_info(const MatMulKernelInfo &matmul_kernel_info)
return Status{};
}
-
-Status validate_input_shapes(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const MatMulKernelInfo &matmul_kernel_info)
-{
- const size_t lhs_k = matmul_kernel_info.adj_lhs ? lhs_shape.y() : lhs_shape.x();
- const size_t rhs_k = matmul_kernel_info.adj_rhs ? rhs_shape.x() : rhs_shape.y();
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_k != rhs_k, "K dimension in Lhs and Rhs matrices must match.");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_shape.total_size() == 0, "Lhs tensor can't be empty");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(rhs_shape.total_size() == 0, "Rhs tensor can't be empty");
-
- constexpr size_t batch_dim_start = 2;
- for(size_t i = batch_dim_start; i < Coordinates::num_max_dimensions; ++i)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_shape[i] != rhs_shape[i], "Batch dimension broadcasting is not supported");
- }
-
- return Status{};
-}
}
ClMatMulLowpNativeKernel::ClMatMulLowpNativeKernel()
{
@@ -110,7 +92,7 @@ Status ClMatMulLowpNativeKernel::validate(const ITensorInfo *lhs, const ITensorI
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, rhs);
ARM_COMPUTE_RETURN_ON_ERROR(validate_matmul_kernel_info(matmul_kernel_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_input_shapes(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_matmul_input_shapes(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info));
ARM_COMPUTE_RETURN_ERROR_ON_MSG((act_info.activation() != ActivationFunction::IDENTITY && act_info.activation() != ActivationFunction::RELU
&& act_info.activation() != ActivationFunction::LU_BOUNDED_RELU && act_info.activation() != ActivationFunction::BOUNDED_RELU),
diff --git a/src/gpu/cl/kernels/ClMatMulLowpNativeMMULKernel.cpp b/src/gpu/cl/kernels/ClMatMulLowpNativeMMULKernel.cpp
new file mode 100644
index 0000000000..4a6a3f396e
--- /dev/null
+++ b/src/gpu/cl/kernels/ClMatMulLowpNativeMMULKernel.cpp
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/gpu/cl/kernels/ClMatMulLowpNativeMMULKernel.h"
+
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/ITensorPack.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/utils/StringUtils.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+
+#include "src/common/utils/Log.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/core/helpers/WindowHelpers.h"
+#include "src/gpu/cl/ClCompileContext.h"
+#include "src/gpu/cl/kernels/helpers/MatMulKernelHelpers.h"
+#include "support/Cast.h"
+#include "support/StringSupport.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+namespace kernels
+{
+namespace
+{
+// Block size dimensions for the MMUL extension
+constexpr int mmul_m0 = 4;
+constexpr int mmul_n0 = 4;
+constexpr int mmul_k0 = 16;
+
+Status validate_matmul_kernel_info(const MatMulKernelInfo &matmul_kernel_info)
+{
+ ARM_COMPUTE_UNUSED(matmul_kernel_info);
+ // TODO: Validate MatMulKernelInfo
+ return Status{};
+}
+} // namespace
+
+ClMatMulLowpNativeMMULKernel::ClMatMulLowpNativeMMULKernel()
+{
+ _type = CLKernelType::GEMM;
+}
+
+Status ClMatMulLowpNativeMMULKernel::validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *bias, const ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info,
+ const ActivationLayerInfo &act_info)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, rhs);
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_matmul_kernel_info(matmul_kernel_info));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_matmul_input_shapes(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info));
+ // TODO: Check MMUL block sizes against the tensor shapes
+ ARM_COMPUTE_UNUSED(mmul_k0);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG((act_info.activation() != ActivationFunction::IDENTITY && act_info.activation() != ActivationFunction::RELU
+ && act_info.activation() != ActivationFunction::LU_BOUNDED_RELU && act_info.activation() != ActivationFunction::BOUNDED_RELU),
+ "Activation Function specified is unsupported.");
+ const TensorShape expected_output_shape = misc::shape_calculator::compute_matmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info);
+
+ if(dst->total_size() != 0)
+ {
+ const TensorInfo tensor_info_output = dst->clone()->set_tensor_shape(expected_output_shape);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &tensor_info_output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, dst);
+ }
+
+ if(bias != nullptr)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32);
+ ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
+ ARM_COMPUTE_RETURN_ERROR_ON(expected_output_shape[0] != bias->dimension(0));
+ }
+
+ return Status{};
+}
+
+void ClMatMulLowpNativeMMULKernel::configure(const ClCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *bias, ITensorInfo *dst,
+ const MatMulKernelInfo &matmul_kernel_info,
+ const ActivationLayerInfo &act_info)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, dst);
+ ARM_COMPUTE_LOG_PARAMS(lhs, rhs, bias, dst, matmul_kernel_info, act_info);
+ ARM_COMPUTE_ERROR_THROW_ON(validate(lhs, rhs, bias, dst, matmul_kernel_info));
+
+ // dst tensor auto initialization if not yet initialized
+ auto_init_if_empty(*dst, lhs->clone()->set_tensor_shape(misc::shape_calculator::compute_matmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info)));
+
+ ARM_COMPUTE_UNUSED(compile_context, lhs, rhs, bias, matmul_kernel_info, act_info);
+
+ // Configure kernel window
+ const auto win_config = validate_and_configure_window_for_mmul_kernels(lhs, rhs, dst, matmul_kernel_info, mmul_m0, mmul_n0);
+ ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+ IClKernel::configure_internal(win_config.second);
+
+ CLBuildOptions build_opts;
+ // TODO: Build options & configuration
+
+ std::string kernel_name("mat_mul_native_quantized_mmul");
+ kernel_name += matmul_kernel_info.adj_lhs ? "_t" : "_nt";
+ kernel_name += matmul_kernel_info.adj_rhs ? "_t" : "_nt";
+
+ // A macro guard to compile ONLY the kernel of interest
+ build_opts.add_option("-D" + upper_string(kernel_name));
+
+ // Create kernel
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
+
+ // TODO: Tuner configuration
+}
+
+void ClMatMulLowpNativeMMULKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
+{
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
+
+ const auto *lhs = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
+ const auto *rhs = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
+ const auto *bias = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_2)); // nullptr if bias is not present
+ auto *dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
+
+ ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, dst);
+ ARM_COMPUTE_LOG_PARAMS(lhs, rhs, bias, dst);
+
+ unsigned int idx = 0;
+ add_3d_tensor_nhw_argument(idx, lhs);
+ add_3d_tensor_nhw_argument(idx, rhs);
+
+ if(bias != nullptr)
+ {
+ add_3d_tensor_nhw_argument(idx, bias);
+ }
+ add_3d_tensor_nhw_argument(idx, dst);
+
+ // LWS_x should be a multiple of 16 at least. (32, 2) has been chosen to have more work-items on a single core
+ // LWS also enforces the order of execution of the work items which improves cache utilization
+ enqueue(queue, *this, window, cl::NDRange(32, 2), false);
+}
+
+} // namespace kernels
+} // namespace opencl
+} // namespace arm_compute
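The configure() method above derives both the kernel name and its compile-time macro guard from the transposition flags, so only the kernel of interest in mat_mul_quantized_mmul.cl is compiled. A small standalone C++ sketch of that naming scheme; the local upper_string() mimics arm_compute's utility of the same name:

```cpp
#include <algorithm>
#include <cctype>
#include <cstdio>
#include <string>

// Stand-in for arm_compute::upper_string(): uppercase a copy of the input.
std::string upper_string(std::string s)
{
    std::transform(s.begin(), s.end(), s.begin(),
                   [](unsigned char c) { return std::toupper(c); });
    return s;
}

int main()
{
    const bool adj_lhs = false; // LHS not transposed
    const bool adj_rhs = true;  // RHS transposed

    std::string kernel_name("mat_mul_native_quantized_mmul");
    kernel_name += adj_lhs ? "_t" : "_nt";
    kernel_name += adj_rhs ? "_t" : "_nt";

    // The macro guard selects the matching #if block in the .cl file.
    const std::string guard = "-D" + upper_string(kernel_name);

    std::printf("%s\n", kernel_name.c_str()); // mat_mul_native_quantized_mmul_nt_t
    std::printf("%s\n", guard.c_str());       // -DMAT_MUL_NATIVE_QUANTIZED_MMUL_NT_T
    return 0;
}
```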
diff --git a/src/gpu/cl/kernels/ClMatMulLowpNativeMMULKernel.h b/src/gpu/cl/kernels/ClMatMulLowpNativeMMULKernel.h
new file mode 100644
index 0000000000..d2aa40b2e2
--- /dev/null
+++ b/src/gpu/cl/kernels/ClMatMulLowpNativeMMULKernel.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_SRC_GPU_CL_KERNELS_CLMATMULLOWPNATIVEMMULKERNEL_H
+#define ACL_SRC_GPU_CL_KERNELS_CLMATMULLOWPNATIVEMMULKERNEL_H
+
+#include "arm_compute/function_info/ActivationLayerInfo.h"
+#include "src/core/common/Macros.h"
+#include "src/gpu/cl/ClCompileContext.h"
+#include "src/gpu/cl/IClKernel.h"
+
+namespace arm_compute
+{
+// Forward declarations
+struct MatMulKernelInfo;
+namespace opencl
+{
+namespace kernels
+{
+class ClMatMulLowpNativeMMULKernel : public IClKernel
+{
+public:
+ ClMatMulLowpNativeMMULKernel();
+ ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClMatMulLowpNativeMMULKernel);
+
+ /** Initialise the kernel's input and output.
+ *
+ * Similar to @ref ClMatMulLowpNativeKernel::configure()
+ *
+ * @return a status
+ */
+ void configure(const ClCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *bias, ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info,
+ const ActivationLayerInfo &act_info = ActivationLayerInfo());
+ /** Static function to check if given info will lead to a valid configuration
+ *
+ * Similar to @ref ClMatMulLowpNativeKernel::configure()
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *bias, const ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info,
+ const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+ // Inherited methods overridden:
+ void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
+};
+} // namespace kernels
+} // namespace opencl
+} // namespace arm_compute
+#endif // ACL_SRC_GPU_CL_KERNELS_CLMATMULLOWPNATIVEMMULKERNEL_H
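As a usage illustration, the static validate() declared above can be driven with bare TensorInfo objects, much as the unit tests at the end of this patch do. A minimal host-side sketch, assuming Compute Library is built and its internal headers are on the include path; the shapes and data types are illustrative:

```cpp
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/TensorInfo.h"
#include "src/gpu/cl/kernels/ClMatMulLowpNativeMMULKernel.h"

#include <cstdio>

using namespace arm_compute;

int main()
{
    // 48x48 QASYMM8 matrices with an S32 bias, as in the data-type tests below.
    const TensorInfo lhs(TensorShape(48U, 48U), 1, DataType::QASYMM8);
    const TensorInfo rhs(TensorShape(48U, 48U), 1, DataType::QASYMM8);
    const TensorInfo bias(TensorShape(48U), 1, DataType::S32);
    TensorInfo       dst; // left empty; validate() derives the expected shape

    const MatMulKernelInfo info{ false /* adj_lhs */, false /* adj_rhs */, 1, 1, 1, false };

    const Status status = opencl::kernels::ClMatMulLowpNativeMMULKernel::validate(&lhs, &rhs, &bias, &dst, info);
    std::printf("validate: %s\n", bool(status) ? "OK" : status.error_description().c_str());
    return 0;
}
```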
diff --git a/src/gpu/cl/kernels/ClMatMulNativeKernel.cpp b/src/gpu/cl/kernels/ClMatMulNativeKernel.cpp
index 8f8ccfc41f..41ba5d5e25 100644
--- a/src/gpu/cl/kernels/ClMatMulNativeKernel.cpp
+++ b/src/gpu/cl/kernels/ClMatMulNativeKernel.cpp
@@ -23,20 +23,21 @@
*/
#include "src/gpu/cl/kernels/ClMatMulNativeKernel.h"
-#include "arm_compute/core/utils/ActivationFunctionUtils.h"
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/utils/ActivationFunctionUtils.h"
+#include "arm_compute/core/utils/StringUtils.h"
#include "arm_compute/core/utils/helpers/AdjustVecSize.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "arm_compute/core/utils/StringUtils.h"
#include "src/common/utils/Log.h"
#include "src/core/CL/CLUtils.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "src/gpu/cl/kernels/gemm/ClGemmHelpers.h"
+#include "src/gpu/cl/kernels/helpers/MatMulKernelHelpers.h"
#include "support/Cast.h"
#include "support/StringSupport.h"
@@ -79,24 +80,6 @@ Status validate_matmul_kernel_info(const MatMulKernelInfo &matmul_kernel_info)
return Status{};
}
-Status validate_input_shapes(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const MatMulKernelInfo &matmul_kernel_info)
-{
- const size_t lhs_k = matmul_kernel_info.adj_lhs ? lhs_shape.y() : lhs_shape.x();
- const size_t rhs_k = matmul_kernel_info.adj_rhs ? rhs_shape.x() : rhs_shape.y();
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_k != rhs_k, "K dimension in Lhs and Rhs matrices must match.");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_shape.total_size() == 0, "Lhs tensor can't be empty");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(rhs_shape.total_size() == 0, "Rhs tensor can't be empty");
-
- constexpr size_t batch_dim_start = 2;
- for(size_t i = batch_dim_start; i < Coordinates::num_max_dimensions; ++i)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_shape[i] != rhs_shape[i], "Batch dimension broadcasting is not supported");
- }
-
- return Status{};
-}
-
Status validate_export_to_cl_image(const ITensorInfo *rhs, const MatMulKernelInfo &matmul_kernel_info)
{
ARM_COMPUTE_RETURN_ERROR_ON(matmul_kernel_info.export_rhs_to_cl_image && rhs->lock_paddings());
@@ -131,7 +114,7 @@ Status ClMatMulNativeKernel::validate(const ITensorInfo *lhs, const ITensorInfo
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::F32, DataType::F16);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, rhs);
ARM_COMPUTE_RETURN_ON_ERROR(validate_matmul_kernel_info(matmul_kernel_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_input_shapes(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_matmul_input_shapes(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info));
ARM_COMPUTE_RETURN_ON_ERROR(validate_export_to_cl_image(rhs, matmul_kernel_info));
const TensorShape expected_output_shape = misc::shape_calculator::compute_matmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info);
diff --git a/src/gpu/cl/kernels/ClMatMulNativeMMULKernel.cpp b/src/gpu/cl/kernels/ClMatMulNativeMMULKernel.cpp
index 44c720a40c..2420ad6a78 100644
--- a/src/gpu/cl/kernels/ClMatMulNativeMMULKernel.cpp
+++ b/src/gpu/cl/kernels/ClMatMulNativeMMULKernel.cpp
@@ -28,13 +28,14 @@
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/utils/StringUtils.h"
#include "arm_compute/core/utils/helpers/AdjustVecSize.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "arm_compute/core/utils/StringUtils.h"
#include "src/common/utils/Log.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
+#include "src/gpu/cl/kernels/helpers/MatMulKernelHelpers.h"
#include "support/Cast.h"
#include "support/StringSupport.h"
@@ -52,13 +53,6 @@ constexpr int mmul_m0 = 4;
constexpr int mmul_n0 = 4;
constexpr int mmul_k0 = 4;
-inline std::pair<int, int> adjust_m0_n0(int m0, int n0, int m, int n)
-{
- m0 = std::min(m0, m);
- n0 = adjust_vec_size(n0, n);
- return { m0, n0 };
-}
-
Status validate_matmul_kernel_info(const MatMulKernelInfo &matmul_kernel_info)
{
const bool adj_lhs = matmul_kernel_info.adj_lhs;
@@ -83,68 +77,6 @@ Status validate_matmul_kernel_info(const MatMulKernelInfo &matmul_kernel_info)
return Status{};
}
-
-Status validate_input_shapes(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const MatMulKernelInfo &matmul_kernel_info)
-{
- const size_t lhs_k = matmul_kernel_info.adj_lhs ? lhs_shape.y() : lhs_shape.x();
- const size_t rhs_k = matmul_kernel_info.adj_rhs ? rhs_shape.x() : rhs_shape.y();
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_k != rhs_k, "K dimension in Lhs and Rhs matrices must match.");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG_VAR((lhs_k % mmul_k0) != 0, "K dimension must be a multiple of %d", mmul_k0);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_shape.total_size() == 0, "Lhs tensor can't be empty");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(rhs_shape.total_size() == 0, "Rhs tensor can't be empty");
-
- constexpr size_t batch_dim_start = 2;
- for(size_t i = batch_dim_start; i < Coordinates::num_max_dimensions; ++i)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_shape[i] != rhs_shape[i], "Batch dimension broadcasting is not supported");
- }
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info)
-{
- ARM_COMPUTE_UNUSED(lhs, rhs);
-
- const Window win = calculate_max_window(*dst, Steps(1, 1));
-
- // Collapse along the Z direction
- // This collapse needs to be here in order to tune the Z dimension of LWS
- Window collapsed = win.collapse(win, Window::DimZ);
-
- // Reconfigure window size, one arm_matrix_multiply call needs 16 threads to finish.
- Window::Dimension x_dimension = collapsed.x();
- Window::Dimension y_dimension = collapsed.y();
-
- const int m = dst->dimension(1);
- const int n = dst->dimension(0);
-
- int m0{};
- int n0{};
- std::tie(m0, n0) = adjust_m0_n0(matmul_kernel_info.m0, matmul_kernel_info.n0, m, n);
-
- // Make M and N multiple of M0 and N0 respectively
- const unsigned int ceil_to_multiple_n_n0 = ceil_to_multiple(n, n0);
- const unsigned int ceil_to_multiple_m_m0 = ceil_to_multiple(m, m0);
-
- // Divide M and N by M0 and N0 respectively
- const unsigned int n_div_n0 = ceil_to_multiple_n_n0 / n0;
- const unsigned int m_div_m0 = ceil_to_multiple_m_m0 / m0;
-
- // Make n_div_n0 and m_div_m0 multiple of mmul_n0 and mmul_m0 respectively
- const unsigned int ceil_to_multiple_n_div_n0_mmul_n0 = ceil_to_multiple(n_div_n0, mmul_n0);
- const unsigned int ceil_to_multiple_m_div_m0_mmul_m0 = ceil_to_multiple(m_div_m0, mmul_m0);
-
- // Ensure x_dimension is multiple of MMUL block size (mmul_m0 * mmul_n0)
- x_dimension.set_end(ceil_to_multiple_n_div_n0_mmul_n0 * mmul_m0);
- y_dimension.set_end(ceil_to_multiple_m_div_m0_mmul_m0 / mmul_m0);
-
- collapsed.set(Window::DimX, x_dimension);
- collapsed.set(Window::DimY, y_dimension);
-
- return std::make_pair(Status{}, collapsed);
-}
}
ClMatMulNativeMMULKernel::ClMatMulNativeMMULKernel()
{
@@ -158,9 +90,14 @@ Status ClMatMulNativeMMULKernel::validate(const ITensorInfo *lhs, const ITensorI
ARM_COMPUTE_RETURN_ERROR_ON_MSG(!arm_matrix_multiply_supported(CLKernelLibrary::get().get_device()), "The extension cl_arm_matrix_multiply is not supported on the target platform");
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, rhs);
ARM_COMPUTE_RETURN_ON_ERROR(validate_matmul_kernel_info(matmul_kernel_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_input_shapes(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info));
- const TensorShape expected_output_shape = misc::shape_calculator::compute_matmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info);
+ const TensorShape &lhs_shape = lhs->tensor_shape();
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_matmul_input_shapes(lhs_shape, rhs->tensor_shape(), matmul_kernel_info));
+
+ const size_t lhs_k = matmul_kernel_info.adj_lhs ? lhs_shape.y() : lhs_shape.x();
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG_VAR((lhs_k % mmul_k0) != 0, "K dimension must be a multiple of %d", mmul_k0);
+
+ const TensorShape expected_output_shape = misc::shape_calculator::compute_matmul_shape(lhs_shape, rhs->tensor_shape(), matmul_kernel_info);
if(dst->total_size() != 0)
{
@@ -195,12 +132,13 @@ void ClMatMulNativeMMULKernel::configure(const ClCompileContext &compile_context
_n = n;
_k = k;
- int m0{};
- int n0{};
- std::tie(m0, n0) = adjust_m0_n0(matmul_kernel_info.m0, matmul_kernel_info.n0, m, n);
+ const int m0 = std::min(matmul_kernel_info.m0, m);
+ const int n0 = adjust_vec_size(matmul_kernel_info.n0, n);
// Configure kernel window
- const auto win_config = validate_and_configure_window(lhs, rhs, dst, matmul_kernel_info);
+ const auto win_config = validate_and_configure_window_for_mmul_kernels(lhs, rhs, dst, matmul_kernel_info, mmul_m0,
+ mmul_n0);
+
ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
IClKernel::configure_internal(win_config.second);
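The refactor above inlines the former adjust_m0_n0() helper: m0 is clamped to the number of output rows, while n0 goes through adjust_vec_size(). A hedged C++ sketch of that adjustment, using a simplified stand-in for adjust_vec_size() that halves the vector size until it fits the dimension (the real utility also special-cases a dimension of 3):

```cpp
#include <algorithm>
#include <cstdio>

// Simplified stand-in for arm_compute::adjust_vec_size(): halve the vector
// size until it no longer exceeds the dimension (special cases omitted).
int adjust_vec_size_sketch(int vec_size, int dim0)
{
    while(vec_size > dim0)
    {
        vec_size >>= 1;
    }
    return vec_size;
}

int main()
{
    const int m = 3, n = 5; // small illustrative output matrix
    const int m0_requested = 4, n0_requested = 4;

    const int m0 = std::min(m0_requested, m);                // clamped to 3
    const int n0 = adjust_vec_size_sketch(n0_requested, n);  // stays 4

    std::printf("m0 = %d, n0 = %d\n", m0, n0);
    return 0;
}
```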
diff --git a/src/gpu/cl/kernels/helpers/MatMulKernelHelpers.cpp b/src/gpu/cl/kernels/helpers/MatMulKernelHelpers.cpp
new file mode 100644
index 0000000000..2407c6ca5e
--- /dev/null
+++ b/src/gpu/cl/kernels/helpers/MatMulKernelHelpers.cpp
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "src/gpu/cl/kernels/helpers/MatMulKernelHelpers.h"
+
+#include "arm_compute/core/Coordinates.h"
+#include "arm_compute/core/utils/helpers/AdjustVecSize.h"
+#include "arm_compute/core/utils/math/Math.h"
+
+#include "src/core/helpers/WindowHelpers.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+namespace kernels
+{
+Status validate_matmul_input_shapes(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const MatMulKernelInfo &matmul_kernel_info)
+{
+ const size_t lhs_k = matmul_kernel_info.adj_lhs ? lhs_shape.y() : lhs_shape.x();
+ const size_t rhs_k = matmul_kernel_info.adj_rhs ? rhs_shape.x() : rhs_shape.y();
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_k != rhs_k, "K dimension in Lhs and Rhs matrices must match.");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_shape.total_size() == 0, "Lhs tensor can't be empty");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(rhs_shape.total_size() == 0, "Rhs tensor can't be empty");
+
+ constexpr size_t batch_dim_start = 2;
+ for(size_t i = batch_dim_start; i < Coordinates::num_max_dimensions; ++i)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_shape[i] != rhs_shape[i], "Batch dimension broadcasting is not supported");
+ }
+
+ return Status{};
+}
+
+std::pair<Status, Window> validate_and_configure_window_for_mmul_kernels(const ITensorInfo *lhs,
+ const ITensorInfo *rhs, const ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info,
+ int mmul_m0, int mmul_n0)
+{
+ ARM_COMPUTE_UNUSED(lhs, rhs);
+
+ const Window win = calculate_max_window(*dst, Steps(1, 1));
+
+ // Collapse along the Z direction
+ // This collapse needs to be here in order to tune the Z dimension of LWS
+ Window collapsed = win.collapse(win, Window::DimZ);
+
+ // Reconfigure window size; one arm_matrix_multiply call needs 16 threads to finish.
+ Window::Dimension x_dimension = collapsed.x();
+ Window::Dimension y_dimension = collapsed.y();
+
+ const int m = dst->dimension(1);
+ const int n = dst->dimension(0);
+
+ const int m0 = std::min(matmul_kernel_info.m0, m);
+ const int n0 = adjust_vec_size(matmul_kernel_info.n0, n);
+
+ // Make M and N multiple of M0 and N0 respectively
+ const unsigned int ceil_to_multiple_n_n0 = ceil_to_multiple(n, n0);
+ const unsigned int ceil_to_multiple_m_m0 = ceil_to_multiple(m, m0);
+
+ // Divide M and N by M0 and N0 respectively
+ const unsigned int n_div_n0 = ceil_to_multiple_n_n0 / n0;
+ const unsigned int m_div_m0 = ceil_to_multiple_m_m0 / m0;
+
+ // Make n_div_n0 and m_div_m0 multiple of mmul_n0 and mmul_m0 respectively
+ const unsigned int ceil_to_multiple_n_div_n0_mmul_n0 = ceil_to_multiple(n_div_n0, mmul_n0);
+ const unsigned int ceil_to_multiple_m_div_m0_mmul_m0 = ceil_to_multiple(m_div_m0, mmul_m0);
+
+ // Ensure x_dimension is multiple of MMUL block size (mmul_m0 * mmul_n0)
+ x_dimension.set_end(ceil_to_multiple_n_div_n0_mmul_n0 * mmul_m0);
+ y_dimension.set_end(ceil_to_multiple_m_div_m0_mmul_m0 / mmul_m0);
+
+ collapsed.set(Window::DimX, x_dimension);
+ collapsed.set(Window::DimY, y_dimension);
+
+ return std::make_pair(Status{}, collapsed);
+}
+
+} // namespace kernels
+} // namespace opencl
+} // namespace arm_compute
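To make the window arithmetic above concrete, here is a worked example as a standalone C++ sketch, with an illustrative 100x100 destination and 4x4 block sizes; ceil_to_multiple() is re-implemented locally so the sketch stays self-contained:

```cpp
#include <cstdio>

// Round x up to the next multiple of m (same as arm_compute::ceil_to_multiple).
unsigned int ceil_to_multiple(unsigned int x, unsigned int m)
{
    return ((x + m - 1) / m) * m;
}

int main()
{
    // dst is 100x100 (n x m), block sizes m0 = n0 = 4, MMUL block 4x4.
    const unsigned int m = 100, n = 100;
    const unsigned int m0 = 4, n0 = 4; // already adjusted to the shapes
    const unsigned int mmul_m0 = 4, mmul_n0 = 4;

    // Make M and N multiples of M0 and N0, then count blocks.
    const unsigned int n_div_n0 = ceil_to_multiple(n, n0) / n0; // 25
    const unsigned int m_div_m0 = ceil_to_multiple(m, m0) / m0; // 25

    // Pad block counts to multiples of the MMUL block dimensions.
    const unsigned int x_blocks = ceil_to_multiple(n_div_n0, mmul_n0); // 28
    const unsigned int y_blocks = ceil_to_multiple(m_div_m0, mmul_m0); // 28

    // X picks up a factor of mmul_m0 so each arm_matrix_multiply call gets
    // its 16 work-items; Y is divided by mmul_m0 to compensate.
    std::printf("window end x = %u\n", x_blocks * mmul_m0); // 112
    std::printf("window end y = %u\n", y_blocks / mmul_m0); // 7
    return 0;
}
```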
diff --git a/src/gpu/cl/kernels/helpers/MatMulKernelHelpers.h b/src/gpu/cl/kernels/helpers/MatMulKernelHelpers.h
new file mode 100644
index 0000000000..210f22b109
--- /dev/null
+++ b/src/gpu/cl/kernels/helpers/MatMulKernelHelpers.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef ACL_SRC_GPU_CL_KERNELS_HELPERS_MATMULKERNELHELPERS_H
+#define ACL_SRC_GPU_CL_KERNELS_HELPERS_MATMULKERNELHELPERS_H
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/KernelDescriptors.h"
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Window.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+namespace kernels
+{
+/** Validate the input shapes of the Matmul operation
+ *
+ * @param[in] lhs_shape Lhs tensor shape
+ * @param[in] rhs_shape Rhs tensor shape
+ * @param[in] matmul_kernel_info Matmul kernel info
+ *
+ * @return a status reporting whether the shapes and matmul kernel info are compatible
+ */
+Status validate_matmul_input_shapes(const TensorShape &lhs_shape, const TensorShape &rhs_shape,
+ const MatMulKernelInfo &matmul_kernel_info);
+
+/** Validate and configure window for Matmul MMUL kernels
+ *
+ * @param[in] lhs Lhs tensor info
+ * @param[in] rhs Rhs tensor info
+ * @param[in] dst Dst tensor info
+ * @param[in] matmul_kernel_info Matmul kernel info
+ * @param[in] mmul_m0 Number of rows in the MMUL block
+ * @param[in] mmul_n0 Number of columns in the MMUL block
+ *
+ * @return a pair of Status and Window object
+ */
+std::pair<Status, Window> validate_and_configure_window_for_mmul_kernels(const ITensorInfo *lhs,
+ const ITensorInfo *rhs, const ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info,
+ int mmul_m0, int mmul_n0);
+} // namespace kernels
+} // namespace opencl
+} // namespace arm_compute
+
+#endif // ACL_SRC_GPU_CL_KERNELS_HELPERS_MATMULKERNELHELPERS_H
diff --git a/tests/validation/CL/MatMulLowpNativeMMULKernel.cpp b/tests/validation/CL/MatMulLowpNativeMMULKernel.cpp
new file mode 100644
index 0000000000..10d893e5c4
--- /dev/null
+++ b/tests/validation/CL/MatMulLowpNativeMMULKernel.cpp
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/runtime/CL/CLTensor.h"
+
+#include "src/gpu/cl/kernels/ClMatMulLowpNativeMMULKernel.h"
+
+#include "tests/datasets/LargeMatMulDataset.h"
+#include "tests/datasets/SmallMatMulDataset.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/MatMulKernelFixture.h"
+#include "tests/validation/reference/Permute.h"
+
+#include <tuple>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+// TODO: enable
+// constexpr AbsoluteTolerance<float> tolerance_quant(1); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
+}
+template <typename T>
+using CLMatMulLowpNativeMMULKernelFixture = MatMulKernelValidationFixture<T, ClMatMulLowpNativeMMULKernel>;
+
+template <typename T>
+using CLMatMulLowpKernelWithBiasFixture = MatMulKernelWithBiasValidation<T, ClMatMulLowpNativeMMULKernel>;
+
+TEST_SUITE(CL)
+TEST_SUITE(MatMulLowpNativeMMULKernel)
+TEST_SUITE(Validate)
+
+TEST_CASE(SupportedKernelConfigurations, framework::DatasetMode::ALL)
+{
+ using MatMulConfigurationPair = std::pair<MatMulKernelInfo, bool>;
+
+ const std::vector<MatMulConfigurationPair> supported_block_sizes =
+ {
+ // MatMulKernelInfo(adj_lhs, adj_rhs, M0, N0, K0, export_rhs_to_cl_image = false)
+ // Lhs not-transposed, Rhs-not-transposed
+ // TODO: Test Cases
+ };
+
+ // Set big enough shapes so that block sizes are not truncated. Also, set all dimensions equal
+ // so that it doesn't fail for different NT/T configurations. We aim to test the block sizes here,
+ // not the shapes themselves.
+ const TensorInfo lhs_info = TensorInfo(TensorShape(100U, 100U), 1, DataType::QASYMM8_SIGNED);
+ const TensorInfo rhs_info = TensorInfo(TensorShape(100U, 100U), 1, DataType::QASYMM8_SIGNED);
+
+ for(auto &pair : supported_block_sizes)
+ {
+ TensorInfo output_info;
+ Status status = ClMatMulLowpNativeMMULKernel::validate(&lhs_info, &rhs_info, nullptr, &output_info, pair.first);
+
+ ARM_COMPUTE_EXPECT(bool(status) == pair.second, framework::LogLevel::ERRORS);
+ }
+}
+
+TEST_CASE(ValidateInputShapes, framework::DatasetMode::ALL)
+{
+ // Configurations are assumed to be Nt/Nt, but are transposed inside the test to cover the other configurations
+ using ShapeConfigurationTuple = std::tuple<TensorShape, TensorShape, TensorShape, bool>;
+ const std::vector<ShapeConfigurationTuple> shape_configurations =
+ {
+ { TensorShape(5U, 1U), TensorShape(3U, 5U), TensorShape(3U), true },
+ { TensorShape(10U, 12U), TensorShape(3U, 10U), TensorShape(3U), true },
+ { TensorShape(8U, 4U), TensorShape(2U, 8U), TensorShape(2U), true },
+ { TensorShape(8U, 4U), TensorShape(2U, 5U), TensorShape(2U), false }, // Mismatch in the K dimension
+ { TensorShape(5U, 0U), TensorShape(2U, 5U), TensorShape(2U), false }, // Invalid dimension
+ { TensorShape(5U, 4U, 3U, 4U, 5U, 6U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), TensorShape(2U), true },
+ { TensorShape(5U, 4U, 3U, 4U, 5U, 1U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), TensorShape(2U), false }, // no batch broadcasting
+ { TensorShape(5U, 4U, 3U, 4U, 9U, 6U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), TensorShape(2U), false }, // mismatch in batch dimension
+ { TensorShape(5U, 1U), TensorShape(3U, 5U), TensorShape(1U), false }, // invalid broadcast of bias
+ { TensorShape(5U, 1U), TensorShape(3U, 5U), TensorShape(3U, 3U), false }, // 2d bias is invalid
+ };
+
+ for(auto &tuple : shape_configurations)
+ {
+ const bool expected = std::get<3>(tuple);
+
+ for(bool adj_lhs :
+ {
+ false, true
+ })
+ {
+ for(bool adj_rhs :
+ {
+ false, true
+ })
+ {
+ TensorShape lhs_shape = std::get<0>(tuple);
+ TensorShape rhs_shape = std::get<1>(tuple);
+ TensorShape bia_shape = std::get<2>(tuple);
+
+ if(adj_lhs)
+ {
+ permute(lhs_shape, PermutationVector(1U, 0U));
+ }
+
+ if(adj_rhs)
+ {
+ permute(rhs_shape, PermutationVector(1U, 0U));
+ }
+
+ const TensorInfo lhs_info = TensorInfo(lhs_shape, 1, DataType::QASYMM8_SIGNED);
+ const TensorInfo rhs_info = TensorInfo(rhs_shape, 1, DataType::QASYMM8_SIGNED);
+ const TensorInfo bia_info = TensorInfo(bia_shape, 1, DataType::S32);
+ TensorInfo output_info;
+
+ MatMulKernelInfo matmul_kernel_info{ adj_lhs, adj_rhs, 1, 1, 1, false /* export_rhs_to_cl_image */ };
+
+ Status status = ClMatMulLowpNativeMMULKernel::validate(&lhs_info, &rhs_info, &bia_info, &output_info, matmul_kernel_info);
+ ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
+ }
+ }
+ }
+}
+
+TEST_CASE(ValidateDataTypes, framework::DatasetMode::ALL)
+{
+ using DataTypeConfigurationTuple = std::tuple<DataType, DataType, DataType, DataType, bool>;
+ const std::vector<DataTypeConfigurationTuple> data_type_configurations =
+ {
+ { DataType::F32, DataType::F32, DataType::F32, DataType::F32, false }, // no floating point types
+ { DataType::F16, DataType::F16, DataType::F16, DataType::F16, false }, // no floating point types
+ { DataType::F64, DataType::F64, DataType::F64, DataType::F64, false }, // no double precision
+ { DataType::QASYMM8, DataType::QASYMM8, DataType::S32, DataType::QASYMM8, true },
+ { DataType::QASYMM8_SIGNED, DataType::QASYMM8_SIGNED, DataType::S32, DataType::QASYMM8_SIGNED, true },
+ { DataType::QSYMM8_PER_CHANNEL, DataType::QSYMM8_PER_CHANNEL, DataType::S32, DataType::QSYMM8_PER_CHANNEL, false }, // only qasymm8/qasymm8_signed is supported
+ { DataType::QASYMM16, DataType::QASYMM16, DataType::S32, DataType::QASYMM16, false }, // only qasymm8/qasymm8_signed is supported
+ { DataType::QSYMM16, DataType::QSYMM16, DataType::S32, DataType::QSYMM16, false }, // only qasymm8/qasymm8_signed is supported
+ { DataType::QSYMM8, DataType::QSYMM8, DataType::S32, DataType::QSYMM8, false }, // only qasymm8/qasymm8_signed is supported
+ { DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S32, DataType::QASYMM8, false }, // no mixed data types
+ { DataType::S64, DataType::S64, DataType::S64, DataType::S64, false }, // no integral types
+ { DataType::S32, DataType::S32, DataType::S32, DataType::S32, false }, // no integral types
+ { DataType::S16, DataType::S16, DataType::S16, DataType::S16, false }, // no integral types
+ { DataType::S8, DataType::S8, DataType::S8, DataType::S8, false }, // no integral types
+ { DataType::U64, DataType::U64, DataType::U64, DataType::U64, false }, // no integral types
+ { DataType::U32, DataType::U32, DataType::U32, DataType::U32, false }, // no integral types
+ { DataType::U16, DataType::U16, DataType::U16, DataType::U16, false }, // no integral types
+ { DataType::U8, DataType::U8, DataType::U8, DataType::U8, false }, // no integral types
+ { DataType::QASYMM8, DataType::QASYMM8, DataType::F32, DataType::QASYMM8, false } // Only S32 bias is supported
+ };
+
+ // It's enough to test a single shape and block size configuration while checking data types
+ const TensorShape shape = TensorShape(48U, 48U);
+ const TensorShape bia_shape = TensorShape(48U);
+ const MatMulKernelInfo matmul_kernel_info{ false, false, 1, 1, 1, false };
+ for(auto &tuple : data_type_configurations)
+ {
+ const bool expected = std::get<4>(tuple);
+
+ const TensorInfo lhs_info(shape, 1, std::get<0>(tuple));
+ const TensorInfo rhs_info(shape, 1, std::get<1>(tuple));
+ const TensorInfo bia_info(bia_shape, 1, std::get<2>(tuple));
+ TensorInfo output_info(shape, 1, std::get<3>(tuple));
+
+ Status status = ClMatMulLowpNativeMMULKernel::validate(&lhs_info, &rhs_info, &bia_info, &output_info, matmul_kernel_info);
+ ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
+ }
+}
+
+TEST_SUITE_END() // Validate
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8_SIGNED)
+
+// TODO: tests
+
+TEST_SUITE_END() // QASYMM8_SIGNED
+TEST_SUITE(QASYMM8)
+
+// TODO: tests
+
+TEST_SUITE_END() // QASYMM8
+TEST_SUITE_END() // Quantized
+TEST_SUITE_END() // MatMulLowpNativeMMULKernel
+TEST_SUITE_END() // CL
+} // namespace validation
+} // namespace test
+} // namespace arm_compute