author    | Ramy Elgammal <ramy.elgammal@arm.com> | 2023-03-09 21:15:37 +0000
committer | Gunes Bayir <gunes.bayir@arm.com>     | 2023-03-17 11:00:57 +0000
commit    | 2b6ebfe4270b06b09e45f306e8384950aeca7e4e (patch)
tree      | 44a65551aa32352a57090bf550b96cbf5b54222f /src
parent    | 40aab11ff88ed432d5028478e531f8d0fc404d4c (diff)
download  | ComputeLibrary-2b6ebfe4270b06b09e45f306e8384950aeca7e4e.tar.gz
Implement OpenCL MatMul for Lhs NT Rhs T/NT FP32/16
- Implement ClNativeMatMulKernel class
- Implement OpenCL kernel for LHS non-transposed and RHS non-transposed
- Implement OpenCL kernel for LHS non-transposed and RHS transposed
- Add test fixture and dataset for matmul
- Implement transpose_tensor() in the reference implementation to transpose high-dimensional tensors
Resolves: COMPMID-5944, COMPMID-5951
Co-authored-by: Gunes Bayir <gunes.bayir@arm.com>
Co-authored-by: Ramy Elgammal <ramy.elgammal@arm.com>
Change-Id: I1d5b8978f41be27baddb3153ade880472141573f
Signed-off-by: Gunes Bayir <gunes.bayir@arm.com>
Signed-off-by: Ramy Elgammal <ramy.elgammal@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9333
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src')
-rw-r--r-- | src/core/CL/cl_kernels/common/mat_mul.cl    | 299
-rw-r--r-- | src/core/CL/cl_kernels/tile_helpers.h       |  21
-rw-r--r-- | src/gpu/cl/ClKernelLibrary.cpp              |   8
-rw-r--r-- | src/gpu/cl/kernels/ClNativeMatMulKernel.cpp | 192
-rw-r--r-- | src/gpu/cl/kernels/ClNativeMatMulKernel.h   |  70
5 files changed, 586 insertions, 4 deletions
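
For orientation before the diff body, here is a hypothetical host-side sketch of how the kernel entry points added by this commit fit together. It is not part of the patch: the function name, tensor shapes, and block sizes are invented for illustration, and the ITensorPack is assumed to already hold the CL tensors under ACL_SRC_0/ACL_SRC_1/ACL_DST.

// Hypothetical usage sketch (not in this patch): exercising the new
// ClNativeMatMulKernel interface added by this commit.
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/gpu/cl/kernels/ClNativeMatMulKernel.h"

using namespace arm_compute;
using namespace arm_compute::opencl::kernels;

void run_native_matmul_sketch(const ClCompileContext &compile_context, ITensorPack &tensors)
{
    // Illustrative shapes: LHS is M x K (W = K = 8, H = M = 4),
    // RHS is K x N (W = N = 16, H = K = 8); DST is auto-initialized to M x N.
    TensorInfo lhs(TensorShape(8U, 4U), 1, DataType::F32);
    TensorInfo rhs(TensorShape(16U, 8U), 1, DataType::F32);
    TensorInfo dst{};

    MatMulKernelInfo info{};
    info.adj_lhs = false; // LHS NT / RHS NT -> selects mat_mul_native_nt_nt
    info.adj_rhs = false;
    info.m0      = 4; // any positive integer when LHS is non-transposed
    info.n0      = 4; // must be one of 1, 2, 3, 4, 8, 16
    info.k0      = 4; // must be one of 1, 2, 3, 4, 8, 16

    ARM_COMPUTE_ERROR_THROW_ON(ClNativeMatMulKernel::validate(&lhs, &rhs, &dst, info));

    ClNativeMatMulKernel kernel;
    kernel.configure(compile_context, &lhs, &rhs, &dst, info);

    // 'tensors' is assumed to map ACL_SRC_0/ACL_SRC_1/ACL_DST to ICLTensors
    // whose infos match lhs/rhs/dst.
    kernel.run_op(tensors, kernel.window(), CLScheduler::get().queue());
}
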
diff --git a/src/core/CL/cl_kernels/common/mat_mul.cl b/src/core/CL/cl_kernels/common/mat_mul.cl
new file mode 100644
index 0000000000..7c74e9d07b
--- /dev/null
+++ b/src/core/CL/cl_kernels/common/mat_mul.cl
@@ -0,0 +1,299 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+#include "tile_helpers.h"
+
+#if defined(MAT_MUL_NATIVE_NT_NT)
+/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS non-transposed, RHS non-transposed - buffer only
+ *
+ * @note The "batch" here expresses the number of matrix multiplications to run in parallel. It
+ *       should NOT be confused with the batch size of the model. For NHWC the "batch" is the "H" dimension.
+ * @note The block's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=4).
+ * @note The dimension K must be passed at compile time using -DK (e.g. -DK=6)
+ * @note Only the following configurations of M0, N0 and K0 are currently supported:
+ *  - M0 > 0
+ *  - N0 = 1, 2, 3, 4, 8, 16
+ *  - K0 = 1, 2, 3, 4, 8, 16
+ * @note Values > 8 for M0 are not expected to be efficient
+ *
+ * @param[in]  lhs_ptr                           Pointer to the lhs matrix. Supported data types: F32/F16
+ * @param[in]  lhs_stride_y                      Stride of the lhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in]  lhs_stride_z                      Stride of the lhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in]  lhs_w                             The width of the lhs tensor
+ * @param[in]  lhs_h                             The height of the lhs tensor
+ * @param[in]  lhs_n                             Number of the matrices (buffers) in the batch
+ * @param[in]  lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix
+ * @param[in]  rhs_ptr                           Pointer to the rhs matrix. Supported data types: F32/F16
+ * @param[in]  rhs_stride_y                      Stride of the rhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in]  rhs_stride_z                      Stride of the rhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in]  rhs_w                             The width of the rhs tensor
+ * @param[in]  rhs_h                             The height of the rhs tensor
+ * @param[in]  rhs_n                             Number of the matrices (buffers) in the batch
+ * @param[in]  rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix
+ * @param[out] dst_ptr                           Pointer to the dst matrix. Supported data types: F32/F16
+ * @param[in]  dst_stride_y                      Stride of the dst matrix in Y (2nd) dimension (in bytes)
+ * @param[in]  dst_stride_z                      Stride of the dst tensor in Z (3rd) dimension (in bytes)
+ * @param[in]  dst_w                             The width of the dst tensor
+ * @param[in]  dst_h                             The height of the dst tensor
+ * @param[in]  dst_n                             Number of the matrices (buffers) in the batch
+ * @param[in]  dst_offset_first_element_in_bytes The offset of the first element in the dst matrix
+ */
+__kernel void mat_mul_native_nt_nt(
+    TENSOR3D_T(lhs, BUFFER),
+    TENSOR3D_T(rhs, BUFFER),
+    TENSOR3D_T(dst, BUFFER))
+{
+    const uint x = GET_SPATIAL_IDX(0, N0, PARTIAL_STORE_N0);
+    const uint y = GET_SPATIAL_IDX(1, M0, PARTIAL_STORE_M0);
+    const uint z = GET_SPATIAL_IDX(2, 1, 0);
+
+    // Compute LHS/RHS/DST matrix address
+    lhs_offset_first_element_in_bytes += y * lhs_stride_y + z * lhs_stride_z;
+    rhs_offset_first_element_in_bytes += x * sizeof(DATA_TYPE) + z * rhs_stride_z;
+    dst_offset_first_element_in_bytes += x * sizeof(DATA_TYPE) + y * dst_stride_y + z * dst_stride_z;
+
+    // Initialize the accumulators
+    TILE(DATA_TYPE, M0, N0, acc);
+
+    LOOP_UNROLLING(int, i, 0, 1, M0,
+    {
+        acc[i].v = 0.f;
+    })
+
+    int k;
+    for(k = 0; k <= K - K0; k += K0)
+    {
+        TILE(DATA_TYPE, M0, K0, a);
+        TILE(DATA_TYPE, K0, N0, b);
+
+        LOOP_UNROLLING(int, i, 0, 1, M0,
+        {
+            a[i].v = 0.f;
+        })
+
+        LOOP_UNROLLING(int, i, 0, 1, K0,
+        {
+            b[i].v = 0.f;
+        })
+
+        // Load tile from the lhs/rhs tensors
+        T_LOAD(DATA_TYPE, M0, K0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+        T_LOAD(DATA_TYPE, K0, N0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b);
+
+        T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, K0, NT, NT, a, b, acc);
+
+        lhs_offset_first_element_in_bytes += K0 * sizeof(DATA_TYPE);
+        rhs_offset_first_element_in_bytes += K0 * rhs_stride_y;
+    }
+
+#if K % K0 != 0
+    for(; k < K; ++k)
+    {
+        TILE(DATA_TYPE, M0, 1, a);
+        TILE(DATA_TYPE, 1, N0, b);
+
+        LOOP_UNROLLING(int, i, 0, 1, M0,
+        {
+            a[i].v = 0.f;
+        })
+
+        LOOP_UNROLLING(int, i, 0, 1, 1,
+        {
+            b[i].v = 0.f;
+        })
+
+        // Load tile from the lhs/rhs tensors
+        T_LOAD(DATA_TYPE, M0, 1, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+        T_LOAD(DATA_TYPE, 1, N0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b);
+
+        T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, 1, NT, NT, a, b, acc);
+
+        lhs_offset_first_element_in_bytes += 1 * sizeof(DATA_TYPE);
+        rhs_offset_first_element_in_bytes += 1 * rhs_stride_y;
+    }
+#endif // K % K0 != 0
+
+    const bool x_cond = PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0;
+    const bool y_cond = PARTIAL_STORE_M0 != 0 && get_global_id(1) == 0;
+
+    TILE(int, M0, 1, indirect_buffer);
+    LOOP_UNROLLING(int, _i, 0, 1, M0,
+    {
+        indirect_buffer[_i].v = min(_i, select(M0 - 1, PARTIAL_STORE_M0 - 1, y_cond));
+    });
+
+    T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, 0, dst_stride_y, x_cond, acc, indirect_buffer);
+}
+#endif // defined(MAT_MUL_NATIVE_NT_NT)
+
+#if defined(MAT_MUL_NATIVE_NT_T)
+/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS non-transposed, RHS transposed - buffer only
+ *
+ * @note The "batch" here expresses the number of matrix multiplications to run in parallel. It
+ *       should NOT be confused with the batch size of the model. For NHWC the "batch" is the "H" dimension.
+ * @note The block's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=4).
+ * @note The dimension K must be passed at compile time using -DK (e.g. -DK=6)
+ * @note Only the following configurations of M0, N0 and K0 are currently supported:
+ *  - M0 > 0
+ *  - N0 = 1, 2, 3, 4, 8, 16
+ *  - K0 = 1, 2, 3, 4, 8, 16
+ * @note Values > 8 for M0, N0 and K0 are not expected to be efficient
+ *
+ * @param[in]  lhs_ptr                           Pointer to the lhs matrix. Supported data types: F32/F16
+ * @param[in]  lhs_stride_y                      Stride of the lhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in]  lhs_stride_z                      Stride of the lhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in]  lhs_w                             The width of the lhs tensor
+ * @param[in]  lhs_h                             The height of the lhs tensor
+ * @param[in]  lhs_n                             Number of the matrices (buffers) in the batch
+ * @param[in]  lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix
+ * @param[in]  rhs_ptr                           Pointer to the rhs matrix. Supported data types: F32/F16
+ * @param[in]  rhs_stride_y                      Stride of the rhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in]  rhs_stride_z                      Stride of the rhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in]  rhs_w                             The width of the rhs tensor
+ * @param[in]  rhs_h                             The height of the rhs tensor
+ * @param[in]  rhs_n                             Number of the matrices (buffers) in the batch
+ * @param[in]  rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix
+ * @param[out] dst_ptr                           Pointer to the dst matrix. Supported data types: F32/F16
+ * @param[in]  dst_stride_y                      Stride of the dst matrix in Y (2nd) dimension (in bytes)
+ * @param[in]  dst_stride_z                      Stride of the dst tensor in Z (3rd) dimension (in bytes)
+ * @param[in]  dst_w                             The width of the dst tensor
+ * @param[in]  dst_h                             The height of the dst tensor
+ * @param[in]  dst_n                             Number of the matrices (buffers) in the batch
+ * @param[in]  dst_offset_first_element_in_bytes The offset of the first element in the dst matrix
+ */
+__kernel void mat_mul_native_nt_t(TENSOR3D_T(lhs, BUFFER),
+                                  TENSOR3D_T(rhs, BUFFER),
+                                  TENSOR3D_T(dst, BUFFER))
+{
+    const uint x = GET_SPATIAL_IDX(0, N0, PARTIAL_STORE_N0);
+    const uint y = GET_SPATIAL_IDX(1, M0, PARTIAL_STORE_M0);
+    const uint z = GET_SPATIAL_IDX(2, 1, 0);
+
+    // Compute LHS/RHS/DST matrix address
+    lhs_offset_first_element_in_bytes += y * lhs_stride_y + z * lhs_stride_z;
+    rhs_offset_first_element_in_bytes += x * rhs_stride_y + z * rhs_stride_z;
+    dst_offset_first_element_in_bytes += x * sizeof(DATA_TYPE) + y * dst_stride_y + z * dst_stride_z;
+
+    // Initialize the accumulators
+    TILE(DATA_TYPE, M0, N0, acc);
+
+    LOOP_UNROLLING(int, i, 0, 1, M0,
+    {
+        acc[i].v = 0.f;
+    })
+
+    int k;
+    for(k = 0; k <= K - K0; k += K0)
+    {
+        TILE(DATA_TYPE, M0, K0, a);
+        TILE(DATA_TYPE, N0, K0, b);
+
+        LOOP_UNROLLING(int, i, 0, 1, M0,
+        {
+            a[i].v = 0.f;
+        })
+
+        LOOP_UNROLLING(int, i, 0, 1, N0,
+        {
+            b[i].v = 0.f;
+        })
+
+        // Load tile from the lhs/rhs tensors
+        T_LOAD(DATA_TYPE, M0, K0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+        T_LOAD(DATA_TYPE, N0, K0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b);
+
+#if GPU_ARCH == GPU_ARCH_MIDGARD
+        // This part is written to decrease the number of loop unrollings caused
+        // by T_MMUL. The NT/NT version is partly vectorized and uses fewer
+        // loop unrollings, and the code behaves as expected. Although this is not
+        // a performant solution for the specified architecture, it is necessary
+        // to overcome some limitations.
+        TILE(DATA_TYPE, K0, N0, bt);
+        LOOP_UNROLLING(int, i, 0, 1, N0,
+        {
+            LOOP_UNROLLING(int, j, 0, 1, K0,
+            {
+                bt[j].s[i] = b[i].s[j];
+            })
+        })
+        T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, K0, NT, NT, a, bt, acc);
+#else // GPU_ARCH == GPU_ARCH_MIDGARD
+        T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, K0, NT, T, a, b, acc);
+#endif // GPU_ARCH == GPU_ARCH_MIDGARD
+
+        lhs_offset_first_element_in_bytes += K0 * sizeof(DATA_TYPE);
+        rhs_offset_first_element_in_bytes += K0 * sizeof(DATA_TYPE);
+    }
+
+#if K % K0 != 0
+    /* Leftover Loop */
+    for(; k < K; ++k)
+    {
+        TILE(DATA_TYPE, M0, 1, a);
+        TILE(DATA_TYPE, N0, 1, b);
+
+        LOOP_UNROLLING(int, i, 0, 1, M0,
+        {
+            a[i].v = 0.f;
+        })
+
+        LOOP_UNROLLING(int, i, 0, 1, N0,
+        {
+            b[i].v = 0.f;
+        })
+
+        // Load tile from the lhs/rhs tensors
+        T_LOAD(DATA_TYPE, M0, 1, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+        T_LOAD(DATA_TYPE, N0, 1, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b);
+
+#if GPU_ARCH == GPU_ARCH_MIDGARD
+        // See the main loop for the explanation of this part
+        TILE(DATA_TYPE, 1, N0, bt);
+        LOOP_UNROLLING(int, i, 0, 1, N0,
+        {
+            bt[0].s[i] = b[i].s[0];
+        })
+        T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, 1, NT, NT, a, bt, acc);
+#else // GPU_ARCH == GPU_ARCH_MIDGARD
+        T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, 1, NT, T, a, b, acc);
+#endif // GPU_ARCH == GPU_ARCH_MIDGARD
+
+        lhs_offset_first_element_in_bytes += 1 * sizeof(DATA_TYPE);
+        rhs_offset_first_element_in_bytes += 1 * sizeof(DATA_TYPE);
+    }
+#endif // K % K0 != 0
+
+    const bool x_cond = PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0;
+    const bool y_cond = PARTIAL_STORE_M0 != 0 && get_global_id(1) == 0;
+
+    TILE(int, M0, 1, indirect_buffer);
+    LOOP_UNROLLING(int, _i, 0, 1, M0,
+    {
+        indirect_buffer[_i].v = min(_i, select(M0 - 1, PARTIAL_STORE_M0 - 1, y_cond));
+    });
+
+    T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, 0, dst_stride_y, x_cond, acc, indirect_buffer);
+}
+#endif // defined(MAT_MUL_NATIVE_NT_T)
\ No newline at end of file
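
Before the host-side changes below, it may help to restate what the NT/NT kernel above computes. The following is a plain scalar C++ sketch, not part of the patch; the function name and the row-major layout are illustrative. It mirrors the kernel's structure: a main loop over full K0 blocks followed by a scalar leftover loop, with the M0/N0 tiling and partial-store logic omitted for clarity.

#include <cstddef>
#include <vector>

// Scalar restatement of mat_mul_native_nt_nt for one batch:
// dst(M x N) = lhs(M x K) * rhs(K x N), walking K in steps of K0.
void matmul_nt_nt_reference(const std::vector<float> &lhs, const std::vector<float> &rhs,
                            std::vector<float> &dst, size_t M, size_t N, size_t K, size_t K0)
{
    for(size_t m = 0; m < M; ++m)
    {
        for(size_t n = 0; n < N; ++n)
        {
            float  acc = 0.f;
            size_t k   = 0;
            // Main loop: full K0 blocks, as in the kernel's
            // "for(k = 0; k <= K - K0; k += K0)"
            for(; k + K0 <= K; k += K0)
            {
                for(size_t kk = 0; kk < K0; ++kk)
                {
                    acc += lhs[m * K + k + kk] * rhs[(k + kk) * N + n];
                }
            }
            // Leftover loop: the "#if K % K0 != 0" tail, one element at a time
            for(; k < K; ++k)
            {
                acc += lhs[m * K + k] * rhs[k * N + n];
            }
            dst[m * N + n] = acc;
        }
    }
}
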
diff --git a/src/core/CL/cl_kernels/tile_helpers.h b/src/core/CL/cl_kernels/tile_helpers.h
index 1e4dddd2db..5d397ad333 100644
--- a/src/core/CL/cl_kernels/tile_helpers.h
+++ b/src/core/CL/cl_kernels/tile_helpers.h
@@ -21,8 +21,8 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef SRC_CORE_CL_CL_KERNELS_TILE_HELPERS
-#define SRC_CORE_CL_CL_KERNELS_TILE_HELPERS
+#ifndef ACL_SRC_CORE_CL_CL_KERNELS_TILE_HELPERS
+#define ACL_SRC_CORE_CL_CL_KERNELS_TILE_HELPERS
 
 // *INDENT-OFF*
 // clang-format off
@@ -1282,6 +1282,21 @@
     }) \
     }
 
+#define T_MMUL_NT_NT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_NT_##LHS_DATA_TYPE##_##RHS_DATA_TYPE##_##DST_DATA_TYPE(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_NT_NT_float_float_float(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_NT_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_NT_NT_half_half_float(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_NT_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_NT_NT_half_half_half(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_NT_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_NT_NT_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) \
+    {                                                                                 \
+        LOOP_UNROLLING(int, _m, 0, 1, M0,                                             \
+        {                                                                             \
+            LOOP_UNROLLING(int, _k, 0, 1, K0,                                         \
+            {                                                                         \
+                dst[_m].v = fma((DST_DATA_TYPE)(lhs[_m].s[_k]), (rhs[_k].v), dst[_m].v); \
+            })                                                                        \
+        })                                                                            \
+    }
+
 #define T_MMUL_NT_T_INTEGER8(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) \
     ({ \
         LOOP_UNROLLING(int, _m, 0, 1, M0, \
@@ -1293,4 +1308,4 @@
         }) \
     })
 
-#endif /* SRC_CORE_CL_CL_KERNELS_TILE_HELPERS */
+#endif /* ACL_SRC_CORE_CL_CL_KERNELS_TILE_HELPERS */
diff --git a/src/gpu/cl/ClKernelLibrary.cpp b/src/gpu/cl/ClKernelLibrary.cpp
index f788bedc34..482e8c341d 100644
--- a/src/gpu/cl/ClKernelLibrary.cpp
+++ b/src/gpu/cl/ClKernelLibrary.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2022 Arm Limited.
+ * Copyright (c) 2016-2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -358,6 +358,8 @@ const std::map<std::string, std::string> ClKernelLibrary::_kernel_program_map =
     { "strided_slice", "common/slice_ops.cl" },
     { "tile", "common/tile.cl" },
     { "transpose", "common/transpose.cl" },
+    { "mat_mul_native_nt_nt", "common/mat_mul.cl" },
+    { "mat_mul_native_nt_t", "common/mat_mul.cl" },
 #ifdef ENABLE_NCHW_KERNELS
     { "batch_to_space_nchw", "nchw/batch_to_space.cl" },
     { "batch_to_space_static_nchw", "nchw/batch_to_space.cl" },
@@ -781,6 +783,10 @@ const std::map<std::string, std::string> ClKernelLibrary::_program_source_map =
         "common/unpooling_layer.cl",
 #include "./cl_kernels/common/unpooling_layer.clembed"
     },
+    {
+        "common/mat_mul.cl",
+#include "./cl_kernels/common/mat_mul.clembed"
+    },
 #ifdef ENABLE_NCHW_KERNELS
     {
         "nchw/batch_to_space.cl",
diff --git a/src/gpu/cl/kernels/ClNativeMatMulKernel.cpp b/src/gpu/cl/kernels/ClNativeMatMulKernel.cpp
new file mode 100644
index 0000000000..6a4db65922
--- /dev/null
+++ b/src/gpu/cl/kernels/ClNativeMatMulKernel.cpp
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/gpu/cl/kernels/ClNativeMatMulKernel.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "src/core/helpers/AutoConfiguration.h"
+
+#include "arm_compute/core/ITensorPack.h"
+#include "src/common/utils/Log.h"
+#include "src/core/helpers/WindowHelpers.h"
+#include "support/Cast.h"
+#include "utils/TypePrinter.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+namespace kernels
+{
+namespace
+{
+Status validate_matmul_kernel_info(const MatMulKernelInfo &matmul_kernel_info)
+{
+    const bool adj_lhs = matmul_kernel_info.adj_lhs;
+    const bool adj_rhs = matmul_kernel_info.adj_rhs;
+    const int  m0      = matmul_kernel_info.m0;
+    const int  n0      = matmul_kernel_info.n0;
+    const int  k0      = matmul_kernel_info.k0;
+
+    // Validate M0
+    if(!adj_lhs)
+    {
+        // We support any positive integer, but will test & benchmark only 1 to 8 because > 8 will not be efficient
+        ARM_COMPUTE_RETURN_ERROR_ON_MSG(m0 < 1, "Only positive integers are supported for M0 for Lhs non-transposed");
+    }
+    else
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MSG(((m0 & (m0 - 1)) && (m0 != 3)) || (m0 > 16), "Only 1,2,3,4,8,16 are supported for M0 for Lhs transposed");
+    }
+
+    // Validate N0
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(((n0 & (n0 - 1)) && (n0 != 3)) || (n0 > 16), "Only 1,2,3,4,8,16 are supported for N0");
+
+    // Validate K0
+    if(adj_lhs && !adj_rhs)
+    {
+        // We support any positive integer, but will test & benchmark only 1 to 8 because > 8 will not be efficient
+        ARM_COMPUTE_RETURN_ERROR_ON_MSG(k0 < 1, "Only positive integers are supported for K0 for Lhs transposed & Rhs non-transposed");
+    }
+    else
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MSG(((k0 & (k0 - 1)) && (k0 != 3)) || (k0 > 16), "Only 1,2,3,4,8,16 are supported for K0");
+    }
+
+    return Status{};
+}
+} // namespace
+ClNativeMatMulKernel::ClNativeMatMulKernel()
+{
+    _type = CLKernelType::GEMM;
+}
+Status ClNativeMatMulKernel::validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *output, const MatMulKernelInfo &matmul_kernel_info)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs, output);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::F32, DataType::F16);
+    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, rhs);
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_matmul_kernel_info(matmul_kernel_info));
+
+    if(output->total_size() != 0)
+    {
+        const TensorInfo tensor_info_output = output->clone()->set_tensor_shape(misc::shape_calculator::compute_batchmatmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info));
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, output);
+    }
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(matmul_kernel_info.adj_lhs && matmul_kernel_info.adj_rhs, "LHS T and RHS T not implemented");
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(matmul_kernel_info.adj_lhs && !matmul_kernel_info.adj_rhs, "LHS T and RHS NT not implemented");
+
+    return Status{};
+}
+void ClNativeMatMulKernel::configure(const ClCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *output, const MatMulKernelInfo &matmul_kernel_info)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, output, &compile_context, &matmul_kernel_info);
+    ARM_COMPUTE_LOG_PARAMS(lhs, rhs, output, matmul_kernel_info);
+
+    // Output tensor auto initialization if not yet initialized
+    auto_init_if_empty(*output, lhs->clone()->set_tensor_shape(misc::shape_calculator::compute_batchmatmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info)));
+    ARM_COMPUTE_ERROR_THROW_ON(validate(lhs, rhs, output, matmul_kernel_info));
+
+    const int m = output->dimension(1);
+    const int n = output->dimension(0);
+    const int k = matmul_kernel_info.adj_lhs ? lhs->tensor_shape().y() : lhs->tensor_shape().x();
+
+    int m0 = std::min(matmul_kernel_info.m0, m);
+    int n0 = adjust_vec_size(matmul_kernel_info.n0, n);
+
+    // Configure kernel window
+    Window win = calculate_max_window(*output, Steps(n0, m0));
+    win        = win.collapse(win, Window::DimZ);
+    IClKernel::configure_internal(win);
+
+    // Calculate partial (store instead of load) M0 and partial N0 for the partial blocks at the end of a row/column if any. This is to avoid padding.
+    const unsigned int partial_store_m0 = m % m0; // M is output->dimension(1)
+    const unsigned int partial_store_n0 = n % n0; // N is output->dimension(0)
+
+    CLBuildOptions build_opts;
+    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(lhs->data_type()));
+    build_opts.add_option("-DM0=" + support::cpp11::to_string(m0));
+    build_opts.add_option("-DN0=" + support::cpp11::to_string(n0));
+    build_opts.add_option("-DK0=" + support::cpp11::to_string(matmul_kernel_info.k0));
+    build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0));
+    build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0));
+    build_opts.add_option("-DK=" + support::cpp11::to_string(k));
+
+    std::string kernel_name("mat_mul_native");
+    kernel_name += matmul_kernel_info.adj_lhs ? "_t" : "_nt";
+    kernel_name += matmul_kernel_info.adj_rhs ? "_t" : "_nt";
+
+    if(matmul_kernel_info.adj_lhs)
+    {
+        ARM_COMPUTE_ERROR("Only LHS non-transposed kernels are implemented");
+    }
+
+    // A macro guard to compile ONLY the kernel of interest
+    build_opts.add_option("-D" + upper_string(kernel_name));
+
+    // Create kernel
+    _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
+
+    // Set config_id for enabling LWS tuning
+    _config_id = kernel_name;
+    _config_id += "_";
+    _config_id += lower_string(string_from_data_type(lhs->data_type()));
+    _config_id += "_";
+    _config_id += support::cpp11::to_string(output->dimension(1));
+    _config_id += "_";
+    _config_id += support::cpp11::to_string(output->dimension(0));
+    _config_id += "_";
+    _config_id += support::cpp11::to_string(output->dimension(2));
+    _config_id += "_";
+    _config_id += support::cpp11::to_string(m0);
+    _config_id += "_";
+    _config_id += support::cpp11::to_string(n0);
+    _config_id += "_";
+    _config_id += support::cpp11::to_string(matmul_kernel_info.k0);
+}
+
+void ClNativeMatMulKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
+{
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
+
+    const ICLTensor *lhs    = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
+    const ICLTensor *rhs    = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
+    ICLTensor       *output = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
+    ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, output);
+    ARM_COMPUTE_LOG_PARAMS(lhs, rhs, output);
+
+    unsigned int idx              = 0;
+    Window       window_collapsed = window.collapse(ICLKernel::window(), Window::DimZ);
+
+    add_3d_tensor_nhw_argument(idx, lhs);
+    add_3d_tensor_nhw_argument(idx, rhs);
+    add_3d_tensor_nhw_argument(idx, output);
+
+    enqueue(queue, *this, window_collapsed, lws_hint());
+}
+
+} // namespace kernels
+} // namespace opencl
+} // namespace arm_compute
diff --git a/src/gpu/cl/kernels/ClNativeMatMulKernel.h b/src/gpu/cl/kernels/ClNativeMatMulKernel.h
new file mode 100644
index 0000000000..1cd74365df
--- /dev/null
+++ b/src/gpu/cl/kernels/ClNativeMatMulKernel.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_SRC_GPU_CL_KERNELS_CLNATIVEMATMULKERNEL
+#define ACL_SRC_GPU_CL_KERNELS_CLNATIVEMATMULKERNEL
+
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/KernelDescriptors.h"
+#include "src/core/common/Macros.h"
+#include "src/gpu/cl/ClCompileContext.h"
+#include "src/gpu/cl/IClKernel.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+namespace kernels
+{
+class ClNativeMatMulKernel : public IClKernel
+{
+public:
+    ClNativeMatMulKernel();
+    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClNativeMatMulKernel);
+    /** Initialise the kernel's input and output.
+     *
+     * @param[in]  compile_context The compile context to be used.
+     * @param[in]  lhs             Input tensor for the LHS matrix. Data type supported: F32/F16.
+     *                             Dimensions above 2 are collapsed onto dimension 2 and represent the batch.
+     * @param[in]  rhs             Input tensor for the RHS matrix. Data type supported: same as @p lhs.
+     *                             Dimensions above 2 are collapsed onto dimension 2 and represent the batch.
+     * @param[out] output          Output tensor info. Data type supported: same as @p lhs
+     * @param[in]  matmul_info     Attributes for Batch MatMul Kernel
+     */
+    void configure(const ClCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *output, const MatMulKernelInfo &matmul_info);
+    /** Static function to check if given info will lead to a valid configuration
+     *
+     * Similar to @ref ClNativeMatMulKernel::configure()
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *output, const MatMulKernelInfo &matmul_info);
+
+    // Inherited methods overridden:
+    void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
+};
+} // namespace kernels
+} // namespace opencl
+} // namespace arm_compute
+#endif /* ACL_SRC_GPU_CL_KERNELS_CLNATIVEMATMULKERNEL */
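
A closing note on the block-size validation in ClNativeMatMulKernel.cpp above: the predicate ((x & (x - 1)) && (x != 3)) || (x > 16) rejects everything except 1, 2, 3, 4, 8 and 16, since x & (x - 1) is zero exactly for powers of two. Below is a minimal standalone C++ check of that reading; the helper name and test values are mine, not the library's.

#include <cassert>

// Standalone restatement of the block-size predicate used in
// validate_matmul_kernel_info(): a value is accepted only if it is
// a power of two no greater than 16, or exactly 3.
static bool is_supported_block_size(int x)
{
    return !(((x & (x - 1)) && (x != 3)) || (x > 16));
}

int main()
{
    for(int x : {1, 2, 3, 4, 8, 16})
    {
        assert(is_supported_block_size(x)); // accepted sizes
    }
    for(int x : {5, 6, 7, 12, 32})
    {
        assert(!is_supported_block_size(x)); // rejected sizes
    }
    return 0;
}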