From 2b6ebfe4270b06b09e45f306e8384950aeca7e4e Mon Sep 17 00:00:00 2001 From: Ramy Elgammal Date: Thu, 9 Mar 2023 21:15:37 +0000 Subject: Implement OpenCL MatMul for Lhs NT Rhs T/NT FP32/16 - Implement ClNativeMatMulKernel class - Implement opencl kernel for LHS non-transposed and RHS non-transposed - Implement opencl kernel for LHS non-transposed and RHS transposed - Add test fixture and dataset for matmul - Implement transpose_tensor() for reference implementation to transpose high dimensional tensors Resolves: COMPMID-5944, COMPMID-5951 Co-authored-by: Gunes Bayir Co-authored-by: Ramy Elgammal Change-Id: I1d5b8978f41be27baddb3153ade880472141573f Signed-off-by: Gunes Bayir Signed-off-by: Ramy Elgammal Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9333 Tested-by: Arm Jenkins Reviewed-by: Gian Marco Iodice Benchmark: Arm Jenkins --- src/core/CL/cl_kernels/common/mat_mul.cl | 299 +++++++++++++++++++++++++++++++ src/core/CL/cl_kernels/tile_helpers.h | 21 ++- 2 files changed, 317 insertions(+), 3 deletions(-) create mode 100644 src/core/CL/cl_kernels/common/mat_mul.cl (limited to 'src/core/CL') diff --git a/src/core/CL/cl_kernels/common/mat_mul.cl b/src/core/CL/cl_kernels/common/mat_mul.cl new file mode 100644 index 0000000000..7c74e9d07b --- /dev/null +++ b/src/core/CL/cl_kernels/common/mat_mul.cl @@ -0,0 +1,299 @@ +/* + * Copyright (c) 2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "helpers.h" +#include "tile_helpers.h" + +#if defined(MAT_MUL_NATIVE_NT_NT) +/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS non-transposed, RHS non-transposed - buffer only + * + * @note the "batch" here expresses the number of matrix multiplications to run in parallel. However, it + * should NOT be confused with the batch size of the model. For NHWC the "batch" is the "H" dimension + * @note The block's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=4). + * @note The dimension K must be passed at compile time using -DK (e.g. -DK=6) + * @note Only the following configurations of M0, N0 and K0 are currently supported: + * - M0 > 0 + * - N0 = 1, 2, 3, 4, 8, 16 + * - K0 = 1, 2, 3, 4, 8, 16 + * @note Values > 8 for M0 are not expected to be efficient + * + * @param[in] lhs_ptr Pointer to the lhs matrix. 
Supported data types: F32/F16 + * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) + * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) + * @param[in] lhs_w The width of the lhs tensor + * @param[in] lhs_h The height of the lhs tensor + * @param[in] lhs_n Number of the matrices (buffers) in the batch + * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix + * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: F32/F16 + * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) + * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) + * @param[in] rhs_w The width of the rhs tensor + * @param[in] rhs_h The height of the rhs tensor + * @param[in] rhs_n Number of the matrices (buffers) in the batch + * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix + * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: F32/F16 + * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) + * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) + * @param[in] dst_w The width of the dst tensor + * @param[in] dst_h The height of the dst tensor + * @param[in] dst_n Number of the matrices (buffers) in the batch + * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix + */ +__kernel void mat_mul_native_nt_nt( + TENSOR3D_T(lhs, BUFFER), + TENSOR3D_T(rhs, BUFFER), + TENSOR3D_T(dst, BUFFER)) +{ + const uint x = GET_SPATIAL_IDX(0, N0, PARTIAL_STORE_N0); + const uint y = GET_SPATIAL_IDX(1, M0, PARTIAL_STORE_M0); + const uint z = GET_SPATIAL_IDX(2, 1, 0); + + // Compute LHS/RHS/DST matrix address + lhs_offset_first_element_in_bytes += y * lhs_stride_y + z * lhs_stride_z; + rhs_offset_first_element_in_bytes += x * sizeof(DATA_TYPE) + z * rhs_stride_z; + dst_offset_first_element_in_bytes += x * sizeof(DATA_TYPE) + y * dst_stride_y + z * dst_stride_z; + + // Initialize the accumulators + TILE(DATA_TYPE, M0, N0, acc); + + LOOP_UNROLLING(int, i, 0, 1, M0, + { + acc[i].v = 0.f; + }) + + int k; + for(k = 0; k <= K - K0; k += K0) + { + TILE(DATA_TYPE, M0, K0, a); + TILE(DATA_TYPE, K0, N0, b); + + LOOP_UNROLLING(int, i, 0, 1, M0, + { + a[i].v = 0.f; + }) + + LOOP_UNROLLING(int, i, 0, 1, K0, + { + b[i].v = 0.f; + }) + + // Load tile from the lhs/rhs tensors + T_LOAD(DATA_TYPE, M0, K0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a); + T_LOAD(DATA_TYPE, K0, N0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b); + + T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, K0, NT, NT, a, b, acc); + + lhs_offset_first_element_in_bytes += K0 * sizeof(DATA_TYPE); + rhs_offset_first_element_in_bytes += K0 * rhs_stride_y; + } + +#ifdef K % K0 != 0 + for(; k < K; ++k) + { + TILE(DATA_TYPE, M0, 1, a); + TILE(DATA_TYPE, 1, N0, b); + + LOOP_UNROLLING(int, i, 0, 1, M0, + { + a[i].v = 0.f; + }) + + LOOP_UNROLLING(int, i, 0, 1, 1, + { + b[i].v = 0.f; + }) + + // Load tile from the lhs/rhs tensors + T_LOAD(DATA_TYPE, M0, 1, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a); + T_LOAD(DATA_TYPE, 1, N0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b); + + T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, 1, NT, NT, a, b, acc); + + lhs_offset_first_element_in_bytes += 1 * sizeof(DATA_TYPE); + rhs_offset_first_element_in_bytes += 1 * rhs_stride_y; + } +#endif // K % K0 != 0 + + const bool x_cond = PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0; + const bool y_cond = 
PARTIAL_STORE_M0 != 0 && get_global_id(1) == 0; + + TILE(int, M0, 1, indirect_buffer); + LOOP_UNROLLING(int, _i, 0, 1, M0, + { + indirect_buffer[_i].v = min(_i, select(M0 - 1, PARTIAL_STORE_M0 - 1, y_cond)); + }); + + T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, 0, dst_stride_y, x_cond, acc, indirect_buffer); +} +#endif // defined(MAT_MUL_NATIVE_NT_NT) + +#if defined(MAT_MUL_NATIVE_NT_T) +/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS non-transposed, RHS transposed - buffer only + * + * @note the "batch" here expresses the number of matrix multiplications to run in parallel. However, it + * should NOT be confused with the batch size of the model. For NHWC the "batch" is the "H" dimension + * @note The block's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=4). + * @note The dimension K must be passed at compile time using -DK (e.g. -DK=6) + * @note Only the following configurations of M0, N0 and K0 are currently supported: + * - M0 > 0 + * - N0 = 1, 2, 3, 4, 8, 16 + * - K0 = 1, 2, 3, 4, 8, 16 + * @note Values > 8 for M0, N0 and K0 are not expected to be efficient + * + * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: F32/F16 + * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) + * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) + * @param[in] lhs_w The width of the lhs tensor + * @param[in] lhs_h The height of the lhs tensor + * @param[in] lhs_n Number of the matrices (buffers) in the batch + * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix + * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: F32/F16 + * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) + * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) + * @param[in] rhs_w The width of the rhs tensor + * @param[in] rhs_h The height of the rhs tensor + * @param[in] rhs_n Number of the matrices (buffers) in the batch + * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix + * @param[out] dst_ptr Pointer to the dst matrix. 
Supported data types: F32/F16 + * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) + * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) + * @param[in] dst_w The width of the dst tensor + * @param[in] dst_h The height of the dst tensor + * @param[in] dst_n Number of the matrices (buffers) in the batch + * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix + */ +__kernel void mat_mul_native_nt_t(TENSOR3D_T(lhs, BUFFER), + TENSOR3D_T(rhs, BUFFER), + TENSOR3D_T(dst, BUFFER)) + +{ + const uint x = GET_SPATIAL_IDX(0, N0, PARTIAL_STORE_N0); + const uint y = GET_SPATIAL_IDX(1, M0, PARTIAL_STORE_M0); + const uint z = GET_SPATIAL_IDX(2, 1, 0); + + // Compute LHS/RHS/DST matrix address + lhs_offset_first_element_in_bytes += y * lhs_stride_y + z * lhs_stride_z; + rhs_offset_first_element_in_bytes += x * rhs_stride_y + z * rhs_stride_z; + dst_offset_first_element_in_bytes += x * sizeof(DATA_TYPE) + y * dst_stride_y + z * dst_stride_z; + + // Initialize the accumulators + TILE(DATA_TYPE, M0, N0, acc); + + LOOP_UNROLLING(int, i, 0, 1, M0, + { + acc[i].v = 0.f; + }) + + int k; + for(k = 0; k <= K - K0; k += K0) + { + TILE(DATA_TYPE, M0, K0, a); + TILE(DATA_TYPE, N0, K0, b); + + LOOP_UNROLLING(int, i, 0, 1, M0, + { + a[i].v = 0.f; + }) + + LOOP_UNROLLING(int, i, 0, 1, N0, + { + b[i].v = 0.f; + }) + + // Load tile from the lhs/rhs tensors + T_LOAD(DATA_TYPE, M0, K0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a); + T_LOAD(DATA_TYPE, N0, K0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b); + +#if GPU_ARCH == GPU_ARCH_MIDGARD + // This part is written to decrease the number of loop unrollings caused + // by T_MMUL. The NT/NT version is partly vectorized and uses less number + // of loop unrollings, and code behaves as expected. Although this is not + // a performant solution for the specified architecture, it is necessary + // to overcome some limitations. 
+ TILE(DATA_TYPE, K0, N0, bt); + LOOP_UNROLLING(int, i, 0, 1, N0, + { + LOOP_UNROLLING(int, j, 0, 1, K0, + { + bt[j].s[i] = b[i].s[j]; + }) + }) + T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, K0, NT, NT, a, bt, acc); +#else // GPU_ARCH == GPU_ARCH_MIDGARD + T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, K0, NT, T, a, b, acc); +#endif // GPU_ARCH == GPU_ARCH_MIDGARD + + lhs_offset_first_element_in_bytes += K0 * sizeof(DATA_TYPE); + rhs_offset_first_element_in_bytes += K0 * sizeof(DATA_TYPE); + } + +#if K % K0 != 0 + /* Leftover Loop */ + for(; k < K; ++k) + { + TILE(DATA_TYPE, M0, 1, a); + TILE(DATA_TYPE, N0, 1, b); + + LOOP_UNROLLING(int, i, 0, 1, M0, + { + a[i].v = 0.f; + }) + + LOOP_UNROLLING(int, i, 0, 1, N0, + { + b[i].v = 0.f; + }) + + // Load tile from the lhs/rhs tensors + T_LOAD(DATA_TYPE, M0, 1, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a); + T_LOAD(DATA_TYPE, N0, 1, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b); + +#if GPU_ARCH == GPU_ARCH_MIDGARD + // See the main loop for the explanation of this part + TILE(DATA_TYPE, 1, N0, bt); + LOOP_UNROLLING(int, i, 0, 1, N0, + { + bt[0].s[i] = b[i].s[0]; + }) + T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, 1, NT, NT, a, bt, acc); +#else // GPU_ARCH == GPU_ARCH_MIDGARD + T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, 1, NT, T, a, b, acc); +#endif // GPU_ARCH == GPU_ARCH_MIDGARD + + lhs_offset_first_element_in_bytes += 1 * sizeof(DATA_TYPE); + rhs_offset_first_element_in_bytes += 1 * sizeof(DATA_TYPE); + } +#endif // K % K0 != 0 + + const bool x_cond = PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0; + const bool y_cond = PARTIAL_STORE_M0 != 0 && get_global_id(1) == 0; + + TILE(int, M0, 1, indirect_buffer); + LOOP_UNROLLING(int, _i, 0, 1, M0, + { + indirect_buffer[_i].v = min(_i, select(M0 - 1, PARTIAL_STORE_M0 - 1, y_cond)); + }); + + T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, 0, dst_stride_y, x_cond, acc, indirect_buffer); +} +#endif // defined(MAT_MUL_NATIVE_NT_T) \ No newline at end of file diff --git a/src/core/CL/cl_kernels/tile_helpers.h b/src/core/CL/cl_kernels/tile_helpers.h index 1e4dddd2db..5d397ad333 100644 --- a/src/core/CL/cl_kernels/tile_helpers.h +++ b/src/core/CL/cl_kernels/tile_helpers.h @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ -#ifndef SRC_CORE_CL_CL_KERNELS_TILE_HELPERS -#define SRC_CORE_CL_CL_KERNELS_TILE_HELPERS +#ifndef ACL_SRC_CORE_CL_CL_KERNELS_TILE_HELPERS +#define ACL_SRC_CORE_CL_CL_KERNELS_TILE_HELPERS // *INDENT-OFF* // clang-format off @@ -1282,6 +1282,21 @@ }) \ } +#define T_MMUL_NT_NT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_NT_##LHS_DATA_TYPE##_##RHS_DATA_TYPE##_##DST_DATA_TYPE(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) +#define T_MMUL_NT_NT_float_float_float(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_NT_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) +#define T_MMUL_NT_NT_half_half_float(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_NT_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) +#define T_MMUL_NT_NT_half_half_half(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_NT_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) +#define T_MMUL_NT_NT_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) \ + { \ + LOOP_UNROLLING(int, _m, 0, 1, M0, \ + { \ + LOOP_UNROLLING(int, _k, 0, 1, K0, \ + { \ + dst[_m].v = fma((DST_DATA_TYPE)(lhs[_m].s[_k]), (rhs[_k].v), dst[_m].v); \ + }) \ + }) \ + } + #define T_MMUL_NT_T_INTEGER8(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) \ ({ \ LOOP_UNROLLING(int, _m, 0, 1, M0, \ @@ -1293,4 +1308,4 @@ }) \ }) -#endif /* SRC_CORE_CL_CL_KERNELS_TILE_HELPERS */ +#endif /* ACL_SRC_CORE_CL_CL_KERNELS_TILE_HELPERS */ -- cgit v1.2.1
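The NT_NT and NT_T kernels above are driven entirely by compile-time defines (MAT_MUL_NATIVE_NT_NT / MAT_MUL_NATIVE_NT_T, DATA_TYPE, M0, N0, K0, K, PARTIAL_STORE_M0, PARTIAL_STORE_N0). Below is a minimal host-side sketch, not part of this patch, of how those defines and the NDRange might be assembled; in the library itself this wiring is done by ClNativeMatMulKernel, whose configuration code lies outside the 'src/core/CL' scope of this view. The function name configure_nt_nt, and the assumption that PARTIAL_STORE_* is the remainder of M/N by the tile size, are illustrative only.

#include <string>
#include <cstddef>

struct MatMulBuild
{
    std::string build_options; // string handed to clBuildProgram
    std::size_t gws[3];        // global work size for clEnqueueNDRangeKernel
};

// Hypothetical helper, assuming M x K (lhs), K x N (rhs), M x N (dst) per batch.
// Other defines referenced by the kernel (e.g. GPU_ARCH) would come from the
// library's common build options and are omitted here.
MatMulBuild configure_nt_nt(int M, int N, int K, int M0, int N0, int K0, int batches, bool fp16)
{
    MatMulBuild b{};
    const int partial_m0 = M % M0; // 0 means "no partial block" in the kernel's PARTIAL_STORE_* checks
    const int partial_n0 = N % N0;

    b.build_options  = " -DMAT_MUL_NATIVE_NT_NT";
    b.build_options += " -DDATA_TYPE=" + std::string(fp16 ? "half" : "float");
    b.build_options += " -DM0=" + std::to_string(M0) + " -DN0=" + std::to_string(N0) + " -DK0=" + std::to_string(K0);
    b.build_options += " -DK=" + std::to_string(K);
    b.build_options += " -DPARTIAL_STORE_M0=" + std::to_string(partial_m0);
    b.build_options += " -DPARTIAL_STORE_N0=" + std::to_string(partial_n0);

    // One work-item per M0 x N0 output tile, one slice per matrix in the batch.
    b.gws[0] = static_cast<std::size_t>((N + N0 - 1) / N0);
    b.gws[1] = static_cast<std::size_t>((M + M0 - 1) / M0);
    b.gws[2] = static_cast<std::size_t>(batches);
    return b;
}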
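For reference, each work-item of mat_mul_native_nt_nt produces one M0 x N0 tile of dst, accumulating over K in K0-wide steps and finishing the K % K0 remainder in a scalar leftover loop. The following plain C++ sketch shows that arithmetic only; it assumes row-major lhs/rhs/dst with the given leading dimensions and ignores the partial-block store handled by T_STORE_INDIRECT_WIDTH_SELECT. The function name is illustrative, not library code.

// One output tile of the LHS-non-transposed / RHS-non-transposed batched matmul,
// anchored at row y, column x of dst.
void tile_nt_nt_reference(const float *lhs, const float *rhs, float *dst,
                          int K, int lhs_ld, int rhs_ld, int dst_ld,
                          int y, int x, int M0, int N0, int K0)
{
    for(int m = 0; m < M0; ++m)
    {
        for(int n = 0; n < N0; ++n)
        {
            float acc = 0.f;

            int k = 0;
            for(; k <= K - K0; k += K0) // main loop: full K0 blocks
            {
                for(int k0 = 0; k0 < K0; ++k0)
                {
                    acc += lhs[(y + m) * lhs_ld + (k + k0)] * rhs[(k + k0) * rhs_ld + (x + n)];
                }
            }
            for(; k < K; ++k) // leftover loop: K % K0 remaining elements
            {
                acc += lhs[(y + m) * lhs_ld + k] * rhs[k * rhs_ld + (x + n)];
            }

            dst[(y + m) * dst_ld + (x + n)] = acc;
        }
    }
}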
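In mat_mul_native_nt_t the rhs tile is loaded as N0 x K0 (one row per output column). On Midgard the kernel first transposes that tile into a K0 x N0 tile bt and then reuses the vectorized NT/NT multiply, as the in-kernel comment explains; only the tile is transposed, never the full rhs tensor. A sketch of just that tile transpose, written in plain C++ rather than the TILE/LOOP_UNROLLING macros (helper name is hypothetical):

// b is the loaded N0 x K0 rhs tile (row-major); bt receives its K0 x N0 transpose.
void transpose_rhs_tile(const float *b, float *bt, int N0, int K0)
{
    for(int n = 0; n < N0; ++n)
    {
        for(int k = 0; k < K0; ++k)
        {
            bt[k * N0 + n] = b[n * K0 + k];
        }
    }
}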
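The T_MMUL_NT_NT_FLOAT macro added to tile_helpers.h accumulates one vector fma per (m, k) pair: the scalar lhs[_m].s[_k] is multiplied against the whole k-th row of the rhs tile, rhs[_k].v, and added into the m-th row of dst. A scalar C++ equivalent of that expansion is shown below; the function name is illustrative, and the inner loop over n corresponds to the single vector fma in the OpenCL macro.

#include <cmath>

// Tiles are row-major: lhs is M0 x K0, rhs is K0 x N0, dst is M0 x N0 (accumulated in place).
void t_mmul_nt_nt_scalar(const float *lhs, const float *rhs, float *dst,
                         int M0, int N0, int K0)
{
    for(int m = 0; m < M0; ++m)
    {
        for(int k = 0; k < K0; ++k)
        {
            for(int n = 0; n < N0; ++n)
            {
                dst[m * N0 + n] = std::fma(lhs[m * K0 + k], rhs[k * N0 + n], dst[m * N0 + n]);
            }
        }
    }
}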