Diffstat (limited to 'src')
-rw-r--r--  src/core/CL/cl_kernels/common/mat_mul.cl    | 340
-rw-r--r--  src/core/CL/cl_kernels/tile_helpers.h       |  36
-rw-r--r--  src/gpu/cl/ClKernelLibrary.cpp              |   6
-rw-r--r--  src/gpu/cl/kernels/ClNativeMatMulKernel.cpp |  59
4 files changed, 406 insertions(+), 35 deletions(-)
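
For reference before the diff body: the @note blocks added to mat_mul.cl below document the compile-time defines every kernel variant expects, and ClNativeMatMulKernel::configure() is what assembles them. A minimal sketch of the option set for the new T_T variant (values are illustrative examples only; the real ones come from MatMulKernelInfo and the tensor shapes):

    // Hypothetical option set for mat_mul_native_t_t, mirroring the @note
    // requirements documented in mat_mul.cl. Values are examples, not defaults.
    #include <string>
    #include <vector>

    std::vector<std::string> matmul_t_t_build_options()
    {
        return {
            "-DDATA_TYPE=float",    // element type (float or half)
            "-DM0=4",               // output rows computed per work-item
            "-DN0=4",               // output columns computed per work-item
            "-DK0=4",               // reduction elements consumed per loop iteration
            "-DK=64",               // full size of the reduction dimension
            "-DPARTIAL_STORE_M0=0", // leftover rows (0 when M % M0 == 0)
            "-DPARTIAL_STORE_N0=0", // leftover columns (0 when N % N0 == 0)
            "-DMAT_MUL_NATIVE_T_T", // macro guard: compile only this kernel
        };
    }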
diff --git a/src/core/CL/cl_kernels/common/mat_mul.cl b/src/core/CL/cl_kernels/common/mat_mul.cl
index 7c74e9d07b..956d37a9d8 100644
--- a/src/core/CL/cl_kernels/common/mat_mul.cl
+++ b/src/core/CL/cl_kernels/common/mat_mul.cl
@@ -29,8 +29,11 @@
  *
  * @note the "batch" here expresses the number of matrix multiplications to run in parallel. However, it
  *       should NOT be confused with the batch size of the model. For NHWC the "batch" is the "H" dimension
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
  * @note The block's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=4).
+ * @note The number of leftover output rows/columns must be passed at compile time using -DPARTIAL_STORE_N0 and -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_N0=2, -DPARTIAL_STORE_M0=3)
  * @note The dimension K must be passed at compile time using -DK (e.g. -DK=6)
+ * @note The kernel name in uppercase must be passed at compile time (e.g. -DMAT_MUL_NATIVE_NT_NT)
  * @note Only the following configurations of M0, N0 and K0 are currently supported:
  *  - M0 > 0
  *  - N0 = 1, 2, 3, 4, 8, 16
@@ -44,14 +47,14 @@
  * @param[in]  lhs_h                             The height of the lhs tensor
  * @param[in]  lhs_n                             Number of the matrices (buffers) in the batch
  * @param[in]  lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix
- * @param[in]  rhs_ptr                           Pointer to the rhs matrix. Supported data types: F32/F16
+ * @param[in]  rhs_ptr                           Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr
  * @param[in]  rhs_stride_y                      Stride of the rhs matrix in Y (2nd) dimension (in bytes)
  * @param[in]  rhs_stride_z                      Stride of the rhs tensor in Z (3rd) dimension (in bytes)
  * @param[in]  rhs_w                             The width of the rhs tensor
  * @param[in]  rhs_h                             The height of the rhs tensor
  * @param[in]  rhs_n                             Number of the matrices (buffers) in the batch
  * @param[in]  rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix
- * @param[out] dst_ptr                           Pointer to the dst matrix. Supported data types: F32/F16
+ * @param[out] dst_ptr                           Pointer to the dst matrix. Supported data types: same as @p lhs_ptr
  * @param[in]  dst_stride_y                      Stride of the dst matrix in Y (2nd) dimension (in bytes)
  * @param[in]  dst_stride_z                      Stride of the dst tensor in Z (3rd) dimension (in bytes)
  * @param[in]  dst_w                             The width of the dst tensor
@@ -108,6 +111,7 @@ __kernel void mat_mul_native_nt_nt(
     }

 #if K % K0 != 0
+    /* Leftover Loop */
     for(; k < K; ++k)
     {
         TILE(DATA_TYPE, M0, 1, a);
@@ -152,8 +156,11 @@ __kernel void mat_mul_native_nt_nt(
  *
  * @note the "batch" here expresses the number of matrix multiplications to run in parallel. However, it
  *       should NOT be confused with the batch size of the model. For NHWC the "batch" is the "H" dimension
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
  * @note The block's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=4).
+ * @note The number of leftover output rows/columns must be passed at compile time using -DPARTIAL_STORE_N0 and -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_N0=2, -DPARTIAL_STORE_M0=3)
  * @note The dimension K must be passed at compile time using -DK (e.g. -DK=6)
+ * @note The kernel name in uppercase must be passed at compile time (e.g. -DMAT_MUL_NATIVE_NT_T)
  * @note Only the following configurations of M0, N0 and K0 are currently supported:
  *  - M0 > 0
  *  - N0 = 1, 2, 3, 4, 8, 16
@@ -167,14 +174,14 @@ __kernel void mat_mul_native_nt_nt(
  * @param[in]  lhs_h                             The height of the lhs tensor
  * @param[in]  lhs_n                             Number of the matrices (buffers) in the batch
  * @param[in]  lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix
- * @param[in]  rhs_ptr                           Pointer to the rhs matrix. Supported data types: F32/F16
+ * @param[in]  rhs_ptr                           Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr
  * @param[in]  rhs_stride_y                      Stride of the rhs matrix in Y (2nd) dimension (in bytes)
  * @param[in]  rhs_stride_z                      Stride of the rhs tensor in Z (3rd) dimension (in bytes)
  * @param[in]  rhs_w                             The width of the rhs tensor
  * @param[in]  rhs_h                             The height of the rhs tensor
  * @param[in]  rhs_n                             Number of the matrices (buffers) in the batch
  * @param[in]  rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix
- * @param[out] dst_ptr                           Pointer to the dst matrix. Supported data types: F32/F16
+ * @param[out] dst_ptr                           Pointer to the dst matrix. Supported data types: same as @p lhs_ptr
  * @param[in]  dst_stride_y                      Stride of the dst matrix in Y (2nd) dimension (in bytes)
  * @param[in]  dst_stride_z                      Stride of the dst tensor in Z (3rd) dimension (in bytes)
  * @param[in]  dst_w                             The width of the dst tensor
@@ -239,7 +246,7 @@ __kernel void mat_mul_native_nt_t(TENSOR3D_T(lhs, BUFFER),
             })
         })
         T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, K0, NT, NT, a, bt, acc);
-#else  // GPU_ARCH == GPU_ARCH_MIDGARD
+#else // GPU_ARCH == GPU_ARCH_MIDGARD
         T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, K0, NT, T, a, b, acc);
 #endif // GPU_ARCH == GPU_ARCH_MIDGARD

@@ -276,7 +283,7 @@ __kernel void mat_mul_native_nt_t(TENSOR3D_T(lhs, BUFFER),
             bt[0].s[i] = b[i].s[0];
         })
         T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, 1, NT, NT, a, bt, acc);
-#else  // GPU_ARCH == GPU_ARCH_MIDGARD
+#else // GPU_ARCH == GPU_ARCH_MIDGARD
         T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, 1, NT, T, a, b, acc);
 #endif // GPU_ARCH == GPU_ARCH_MIDGARD

@@ -296,4 +303,323 @@ __kernel void mat_mul_native_nt_t(TENSOR3D_T(lhs, BUFFER),
     T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, 0, dst_stride_y, x_cond, acc, indirect_buffer);
 }
-#endif // defined(MAT_MUL_NATIVE_NT_T)
\ No newline at end of file
+#endif // defined(MAT_MUL_NATIVE_NT_T)
+
+#if defined(MAT_MUL_NATIVE_T_NT)
+/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS transposed, RHS non-transposed - buffer only
+ *
+ * @note the "batch" here expresses the number of matrix multiplications to run in parallel. However, it
+ *       should NOT be confused with the batch size of the model. For NHWC the "batch" is the "H" dimension
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
+ * @note The block's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=4).
+ * @note The number of leftover output rows/columns must be passed at compile time using -DPARTIAL_STORE_N0 and -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_N0=2, -DPARTIAL_STORE_M0=3)
+ * @note The dimension K must be passed at compile time using -DK (e.g. -DK=6)
+ * @note The kernel name in uppercase must be passed at compile time (e.g. -DMAT_MUL_NATIVE_T_NT)
+ * @note Only the following configurations of M0, N0 and K0 are currently supported:
+ *  - M0 = 1, 2, 3, 4, 8, 16
+ *  - N0 = 1, 2, 3, 4, 8, 16
+ *  - K0 > 0
+ * @note Values > 8 for M0 and K0 are not expected to be efficient
+ *
+ * @param[in]  lhs_ptr                           Pointer to the lhs matrix. Supported data types: F32/F16
+ * @param[in]  lhs_stride_y                      Stride of the lhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in]  lhs_stride_z                      Stride of the lhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in]  lhs_w                             The width of the lhs tensor
+ * @param[in]  lhs_h                             The height of the lhs tensor
+ * @param[in]  lhs_n                             Number of the matrices (buffers) in the batch
+ * @param[in]  lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix
+ * @param[in]  rhs_ptr                           Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr
+ * @param[in]  rhs_stride_y                      Stride of the rhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in]  rhs_stride_z                      Stride of the rhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in]  rhs_w                             The width of the rhs tensor
+ * @param[in]  rhs_h                             The height of the rhs tensor
+ * @param[in]  rhs_n                             Number of the matrices (buffers) in the batch
+ * @param[in]  rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix
+ * @param[out] dst_ptr                           Pointer to the dst matrix. Supported data types: same as @p lhs_ptr
+ * @param[in]  dst_stride_y                      Stride of the dst matrix in Y (2nd) dimension (in bytes)
+ * @param[in]  dst_stride_z                      Stride of the dst tensor in Z (3rd) dimension (in bytes)
+ * @param[in]  dst_w                             The width of the dst tensor
+ * @param[in]  dst_h                             The height of the dst tensor
+ * @param[in]  dst_n                             Number of the matrices (buffers) in the batch
+ * @param[in]  dst_offset_first_element_in_bytes The offset of the first element in the dst matrix
+ */
+__kernel void mat_mul_native_t_nt(
+    TENSOR3D_T(lhs, BUFFER),
+    TENSOR3D_T(rhs, BUFFER),
+    TENSOR3D_T(dst, BUFFER))
+{
+    const uint x = GET_SPATIAL_IDX(0, N0, PARTIAL_STORE_N0);
+    const uint y = GET_SPATIAL_IDX(1, M0, PARTIAL_STORE_M0);
+    const uint z = GET_SPATIAL_IDX(2, 1, 0);
+
+    // Compute LHS/RHS/DST matrix address
+    lhs_offset_first_element_in_bytes += y * sizeof(DATA_TYPE) + z * lhs_stride_z;
+    rhs_offset_first_element_in_bytes += x * sizeof(DATA_TYPE) + z * rhs_stride_z;
+    dst_offset_first_element_in_bytes += x * sizeof(DATA_TYPE) + y * dst_stride_y + z * dst_stride_z;
+
+    // Initialize the accumulators
+    TILE(DATA_TYPE, M0, N0, acc);
+
+    LOOP_UNROLLING(int, i, 0, 1, M0,
+    {
+        acc[i].v = 0.f;
+    })
+
+    int k;
+    for(k = 0; k <= K - K0; k += K0)
+    {
+        TILE(DATA_TYPE, K0, M0, a);
+        TILE(DATA_TYPE, K0, N0, b);
+
+        LOOP_UNROLLING(int, i, 0, 1, K0,
+        {
+            a[i].v = 0.f;
+        })
+
+        LOOP_UNROLLING(int, i, 0, 1, K0,
+        {
+            b[i].v = 0.f;
+        })
+
+        // Load tile from the lhs/rhs tensors
+        T_LOAD(DATA_TYPE, K0, M0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+        T_LOAD(DATA_TYPE, K0, N0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b);
+
+#if GPU_ARCH == GPU_ARCH_MIDGARD
+        // For explanation, see mat_mul_native_nt_t
+        TILE(DATA_TYPE, M0, K0, at);
+        LOOP_UNROLLING(int, i, 0, 1, K0,
+        {
+            LOOP_UNROLLING(int, j, 0, 1, M0,
+            {
+                at[j].s[i] = a[i].s[j];
+            })
+        })
+        T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, K0, NT, NT, at, b, acc);
+#else // GPU_ARCH == GPU_ARCH_MIDGARD
+        T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, K0, T, NT, a, b, acc);
+#endif // GPU_ARCH == GPU_ARCH_MIDGARD
+
+        lhs_offset_first_element_in_bytes += K0 * lhs_stride_y;
+        rhs_offset_first_element_in_bytes += K0 * rhs_stride_y;
+    }
+
+#if K % K0 != 0
+    /* Leftover Loop */
+    for(; k < K; ++k)
+    {
+        TILE(DATA_TYPE, 1, M0, a);
+        TILE(DATA_TYPE, 1, N0, b);
+
+        LOOP_UNROLLING(int, i, 0, 1, 1,
+        {
+            a[i].v = 0.f;
+        })
+
+        LOOP_UNROLLING(int, i, 0, 1, 1,
+        {
+            b[i].v = 0.f;
+        })
+
+        // Load tile from the lhs/rhs tensors
+        T_LOAD(DATA_TYPE, 1, M0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+        T_LOAD(DATA_TYPE, 1, N0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b);
+
+#if GPU_ARCH == GPU_ARCH_MIDGARD
+        // For explanation, see mat_mul_native_nt_t
+        TILE(DATA_TYPE, M0, 1, at);
+        LOOP_UNROLLING(int, j, 0, 1, M0,
+        {
+            at[j].s[0] = a[0].s[j];
+        })
+        T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, 1, NT, NT, at, b, acc);
+#else // GPU_ARCH == GPU_ARCH_MIDGARD
+        T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, 1, T, NT, a, b, acc);
+#endif // GPU_ARCH == GPU_ARCH_MIDGARD
+
+        lhs_offset_first_element_in_bytes += 1 * lhs_stride_y;
+        rhs_offset_first_element_in_bytes += 1 * rhs_stride_y;
+    }
+#endif // K % K0 != 0
+
+    const bool x_cond = PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0;
+    const bool y_cond = PARTIAL_STORE_M0 != 0 && get_global_id(1) == 0;
+
+    TILE(int, M0, 1, indirect_buffer);
+    LOOP_UNROLLING(int, _i, 0, 1, M0,
+    {
+        indirect_buffer[_i].v = min(_i, select(M0 - 1, PARTIAL_STORE_M0 - 1, y_cond));
+    });
+
+    T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, 0, dst_stride_y, x_cond, acc, indirect_buffer);
+}
+#endif // defined(MAT_MUL_NATIVE_T_NT)
+
+#if defined(MAT_MUL_NATIVE_T_T)
+/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS transposed, RHS transposed - buffer only
+ *
+ * @note the "batch" here expresses the number of matrix multiplications to run in parallel. However, it
+ *       should NOT be confused with the batch size of the model. For NHWC the "batch" is the "H" dimension
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
+ * @note The block's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=4).
+ * @note The number of leftover output rows/columns must be passed at compile time using -DPARTIAL_STORE_N0 and -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_N0=2, -DPARTIAL_STORE_M0=3)
+ * @note The dimension K must be passed at compile time using -DK (e.g. -DK=6)
+ * @note The kernel name in uppercase must be passed at compile time (e.g. -DMAT_MUL_NATIVE_T_T)
+ * @note Only the following configurations of M0, N0 and K0 are currently supported:
+ *  - M0 = 1, 2, 3, 4, 8, 16
+ *  - N0 = 1, 2, 3, 4, 8, 16
+ *  - K0 = 1, 2, 3, 4, 8, 16
+ * @note Values > 8 for M0, N0 and K0 are not expected to be efficient
+ *
+ * @param[in]  lhs_ptr                           Pointer to the lhs matrix. Supported data types: F32/F16
+ * @param[in]  lhs_stride_y                      Stride of the lhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in]  lhs_stride_z                      Stride of the lhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in]  lhs_w                             The width of the lhs tensor
+ * @param[in]  lhs_h                             The height of the lhs tensor
+ * @param[in]  lhs_n                             Number of the matrices (buffers) in the batch
+ * @param[in]  lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix
+ * @param[in]  rhs_ptr                           Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr
+ * @param[in]  rhs_stride_y                      Stride of the rhs matrix in Y (2nd) dimension (in bytes)
+ * @param[in]  rhs_stride_z                      Stride of the rhs tensor in Z (3rd) dimension (in bytes)
+ * @param[in]  rhs_w                             The width of the rhs tensor
+ * @param[in]  rhs_h                             The height of the rhs tensor
+ * @param[in]  rhs_n                             Number of the matrices (buffers) in the batch
+ * @param[in]  rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix
+ * @param[out] dst_ptr                           Pointer to the dst matrix. Supported data types: same as @p lhs_ptr
+ * @param[in]  dst_stride_y                      Stride of the dst matrix in Y (2nd) dimension (in bytes)
+ * @param[in]  dst_stride_z                      Stride of the dst tensor in Z (3rd) dimension (in bytes)
+ * @param[in]  dst_w                             The width of the dst tensor
+ * @param[in]  dst_h                             The height of the dst tensor
+ * @param[in]  dst_n                             Number of the matrices (buffers) in the batch
+ * @param[in]  dst_offset_first_element_in_bytes The offset of the first element in the dst matrix
+ */
+__kernel void mat_mul_native_t_t(
+    TENSOR3D_T(lhs, BUFFER),
+    TENSOR3D_T(rhs, BUFFER),
+    TENSOR3D_T(dst, BUFFER))
+{
+    const uint x = GET_SPATIAL_IDX(0, N0, PARTIAL_STORE_N0);
+    const uint y = GET_SPATIAL_IDX(1, M0, PARTIAL_STORE_M0);
+    const uint z = GET_SPATIAL_IDX(2, 1, 0);
+
+    // Compute LHS/RHS/DST matrix address
+    lhs_offset_first_element_in_bytes += y * sizeof(DATA_TYPE) + z * lhs_stride_z;
+    rhs_offset_first_element_in_bytes += x * rhs_stride_y + z * rhs_stride_z;
+    dst_offset_first_element_in_bytes += x * sizeof(DATA_TYPE) + y * dst_stride_y + z * dst_stride_z;
+
+    // Initialize the accumulators
+    TILE(DATA_TYPE, M0, N0, acc);
+
+    LOOP_UNROLLING(int, i, 0, 1, M0,
+    {
+        acc[i].v = 0.f;
+    })
+
+    int k;
+    for(k = 0; k <= K - K0; k += K0)
+    {
+        TILE(DATA_TYPE, K0, M0, a);
+        TILE(DATA_TYPE, N0, K0, b);
+
+        LOOP_UNROLLING(int, i, 0, 1, K0,
+        {
+            a[i].v = 0.f;
+        })
+
+        LOOP_UNROLLING(int, i, 0, 1, N0,
+        {
+            b[i].v = 0.f;
+        })
+
+        // Load tile from the lhs/rhs tensors
+        T_LOAD(DATA_TYPE, K0, M0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+        T_LOAD(DATA_TYPE, N0, K0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b);
+
+#if GPU_ARCH == GPU_ARCH_MIDGARD
+        // For explanation, see mat_mul_native_nt_t
+        TILE(DATA_TYPE, M0, K0, at);
+        TILE(DATA_TYPE, K0, N0, bt);
+
+        LOOP_UNROLLING(int, i, 0, 1, K0,
+        {
+            LOOP_UNROLLING(int, j, 0, 1, M0,
+            {
+                at[j].s[i] = a[i].s[j];
+            })
+        })
+
+        LOOP_UNROLLING(int, i, 0, 1, N0,
+        {
+            LOOP_UNROLLING(int, j, 0, 1, K0,
+            {
+                bt[j].s[i] = b[i].s[j];
+            })
+        })
+
+        T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, K0, NT, NT, at, bt, acc);
+#else // GPU_ARCH == GPU_ARCH_MIDGARD
+        T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, K0, T, T, a, b, acc);
+#endif // GPU_ARCH == GPU_ARCH_MIDGARD
+
+        lhs_offset_first_element_in_bytes += K0 * lhs_stride_y;
+        rhs_offset_first_element_in_bytes += K0 * sizeof(DATA_TYPE);
+    }
+
+#if K % K0 != 0
+    /* Leftover Loop */
+    for(; k < K; ++k)
+    {
+        TILE(DATA_TYPE, 1, M0, a);
+        TILE(DATA_TYPE, N0, 1, b);
+
+        LOOP_UNROLLING(int, i, 0, 1, 1,
+        {
+            a[i].v = 0.f;
+        })
+
+        LOOP_UNROLLING(int, i, 0, 1, N0,
+        {
+            b[i].v = 0.f;
+        })
+
+        // Load tile from the lhs/rhs tensors
+        T_LOAD(DATA_TYPE, 1, M0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a);
+        T_LOAD(DATA_TYPE, N0, 1, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b);
+
+#if GPU_ARCH == GPU_ARCH_MIDGARD
+        // For explanation, see mat_mul_native_nt_t
+        TILE(DATA_TYPE, M0, 1, at);
+        TILE(DATA_TYPE, 1, N0, bt);
+
+        LOOP_UNROLLING(int, j, 0, 1, M0,
+        {
+            at[j].s[0] = a[0].s[j];
+        })
+
+        LOOP_UNROLLING(int, i, 0, 1, N0,
+        {
+            bt[0].s[i] = b[i].s[0];
+        })
+
+        T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, 1, NT, NT, at, bt, acc);
+#else // GPU_ARCH == GPU_ARCH_MIDGARD
+        T_MMUL(DATA_TYPE, DATA_TYPE, DATA_TYPE, M0, N0, 1, T, T, a, b, acc);
+#endif // GPU_ARCH == GPU_ARCH_MIDGARD
+
+        lhs_offset_first_element_in_bytes += 1 * lhs_stride_y;
+        rhs_offset_first_element_in_bytes += 1 * sizeof(DATA_TYPE);
+    }
+#endif // K % K0 != 0
+
+    const bool x_cond = PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0;
+    const bool y_cond = PARTIAL_STORE_M0 != 0 && get_global_id(1) == 0;
+
+    TILE(int, M0, 1, indirect_buffer);
+    LOOP_UNROLLING(int, _i, 0, 1, M0,
+    {
+        indirect_buffer[_i].v = min(_i, select(M0 - 1, PARTIAL_STORE_M0 - 1, y_cond));
+    });
+
+    T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, 0, dst_stride_y, x_cond, acc, indirect_buffer);
+}
+#endif // defined(MAT_MUL_NATIVE_T_T)
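
With mat_mul.cl now covering all four adj_lhs/adj_rhs combinations, the only difference between the variants is how each operand is indexed along the reduction dimension. A scalar reference for one batch (an illustration of the semantics, not ACL code; names are hypothetical):

    #include <cstddef>
    #include <vector>

    // Scalar reference for one batch: dst is M x N, K is the reduction dimension.
    // lhs is stored M x K when non-transposed (nt_*) and K x M when transposed (t_*);
    // rhs is stored K x N when non-transposed (*_nt) and N x K when transposed (*_t).
    void matmul_reference(const std::vector<float> &lhs, const std::vector<float> &rhs,
                          std::vector<float> &dst, std::size_t M, std::size_t N, std::size_t K,
                          bool adj_lhs, bool adj_rhs)
    {
        for(std::size_t m = 0; m < M; ++m)
        {
            for(std::size_t n = 0; n < N; ++n)
            {
                float acc = 0.f;
                for(std::size_t k = 0; k < K; ++k)
                {
                    const float a = adj_lhs ? lhs[k * M + m] : lhs[m * K + k]; // t_*: row index is k
                    const float b = adj_rhs ? rhs[n * K + k] : rhs[k * N + n]; // *_t: row index is n
                    acc += a * b;
                }
                dst[m * N + n] = acc;
            }
        }
    }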
diff --git a/src/core/CL/cl_kernels/tile_helpers.h b/src/core/CL/cl_kernels/tile_helpers.h
index 5d397ad333..872f4c0b57 100644
--- a/src/core/CL/cl_kernels/tile_helpers.h
+++ b/src/core/CL/cl_kernels/tile_helpers.h
@@ -1297,6 +1297,42 @@
         }) \
     }

+#define T_MMUL_T_NT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_T_NT_##LHS_DATA_TYPE##_##RHS_DATA_TYPE##_##DST_DATA_TYPE(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_T_NT_float_float_float(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_T_NT_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_T_NT_half_half_float(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_T_NT_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_T_NT_half_half_half(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_T_NT_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_T_NT_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) \
+    { \
+        LOOP_UNROLLING(int, _m, 0, 1, M0, \
+        { \
+            LOOP_UNROLLING(int, _n, 0, 1, N0, \
+            { \
+                LOOP_UNROLLING(int, _k, 0, 1, K0, \
+                { \
+                    dst[_m].s[_n] = fma((DST_DATA_TYPE)(lhs[_k].s[_m]), (DST_DATA_TYPE)(rhs[_k].s[_n]), dst[_m].s[_n]); \
+                }) \
+            }) \
+        }) \
+    }
+
+#define T_MMUL_T_T(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_T_T_##LHS_DATA_TYPE##_##RHS_DATA_TYPE##_##DST_DATA_TYPE(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_T_T_float_float_float(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_T_T_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_T_T_half_half_float(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_T_T_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_T_T_half_half_half(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_T_T_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
+#define T_MMUL_T_T_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) \
+    { \
+        LOOP_UNROLLING(int, _m, 0, 1, M0, \
+        { \
+            LOOP_UNROLLING(int, _n, 0, 1, N0, \
+            { \
+                LOOP_UNROLLING(int, _k, 0, 1, K0, \
+                { \
+                    dst[_m].s[_n] = fma((DST_DATA_TYPE)(lhs[_k].s[_m]), (DST_DATA_TYPE)(rhs[_n].s[_k]), dst[_m].s[_n]); \
+                }) \
+            }) \
+        }) \
+    }
+
 #define T_MMUL_NT_T_INTEGER8(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) \
     ({ \
         LOOP_UNROLLING(int, _m, 0, 1, M0, \
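
The T_MMUL_T_NT and T_MMUL_T_T helpers just added differ from the NT forms only in tile indexing: with a transposed LHS the tile is stored as K0 rows of M0 lanes, so _k picks the row and _m the lane (and for T_T the RHS tile is N0 rows of K0 lanes). A scalar mirror of the T_T fma loop, with tiles modeled as plain 2-D arrays (illustrative only, not ACL code):

    // Scalar mirror of T_MMUL_T_T_FLOAT: lhs tile is K0 x M0 (row _k, lane _m),
    // rhs tile is N0 x K0 (row _n, lane _k), dst tile is M0 x N0.
    template <int M0, int N0, int K0>
    void t_mmul_t_t(const float (&lhs)[K0][M0], const float (&rhs)[N0][K0], float (&dst)[M0][N0])
    {
        for(int _m = 0; _m < M0; ++_m)
            for(int _n = 0; _n < N0; ++_n)
                for(int _k = 0; _k < K0; ++_k)
                    dst[_m][_n] += lhs[_k][_m] * rhs[_n][_k]; // matches fma(lhs[_k].s[_m], rhs[_n].s[_k], dst[_m].s[_n])
    }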
"common/l2_normalize.cl" }, + { "mat_mul_native_nt_nt", "common/mat_mul.cl" }, + { "mat_mul_native_nt_t", "common/mat_mul.cl" }, + { "mat_mul_native_t_nt", "common/mat_mul.cl" }, + { "mat_mul_native_t_t", "common/mat_mul.cl" }, { "max_unpooling_layer_2", "common/unpooling_layer.cl" }, { "mean_stddev_normalization", "common/mean_stddev_normalization.cl" }, { "memset", "common/memset.cl" }, @@ -359,8 +363,6 @@ const std::map<std::string, std::string> ClKernelLibrary::_kernel_program_map = { "strided_slice", "common/slice_ops.cl" }, { "tile", "common/tile.cl" }, { "transpose", "common/transpose.cl" }, - { "mat_mul_native_nt_nt", "common/mat_mul.cl" }, - { "mat_mul_native_nt_t", "common/mat_mul.cl" }, #ifdef ENABLE_NCHW_KERNELS { "batch_to_space_nchw", "nchw/batch_to_space.cl" }, { "batch_to_space_static_nchw", "nchw/batch_to_space.cl" }, diff --git a/src/gpu/cl/kernels/ClNativeMatMulKernel.cpp b/src/gpu/cl/kernels/ClNativeMatMulKernel.cpp index 6a4db65922..ffbaf49c02 100644 --- a/src/gpu/cl/kernels/ClNativeMatMulKernel.cpp +++ b/src/gpu/cl/kernels/ClNativeMatMulKernel.cpp @@ -50,28 +50,40 @@ Status validate_matmul_kernel_info(const MatMulKernelInfo &matmul_kernel_info) const int k0 = matmul_kernel_info.k0; // Validate M0 - if(!adj_lhs) - { - // We support any positive integer, but will test & benchmark only 1 to 8 because > 8 will not efficient - ARM_COMPUTE_RETURN_ERROR_ON_MSG(m0 < 1, "Only positive integers are supported for M0 for Lhs non-transposed"); - } - else + ARM_COMPUTE_RETURN_ERROR_ON_MSG(m0 < 1, "Only positive integers are supported for M0"); + + if(adj_lhs) { - ARM_COMPUTE_RETURN_ERROR_ON_MSG((m0 & (m0 - 1)) && (m0 != 3) && (m0 > 16), "Only 1,2,3,4,8,16 are supported for N0 for Lhs transposed"); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(((m0 & (m0 - 1)) && (m0 != 3)) || (m0 > 16), "Only 1,2,3,4,8,16 are supported for N0 for Lhs transposed"); } // Validate N0 - ARM_COMPUTE_RETURN_ERROR_ON_MSG((n0 & (n0 - 1)) && (n0 != 3) && (n0 > 16), "Only 1,2,3,4,8,16 are supported for N0"); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(n0 < 1, "Only positive integers are supported for N0"); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(((n0 & (n0 - 1)) && (n0 != 3)) || (n0 > 16), "Only 1,2,3,4,8,16 are supported for N0"); // Validate K0 - if(adj_lhs && !adj_rhs) + ARM_COMPUTE_RETURN_ERROR_ON_MSG(k0 < 1, "Only positive integers are supported for K0"); + if(!adj_lhs || adj_rhs) { - // We support any positive integer, but will test & benchmark only 1 to 8 because > 8 will not efficient - ARM_COMPUTE_RETURN_ERROR_ON_MSG(k0 < 1, "Only positive integers are supported for K0 for Lhs transposed & Rhs non-transposed"); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(((k0 & (k0 - 1)) && (k0 != 3)) || (k0 > 16), "Only 1,2,3,4,8,16 are supported for K0"); } - else + + return Status{}; +} + +Status validate_input_shapes(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const MatMulKernelInfo &matmul_kernel_info) +{ + const size_t lhs_k = matmul_kernel_info.adj_lhs ? lhs_shape.y() : lhs_shape.x(); + const size_t rhs_k = matmul_kernel_info.adj_rhs ? 
diff --git a/src/gpu/cl/kernels/ClNativeMatMulKernel.cpp b/src/gpu/cl/kernels/ClNativeMatMulKernel.cpp
index 6a4db65922..ffbaf49c02 100644
--- a/src/gpu/cl/kernels/ClNativeMatMulKernel.cpp
+++ b/src/gpu/cl/kernels/ClNativeMatMulKernel.cpp
@@ -50,28 +50,40 @@ Status validate_matmul_kernel_info(const MatMulKernelInfo &matmul_kernel_info)
     const int k0 = matmul_kernel_info.k0;

     // Validate M0
-    if(!adj_lhs)
-    {
-        // We support any positive integer, but will test & benchmark only 1 to 8 because > 8 will not efficient
-        ARM_COMPUTE_RETURN_ERROR_ON_MSG(m0 < 1, "Only positive integers are supported for M0 for Lhs non-transposed");
-    }
-    else
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(m0 < 1, "Only positive integers are supported for M0");
+
+    if(adj_lhs)
     {
-        ARM_COMPUTE_RETURN_ERROR_ON_MSG((m0 & (m0 - 1)) && (m0 != 3) && (m0 > 16), "Only 1,2,3,4,8,16 are supported for N0 for Lhs transposed");
+        ARM_COMPUTE_RETURN_ERROR_ON_MSG(((m0 & (m0 - 1)) && (m0 != 3)) || (m0 > 16), "Only 1,2,3,4,8,16 are supported for M0 for Lhs transposed");
     }

     // Validate N0
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG((n0 & (n0 - 1)) && (n0 != 3) && (n0 > 16), "Only 1,2,3,4,8,16 are supported for N0");
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(n0 < 1, "Only positive integers are supported for N0");
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(((n0 & (n0 - 1)) && (n0 != 3)) || (n0 > 16), "Only 1,2,3,4,8,16 are supported for N0");

     // Validate K0
-    if(adj_lhs && !adj_rhs)
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(k0 < 1, "Only positive integers are supported for K0");
+    if(!adj_lhs || adj_rhs)
     {
-        // We support any positive integer, but will test & benchmark only 1 to 8 because > 8 will not efficient
-        ARM_COMPUTE_RETURN_ERROR_ON_MSG(k0 < 1, "Only positive integers are supported for K0 for Lhs transposed & Rhs non-transposed");
+        ARM_COMPUTE_RETURN_ERROR_ON_MSG(((k0 & (k0 - 1)) && (k0 != 3)) || (k0 > 16), "Only 1,2,3,4,8,16 are supported for K0");
     }
-    else
+
+    return Status{};
+}
+
+Status validate_input_shapes(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const MatMulKernelInfo &matmul_kernel_info)
+{
+    const size_t lhs_k = matmul_kernel_info.adj_lhs ? lhs_shape.y() : lhs_shape.x();
+    const size_t rhs_k = matmul_kernel_info.adj_rhs ? rhs_shape.x() : rhs_shape.y();
+
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_k != rhs_k, "K dimension in Lhs and Rhs matrices must match.");
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_shape.total_size() == 0, "Lhs tensor can't be empty");
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(rhs_shape.total_size() == 0, "Rhs tensor can't be empty");
+
+    constexpr size_t batch_dim_start = 2;
+    for(size_t i = batch_dim_start; i < Coordinates::num_max_dimensions; ++i)
     {
-        ARM_COMPUTE_RETURN_ERROR_ON_MSG((k0 & (k0 - 1)) && (k0 != 3) && (k0 > 16), "Only 1,2,3,4,8,16 are supported for K0");
+        ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_shape[i] != rhs_shape[i], "Batch dimension broadcasting is not supported");
     }

     return Status{};
@@ -87,15 +99,14 @@ Status ClNativeMatMulKernel::validate(const ITensorInfo *lhs, const ITensorInfo
     ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::F32, DataType::F16);
     ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, rhs);
     ARM_COMPUTE_RETURN_ON_ERROR(validate_matmul_kernel_info(matmul_kernel_info));
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_input_shapes(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info));

     if(output->total_size() != 0)
     {
-        const TensorInfo tensor_info_output = output->clone()->set_tensor_shape(misc::shape_calculator::compute_batchmatmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info));
+        const TensorInfo tensor_info_output = output->clone()->set_tensor_shape(misc::shape_calculator::compute_matmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info));
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output);
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, output);
     }
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(matmul_kernel_info.adj_lhs && matmul_kernel_info.adj_rhs, "LHS T and RHS T not implemented");
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(matmul_kernel_info.adj_lhs && !matmul_kernel_info.adj_rhs, "LHS T and RHS NT not implemented");

     return Status{};
 }
@@ -105,14 +116,15 @@ void ClNativeMatMulKernel::configure(const ClCompileContext &compile_context, IT
     ARM_COMPUTE_LOG_PARAMS(lhs, rhs, output, matmul_kernel_info);

     // output tensor auto initialization if not yet initialized
-    auto_init_if_empty(*output, lhs->clone()->set_tensor_shape(misc::shape_calculator::compute_batchmatmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info)));
+    auto_init_if_empty(*output, lhs->clone()->set_tensor_shape(misc::shape_calculator::compute_matmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info)));
     ARM_COMPUTE_ERROR_THROW_ON(validate(lhs, rhs, output, matmul_kernel_info));

-    const int m = output->dimension(1);
-    const int n = output->dimension(0);
-    const int k = matmul_kernel_info.adj_lhs ? lhs->tensor_shape().y() : lhs->tensor_shape().x();
+    const int  m       = output->dimension(1);
+    const int  n       = output->dimension(0);
+    const int  k       = matmul_kernel_info.adj_lhs ? lhs->tensor_shape().y() : lhs->tensor_shape().x();
+    const bool adj_lhs = matmul_kernel_info.adj_lhs;

-    int m0 = std::min(matmul_kernel_info.m0, m);
+    int m0 = adj_lhs ? adjust_vec_size(matmul_kernel_info.m0, m) : std::min(matmul_kernel_info.m0, m);
     int n0 = adjust_vec_size(matmul_kernel_info.n0, n);

     // Configure kernel window
@@ -137,11 +149,6 @@ void ClNativeMatMulKernel::configure(const ClCompileContext &compile_context, IT
     kernel_name += matmul_kernel_info.adj_lhs ? "_t" : "_nt";
     kernel_name += matmul_kernel_info.adj_rhs ? "_t" : "_nt";

-    if(matmul_kernel_info.adj_lhs)
-    {
-        ARM_COMPUTE_ERROR("Only Implemented LHS non-transposed kernels");
-    }
-
     // A macro guard to compile ONLY the kernel of interest
     build_opts.add_option("-D" + upper_string(kernel_name));