Diffstat (limited to 'src')
-rw-r--r-- | src/core/CL/cl_kernels/common/mat_mul_quantized.cl | 387
-rw-r--r-- | src/core/CL/cl_kernels/tile_helpers.h | 99
-rw-r--r-- | src/gpu/cl/ClKernelLibrary.cpp | 6
-rw-r--r-- | src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp | 224
-rw-r--r-- | src/gpu/cl/kernels/ClMatMulLowpNativeKernel.h | 69
5 files changed, 785 insertions, 0 deletions
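Both quantized kernels in the diff below accumulate in int32 using the standard zero-point decomposition: they sum the raw 8-bit dot products together with per-row LHS sums and per-column RHS sums, then fold the quantization offsets in at the end (the host passes LHS_OFFSET/RHS_OFFSET already negated). A minimal scalar sketch of that identity for a single output element, in plain C++ with illustrative names that are not part of the patch:

```cpp
#include <cstdint>
#include <vector>

// sum_k (a_k - a_off) * (b_k - b_off)
//   = sum_k a_k*b_k - b_off * sum_k a_k - a_off * sum_k b_k + K * a_off * b_off
int32_t quantized_dot(const std::vector<uint8_t> &a, const std::vector<uint8_t> &b,
                      int32_t a_off, int32_t b_off)
{
    const int32_t K = static_cast<int32_t>(a.size());
    int32_t acc   = K * (-a_off) * (-b_off); // mirrors acc[i].v = K * LHS_OFFSET * RHS_OFFSET (offsets negated)
    int32_t a_sum = 0;
    int32_t b_sum = 0;
    for(int32_t k = 0; k < K; ++k)
    {
        acc   += static_cast<int32_t>(a[k]) * static_cast<int32_t>(b[k]); // raw dot product (T_MMUL)
        a_sum += a[k];                                                    // per-row LHS sum
        b_sum += b[k];                                                    // per-column RHS sum
    }
    // mirrors acc[i].s[j] += RHS_OFFSET * a_sum + LHS_OFFSET * b_sum (offsets negated)
    acc += (-b_off) * a_sum + (-a_off) * b_sum;
    return acc; // equals sum_k (a[k] - a_off) * (b[k] - b_off)
}
```

With both offsets equal to zero this reduces to a plain integer dot product, which is why the kernels can initialise the accumulator with K * LHS_OFFSET * RHS_OFFSET and apply the remaining correction terms only once after the K loop.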
diff --git a/src/core/CL/cl_kernels/common/mat_mul_quantized.cl b/src/core/CL/cl_kernels/common/mat_mul_quantized.cl new file mode 100644 index 0000000000..c250b4b988 --- /dev/null +++ b/src/core/CL/cl_kernels/common/mat_mul_quantized.cl @@ -0,0 +1,387 @@ +/* + * Copyright (c) 2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "helpers.h" +#include "tile_helpers.h" + +#if defined(MAT_MUL_NATIVE_QUANTIZED_NT_NT) +/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS non-transposed, RHS non-transposed - buffer only + * + * @note the "batch" here expresses the number of matrix multiplications to run in parallel. However, it + * should NOT be confused with the batch size of the model. For NHWC the "batch" is the "H" dimension + * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=uchar) + * @note The block's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=4). + * @note The number of leftover outputs rows/columns must be passed using -DPARTIAL_STORE_N0 and -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_N0=2, -DPARTIAL_STORE_M0=3) + * @note The dimension K must be passed at compile time using -DK (e.g. -DK=6) + * @note The kernel name in uppercase must be passed at compile time (e.g. -DMAT_MUL_NATIVE_QUANTIZED_NT_NT) + * @note Only the following configurations of M0, N0 and K0 are currently supported: + * - M0 > 0 + * - N0 = 1, 2, 3, 4, 8, 16 + * - K0 = 1, 2, 3, 4, 8, 16 + * @note Values > 8 for M0 are not expected to be efficient + * + * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: QASYMM8_SIGNED/QASYMM8 + * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) + * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) + * @param[in] lhs_w The width of the lhs tensor + * @param[in] lhs_h The height of the lhs tensor + * @param[in] lhs_n Number of the matrices (buffers) in the batch + * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix + * @param[in] rhs_ptr Pointer to the rhs matrix. 
Supported data types: same as @p lhs_ptr + * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) + * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) + * @param[in] rhs_w The width of the rhs tensor + * @param[in] rhs_h The height of the rhs tensor + * @param[in] rhs_n Number of the matrices (buffers) in the batch + * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix + * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr + * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) + * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) + * @param[in] dst_w The width of the dst tensor + * @param[in] dst_h The height of the dst tensor + * @param[in] dst_n Number of the matrices (buffers) in the batch + * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix + */ +__kernel void mat_mul_native_quantized_nt_nt( + TENSOR3D_T(lhs, BUFFER), + TENSOR3D_T(rhs, BUFFER), + TENSOR3D_T(dst, BUFFER)) +{ + const uint x = GET_SPATIAL_IDX(0, N0, PARTIAL_STORE_N0); + const uint y = GET_SPATIAL_IDX(1, M0, PARTIAL_STORE_M0); + const uint z = GET_SPATIAL_IDX(2, 1, 0); + + // Compute LHS/RHS/DST matrix address + lhs_offset_first_element_in_bytes += y * lhs_stride_y + z * lhs_stride_z; + rhs_offset_first_element_in_bytes += x * sizeof(DATA_TYPE) + z * rhs_stride_z; + dst_offset_first_element_in_bytes += x * sizeof(DATA_TYPE) + y * dst_stride_y + z * dst_stride_z; + + // Initialize the accumulators + TILE(int, M0, N0, acc); + LOOP_UNROLLING(int, i, 0, 1, M0, + { + acc[i].v = K * ((int)LHS_OFFSET) * ((int)RHS_OFFSET); + }) + + TILE(int, 1, N0, b_sum); + b_sum[0].v = 0; + + TILE(int, 1, M0, a_sum); + a_sum[0].v = 0; + + int k; + for(k = 0; k <= K - K0; k += K0) + { + TILE(DATA_TYPE, M0, K0, a); + TILE(DATA_TYPE, N0, K0, b); + + LOOP_UNROLLING(int, i, 0, 1, M0, + { + a[i].v = 0; + }) + + LOOP_UNROLLING(int, i, 0, 1, N0, + { + b[i].v = 0; + }) + + // Load tile from the lhs tensor + T_LOAD(DATA_TYPE, M0, K0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a); + + // Load tile from the rhs tensor in a transposed fashion + // in order to use T_MMUL_NT_T macro because only this macro + // can utilize dot product instruction for Int8/UInt8 by + // directly multiplying the rows of Lhs and Rhs tensors. + T_LOAD_TRANSPOSED(DATA_TYPE, K0, N0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b); + + T_MMUL(DATA_TYPE, DATA_TYPE, int, M0, N0, K0, NT, T, a, b, acc); + + LOOP_UNROLLING(int, i, 0, 1, M0, + { + LOOP_UNROLLING(int, j, 0, 1, K0, + { + a_sum[0].s[i] += (int)a[i].s[j]; + }) + }) + + LOOP_UNROLLING(int, i, 0, 1, K0, + { + LOOP_UNROLLING(int, j, 0, 1, N0, + { + b_sum[0].s[j] += (int)b[j].s[i]; + }) + }) + + lhs_offset_first_element_in_bytes += K0 * sizeof(DATA_TYPE); + rhs_offset_first_element_in_bytes += K0 * rhs_stride_y; + } + +#if((K % K0) != 0) + /* Leftover Loop */ + for(; k < K; ++k) + { + TILE(DATA_TYPE, M0, 1, a); + TILE(DATA_TYPE, N0, 1, b); + + LOOP_UNROLLING(int, i, 0, 1, M0, + { + a[i].v = 0; + }) + + LOOP_UNROLLING(int, i, 0, 1, N0, + { + b[i].v = 0; + }) + + // Load tile from the lhs tensor + T_LOAD(DATA_TYPE, M0, 1, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a); + + // Load tile from the rhs tensor in a transposed fashion. 
+ // See the main loop for more explanation + T_LOAD_TRANSPOSED(DATA_TYPE, 1, N0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b); + + T_MMUL(DATA_TYPE, DATA_TYPE, int, M0, N0, 1, NT, T, a, b, acc); + + LOOP_UNROLLING(int, i, 0, 1, M0, + { + LOOP_UNROLLING(int, j, 0, 1, 1, + { + a_sum[0].s[i] += (int)a[i].s[j]; + }) + }) + + LOOP_UNROLLING(int, i, 0, 1, 1, + { + LOOP_UNROLLING(int, j, 0, 1, N0, + { + b_sum[0].s[j] += (int)b[j].s[i]; + }) + }) + + lhs_offset_first_element_in_bytes += 1 * sizeof(DATA_TYPE); + rhs_offset_first_element_in_bytes += 1 * rhs_stride_y; + } +#endif // ((K % K0) != 0) + + LOOP_UNROLLING(int, i, 0, 1, M0, + { + LOOP_UNROLLING(int, j, 0, 1, N0, + { + acc[i].s[j] += ((int)RHS_OFFSET) * a_sum[0].s[i] + ((int)(LHS_OFFSET)) * b_sum[0].s[j]; + }) + }) + + const bool x_cond = PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0; + const bool y_cond = PARTIAL_STORE_M0 != 0 && get_global_id(1) == 0; + + // Quantize the tile + TILE(DATA_TYPE, M0, N0, accq); + T_QUANTIZE8_ASYMMETRIC(int, DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, acc, accq); + + TILE(int, M0, 1, indirect_buffer); + LOOP_UNROLLING(int, _i, 0, 1, M0, + { + indirect_buffer[_i].v = min(_i, select(M0 - 1, PARTIAL_STORE_M0 - 1, y_cond)); + }); + + T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, 0, dst_stride_y, x_cond, accq, indirect_buffer); +} +#endif // defined(MAT_MUL_NATIVE_QUANTIZED_NT_NT) + +#if defined(MAT_MUL_NATIVE_QUANTIZED_T_NT) +/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS transposed, RHS non-transposed + * + * @note the "batch" here expresses the number of matrix multiplications to run in parallel. However, it + * should NOT be confused with the batch size of the model. For NHWC the "batch" is the "H" dimension + * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=uchar) + * @note The block's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=4). + * @note The number of leftover outputs rows/columns must be passed using -DPARTIAL_STORE_N0 and -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_N0=2, -DPARTIAL_STORE_M0=3) + * @note The dimension K must be passed at compile time using -DK (e.g. -DK=6) + * @note The kernel name in uppercase must be passed at compile time (e.g. -DMAT_MUL_NATIVE_QUANTIZED_T_NT) + * @note Only the following configurations of M0, N0 and K0 are currently supported: + * - M0 > 0 + * - N0 = 1, 2, 3, 4, 8, 16 + * - K0 = 1, 2, 3, 4, 8, 16 + * @note Values > 8 for M0, N0 and K0 are not expected to be efficient + * + * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: QASYMM8/QASYMM8_SIGNED + * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) + * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) + * @param[in] lhs_w The width of the lhs tensor + * @param[in] lhs_h The height of the lhs tensor + * @param[in] lhs_n Number of the matrices (buffers) in the batch + * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix + * @param[in] rhs_ptr Pointer to the rhs matrix. 
Supported data types: same as @p lhs_ptr + * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) + * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) + * @param[in] rhs_w The width of the rhs tensor + * @param[in] rhs_h The height of the rhs tensor + * @param[in] rhs_n Number of the matrices (buffers) in the batch + * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix + * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr + * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) + * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) + * @param[in] dst_w The width of the dst tensor + * @param[in] dst_h The height of the dst tensor + * @param[in] dst_n Number of the matrices (buffers) in the batch + * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix + */ +__kernel void mat_mul_native_quantized_t_nt( + TENSOR3D_T(lhs, BUFFER), + TENSOR3D_T(rhs, BUFFER), + TENSOR3D_T(dst, BUFFER)) +{ + const uint x = GET_SPATIAL_IDX(0, N0, PARTIAL_STORE_N0); + const uint y = GET_SPATIAL_IDX(1, M0, PARTIAL_STORE_M0); + const uint z = GET_SPATIAL_IDX(2, 1, 0); + + // Compute LHS/RHS/DST matrix address + lhs_offset_first_element_in_bytes += y * sizeof(DATA_TYPE) + z * lhs_stride_z; + rhs_offset_first_element_in_bytes += x * sizeof(DATA_TYPE) + z * rhs_stride_z; + dst_offset_first_element_in_bytes += x * sizeof(DATA_TYPE) + y * dst_stride_y + z * dst_stride_z; + + // Initialize the accumulators + TILE(int, M0, N0, acc); + LOOP_UNROLLING(int, i, 0, 1, M0, + { + acc[i].v = K * ((int)LHS_OFFSET) * ((int)RHS_OFFSET); + }) + + TILE(int, 1, N0, b_sum); + b_sum[0].v = 0; + + TILE(int, 1, M0, a_sum); + a_sum[0].v = 0; + + int k; + for(k = 0; k <= K - K0; k += K0) + { + TILE(DATA_TYPE, M0, K0, a); + TILE(DATA_TYPE, N0, K0, b); + + LOOP_UNROLLING(int, i, 0, 1, M0, + { + a[i].v = 0; + }) + + LOOP_UNROLLING(int, i, 0, 1, N0, + { + b[i].v = 0; + }) + + // Load tile from the lhs/rhs tensors in a transposed fashion + // see mat_mul_native_quantized_nt_nt main loop for more explanation + T_LOAD_TRANSPOSED(DATA_TYPE, K0, M0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a); + T_LOAD_TRANSPOSED(DATA_TYPE, K0, N0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b); + + T_MMUL(DATA_TYPE, DATA_TYPE, int, M0, N0, K0, NT, T, a, b, acc); + + LOOP_UNROLLING(int, i, 0, 1, K0, + { + LOOP_UNROLLING(int, j, 0, 1, M0, + { + a_sum[0].s[j] += (int)a[j].s[i]; + }) + }) + + LOOP_UNROLLING(int, i, 0, 1, K0, + { + LOOP_UNROLLING(int, j, 0, 1, N0, + { + b_sum[0].s[j] += (int)b[j].s[i]; + }) + }) + + lhs_offset_first_element_in_bytes += K0 * lhs_stride_y; + rhs_offset_first_element_in_bytes += K0 * rhs_stride_y; + } + +#if((K % K0) != 0) + /* Leftover Loop */ + for(; k < K; ++k) + { + TILE(DATA_TYPE, M0, 1, a); + TILE(DATA_TYPE, N0, 1, b); + + LOOP_UNROLLING(int, i, 0, 1, M0, + { + a[i].v = 0; + }) + + LOOP_UNROLLING(int, i, 0, 1, N0, + { + b[i].v = 0; + }) + + // Load tile from the lhs/rhs tensors in a transposed fashion + // see mat_mul_native_quantized_nt_nt main loop for more explanation + T_LOAD_TRANSPOSED(DATA_TYPE, 1, M0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a); + T_LOAD_TRANSPOSED(DATA_TYPE, 1, N0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b); + + T_MMUL(DATA_TYPE, DATA_TYPE, int, M0, N0, 1, NT, T, a, b, acc); + + LOOP_UNROLLING(int, i, 0, 1, 1, + { + LOOP_UNROLLING(int, j, 0, 1, M0, + { + a_sum[0].s[j] += (int)a[j].s[i]; + }) + }) 
+ + LOOP_UNROLLING(int, i, 0, 1, 1, + { + LOOP_UNROLLING(int, j, 0, 1, N0, + { + b_sum[0].s[j] += (int)b[j].s[i]; + }) + }) + + lhs_offset_first_element_in_bytes += 1 * lhs_stride_y; + rhs_offset_first_element_in_bytes += 1 * rhs_stride_y; + } +#endif // ((K % K0) != 0) + + LOOP_UNROLLING(int, i, 0, 1, M0, + { + LOOP_UNROLLING(int, j, 0, 1, N0, + { + acc[i].s[j] += ((int)(RHS_OFFSET)) * a_sum[0].s[i] + ((int)(LHS_OFFSET)) * b_sum[0].s[j]; + }) + }) + + const bool x_cond = PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0; + const bool y_cond = PARTIAL_STORE_M0 != 0 && get_global_id(1) == 0; + + // Quantize the tile + TILE(DATA_TYPE, M0, N0, accq); + T_QUANTIZE8_ASYMMETRIC(int, DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, acc, accq); + + TILE(int, M0, 1, indirect_buffer); + LOOP_UNROLLING(int, _i, 0, 1, M0, + { + indirect_buffer[_i].v = min(_i, select(M0 - 1, PARTIAL_STORE_M0 - 1, y_cond)); + }); + + T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, 0, dst_stride_y, x_cond, accq, indirect_buffer); +} +#endif // defined(MAT_MUL_NATIVE_QUANTIZED_T_NT) diff --git a/src/core/CL/cl_kernels/tile_helpers.h b/src/core/CL/cl_kernels/tile_helpers.h index 872f4c0b57..c9b5370dea 100644 --- a/src/core/CL/cl_kernels/tile_helpers.h +++ b/src/core/CL/cl_kernels/tile_helpers.h @@ -536,6 +536,100 @@ }) \ }) +/** Store a VECTOR variable (e.g. int4, int8, char2 etc.) to a specified column in the TILE object + * + * @param[in] VECTOR Vector variable to store + * @param[in, out] TILE Tile variable to store to + * @param[in] WIDTH Width of the vector variable, also height of the tile (e.g. 2 if char2) + * @param[in] COLUMN Column index of the tile + */ +#define COPY_VECTOR_TO_TILE_COLUMN(VECTOR, TILE, WIDTH, COLUMN) COPY_VECTOR_TO_TILE_COLUMN_STR(VECTOR, TILE, WIDTH, COLUMN) +#define COPY_VECTOR_TO_TILE_COLUMN_STR(VECTOR, TILE, WIDTH, COLUMN) COPY_##WIDTH##_VECTOR_TO_TILE_COLUMN(VECTOR, TILE, COLUMN) +#define COPY_1_VECTOR_TO_TILE_COLUMN(VECTOR, TILE, COLUMN) \ + ({ \ + TILE[0].s[COLUMN] = VECTOR; \ + }) + +#define COPY_2_VECTOR_TO_TILE_COLUMN(VECTOR, TILE, COLUMN) \ + ({ \ + TILE[0].s[COLUMN] = VECTOR.s0; \ + TILE[1].s[COLUMN] = VECTOR.s1; \ + }) + +#define COPY_3_VECTOR_TO_TILE_COLUMN(VECTOR, TILE, COLUMN) \ + ({ \ + TILE[0].s[COLUMN] = VECTOR.s0; \ + TILE[1].s[COLUMN] = VECTOR.s1; \ + TILE[2].s[COLUMN] = VECTOR.s2; \ + }) + +#define COPY_4_VECTOR_TO_TILE_COLUMN(VECTOR, TILE, COLUMN) \ + ({ \ + TILE[0].s[COLUMN] = VECTOR.s0; \ + TILE[1].s[COLUMN] = VECTOR.s1; \ + TILE[2].s[COLUMN] = VECTOR.s2; \ + TILE[3].s[COLUMN] = VECTOR.s3; \ + }) + +#define COPY_8_VECTOR_TO_TILE_COLUMN(VECTOR, TILE, COLUMN) \ + ({ \ + TILE[0].s[COLUMN] = VECTOR.s0; \ + TILE[1].s[COLUMN] = VECTOR.s1; \ + TILE[2].s[COLUMN] = VECTOR.s2; \ + TILE[3].s[COLUMN] = VECTOR.s3; \ + TILE[4].s[COLUMN] = VECTOR.s4; \ + TILE[5].s[COLUMN] = VECTOR.s5; \ + TILE[6].s[COLUMN] = VECTOR.s6; \ + TILE[7].s[COLUMN] = VECTOR.s7; \ + }) + +#define COPY_16_VECTOR_TO_TILE_COLUMN(VECTOR, TILE, COLUMN) \ + ({ \ + TILE[0].s[COLUMN] = VECTOR.s0; \ + TILE[1].s[COLUMN] = VECTOR.s1; \ + TILE[2].s[COLUMN] = VECTOR.s2; \ + TILE[3].s[COLUMN] = VECTOR.s3; \ + TILE[4].s[COLUMN] = VECTOR.s4; \ + TILE[5].s[COLUMN] = VECTOR.s5; \ + TILE[6].s[COLUMN] = VECTOR.s6; \ + TILE[7].s[COLUMN] = VECTOR.s7; \ + TILE[8].s[COLUMN] = VECTOR.s8; \ + TILE[9].s[COLUMN] = VECTOR.s9; \ + TILE[10].s[COLUMN] = VECTOR.sA; \ + TILE[11].s[COLUMN] = VECTOR.sB; \ + TILE[12].s[COLUMN] = VECTOR.sC; \ + TILE[13].s[COLUMN] = VECTOR.sD; \ + TILE[14].s[COLUMN] = VECTOR.sE; 
\ + TILE[15].s[COLUMN] = VECTOR.sF; \ + }) + +/** Load SRC_HEIGHT x SRC_WIDTH elements from global memory (tensor), and store them in a SRC_WIDTH x SRC_HEIGHT tile + * + * @param[in] DATA_TYPE Data type + * @param[in] SRC_HEIGHT Number of source rows, or number of columns of the output tile + * @param[in] SRC_WIDTH Number of source columns, or number of tile rows + * @param[in] TENSOR_TYPE Type of cl_type used to store the tensor in global memory (BUFFER=cl_buffer, IMAGE=cl_image). + * In case of cl_image, only WIDTH multiples of 4 are supported (4, 8, 16) + * @param[in] TENSOR Tensor basename + * @param[in] X Starting X position + * @param[in] Y Starting Y position + * @param[in] YI_MULTIPLIER Parameter used to multiply the internal row increment (_i). + * In common cases should be 1 but it becomes useful when we want to load rows which are multiple of STRIDE_Y. + * (e.g. loading the weights of convolution layer). + * In this case the address calculation is performed as: (Y + _i * Y_MULTIPLIER) * STRIDE_Y + * @param[in] STRIDE_Y Stride Y (in bytes) used to load each row. + * @param[out] dst Output tile + */ +#define T_LOAD_TRANSPOSED(DATA_TYPE, SRC_HEIGHT, SRC_WIDTH, TENSOR_TYPE, TENSOR, X, Y, YI_MULTIPLIER, STRIDE_Y, dst) \ + ({ \ + LOOP_UNROLLING(int, _i, 0, 1, SRC_HEIGHT, \ + { \ + VEC_DATA_TYPE(DATA_TYPE, SRC_WIDTH) \ + tmp = V_LOAD(DATA_TYPE, SRC_WIDTH, TENSOR_TYPE, TENSOR, X, ((Y) + _i * (int)(YI_MULTIPLIER)), STRIDE_Y); \ + COPY_VECTOR_TO_TILE_COLUMN(tmp, dst, SRC_WIDTH, _i); \ + }) \ + }) + /** Load a tile from global memory (tensor) using an indirect Y index tile * * @param[in] DATA_TYPE Data type @@ -1259,6 +1353,11 @@ * @param[in] lhs LHS tile * @param[in] rhs RHS tile * @param[in, out] dst DST tile + * + * @note For Int8/UInt8 multiplications, we only have T_MMUL_NT_T because we need + * the multiply the rows of Lhs and Rhs tensors to utilize dot product extension. + * Addition of other versions requires dealing with on the fly transposition of + * these tile elements and therefore is not favored. 
*/ #define T_MMUL(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, LHS_LAYOUT, RHS_LAYOUT, lhs, rhs, dst) T_MMUL_##LHS_LAYOUT##_##RHS_LAYOUT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) #define T_MMUL_NT_T(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_##LHS_DATA_TYPE##_##RHS_DATA_TYPE##_##DST_DATA_TYPE(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) diff --git a/src/gpu/cl/ClKernelLibrary.cpp b/src/gpu/cl/ClKernelLibrary.cpp index 44b086f2fc..e657687887 100644 --- a/src/gpu/cl/ClKernelLibrary.cpp +++ b/src/gpu/cl/ClKernelLibrary.cpp @@ -323,6 +323,8 @@ const std::map<std::string, std::string> ClKernelLibrary::_kernel_program_map = { "mat_mul_native_nt_t", "common/mat_mul.cl" }, { "mat_mul_native_t_nt", "common/mat_mul.cl" }, { "mat_mul_native_t_t", "common/mat_mul.cl" }, + { "mat_mul_native_quantized_nt_nt", "common/mat_mul_quantized.cl" }, + { "mat_mul_native_quantized_t_nt", "common/mat_mul_quantized.cl" }, { "max_unpooling_layer_2", "common/unpooling_layer.cl" }, { "mean_stddev_normalization", "common/mean_stddev_normalization.cl" }, { "memset", "common/memset.cl" }, @@ -794,6 +796,10 @@ const std::map<std::string, std::string> ClKernelLibrary::_program_source_map = "common/mat_mul.cl", #include "./cl_kernels/common/mat_mul.clembed" }, + { + "common/mat_mul_quantized.cl", +#include "./cl_kernels/common/mat_mul_quantized.clembed" + }, #ifdef ENABLE_NCHW_KERNELS { "nchw/batch_to_space.cl", diff --git a/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp b/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp new file mode 100644 index 0000000000..d5ecdf7dd2 --- /dev/null +++ b/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp @@ -0,0 +1,224 @@ +/* + * Copyright (c) 2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "src/gpu/cl/kernels/ClMatMulLowpNativeKernel.h" + +#include "arm_compute/core/CL/CLHelpers.h" +#include "arm_compute/core/CL/ICLTensor.h" +#include "arm_compute/core/ITensorPack.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/utils/misc/ShapeCalculator.h" +#include "arm_compute/core/utils/quantization/AsymmHelpers.h" + +#include "src/common/utils/Log.h" +#include "src/core/helpers/AutoConfiguration.h" +#include "src/core/helpers/WindowHelpers.h" +#include "src/gpu/cl/ClCompileContext.h" + +#include "support/Cast.h" +#include "support/StringSupport.h" + +namespace arm_compute +{ +namespace opencl +{ +namespace kernels +{ +namespace +{ +Status validate_matmul_kernel_info(const MatMulKernelInfo &matmul_kernel_info) +{ + const bool adj_lhs = matmul_kernel_info.adj_lhs; + const bool adj_rhs = matmul_kernel_info.adj_rhs; + const int m0 = matmul_kernel_info.m0; + const int n0 = matmul_kernel_info.n0; + const int k0 = matmul_kernel_info.k0; + + // Validate M0 + ARM_COMPUTE_RETURN_ERROR_ON_MSG(m0 < 1, "Only positive integers are supported for M0"); + + if(adj_lhs) + { + ARM_COMPUTE_RETURN_ERROR_ON_MSG(((m0 & (m0 - 1)) && (m0 != 3)) || (m0 > 16), "Only 1,2,3,4,8,16 are supported for M0 for Lhs transposed"); + } + + // Validate N0 + ARM_COMPUTE_RETURN_ERROR_ON_MSG(n0 < 1, "Only positive integers are supported for N0"); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(((n0 & (n0 - 1)) && (n0 != 3)) || (n0 > 16), "Only 1,2,3,4,8,16 are supported for N0"); + + // Validate K0 + ARM_COMPUTE_RETURN_ERROR_ON_MSG(k0 < 1, "Only positive integers are supported for K0"); + if(!adj_lhs || adj_rhs) + { + ARM_COMPUTE_RETURN_ERROR_ON_MSG(((k0 & (k0 - 1)) && (k0 != 3)) || (k0 > 16), "Only 1,2,3,4,8,16 are supported for K0"); + } + + return Status{}; +} + +Status validate_input_shapes(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const MatMulKernelInfo &matmul_kernel_info) +{ + const size_t lhs_k = matmul_kernel_info.adj_lhs ? lhs_shape.y() : lhs_shape.x(); + const size_t rhs_k = matmul_kernel_info.adj_rhs ? 
rhs_shape.x() : rhs_shape.y(); + + ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_k != rhs_k, "K dimension in Lhs and Rhs matrices must match."); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_shape.total_size() == 0, "Lhs tensor can't be empty"); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(rhs_shape.total_size() == 0, "Rhs tensor can't be empty"); + + constexpr size_t batch_dim_start = 2; + for(size_t i = batch_dim_start; i < Coordinates::num_max_dimensions; ++i) + { + ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_shape[i] != rhs_shape[i], "Batch dimension broadcasting is not supported"); + } + + return Status{}; +} +} +ClMatMulLowpNativeKernel::ClMatMulLowpNativeKernel() +{ + _type = CLKernelType::GEMM; +} +Status ClMatMulLowpNativeKernel::validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *output, const MatMulKernelInfo &matmul_kernel_info) +{ + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs, output); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, rhs); + ARM_COMPUTE_RETURN_ON_ERROR(validate_matmul_kernel_info(matmul_kernel_info)); + ARM_COMPUTE_RETURN_ON_ERROR(validate_input_shapes(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info)); + + if(output->total_size() != 0) + { + const TensorInfo tensor_info_output = output->clone()->set_tensor_shape(misc::shape_calculator::compute_matmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info)); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, output); + } + + return Status{}; +} +void ClMatMulLowpNativeKernel::configure(const ClCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *output, const MatMulKernelInfo &matmul_kernel_info) +{ + ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, output, &compile_context, &matmul_kernel_info); + ARM_COMPUTE_LOG_PARAMS(lhs, rhs, output, matmul_kernel_info); + ARM_COMPUTE_ERROR_THROW_ON(validate(lhs, rhs, output, matmul_kernel_info)); + + // output tensor auto initialization if not yet initialized + auto_init_if_empty(*output, lhs->clone()->set_tensor_shape(misc::shape_calculator::compute_matmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info))); + + const int m = output->dimension(1); + const int n = output->dimension(0); + const int k = matmul_kernel_info.adj_lhs ? lhs->tensor_shape().y() : lhs->tensor_shape().x(); + const bool adj_lhs = matmul_kernel_info.adj_lhs; + + int m0 = adj_lhs ? adjust_vec_size(matmul_kernel_info.m0, m) : std::min(matmul_kernel_info.m0, m); + int n0 = adjust_vec_size(matmul_kernel_info.n0, n); + + // Configure kernel window + Window win = calculate_max_window(*output, Steps(n0, m0)); + win = win.collapse(win, Window::DimZ); + IClKernel::configure_internal(win); + + // Calculate partial (store instead of load) M0 and partial N0 for the partial blocks at the end of a row/column if any. This is to avoid padding. 
+ const unsigned int partial_store_m0 = m % m0; + const unsigned int partial_store_n0 = n % n0; + + CLBuildOptions build_opts; + build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(lhs->data_type())); + build_opts.add_option("-DM0=" + support::cpp11::to_string(m0)); + build_opts.add_option("-DN0=" + support::cpp11::to_string(n0)); + build_opts.add_option("-DK0=" + support::cpp11::to_string(matmul_kernel_info.k0)); + build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0)); + build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0)); + build_opts.add_option("-DK=" + support::cpp11::to_string(k)); + + const UniformQuantizationInfo lqinfo = lhs->quantization_info().uniform(); + const UniformQuantizationInfo rqinfo = rhs->quantization_info().uniform(); + const UniformQuantizationInfo dqinfo = output->quantization_info().uniform(); + + float multiplier = lqinfo.scale * rqinfo.scale / dqinfo.scale; + int output_multiplier = 0; + int output_shift = 0; + arm_compute::quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift); + + build_opts.add_option("-DDST_MULTIPLIER=" + support::cpp11::to_string(output_multiplier)); + build_opts.add_option("-DDST_SHIFT=" + support::cpp11::to_string(output_shift)); + + build_opts.add_option("-DLHS_OFFSET=" + support::cpp11::to_string(-lqinfo.offset)); // Note this is passed as negative to maintain similarity with CLDirectConv2D + build_opts.add_option("-DRHS_OFFSET=" + support::cpp11::to_string(-rqinfo.offset)); // Note this is passed as negative to maintain similarity with CLDirectConv2D + build_opts.add_option("-DDST_OFFSET=" + support::cpp11::to_string(dqinfo.offset)); // Passed as positive (unlike the above two) + + std::string kernel_name("mat_mul_native_quantized"); + kernel_name += matmul_kernel_info.adj_lhs ? "_t" : "_nt"; + kernel_name += matmul_kernel_info.adj_rhs ? 
"_t" : "_nt"; + + // A macro guard to compile ONLY the kernel of interest + build_opts.add_option("-D" + upper_string(kernel_name)); + + // Create kernel + _kernel = create_kernel(compile_context, kernel_name, build_opts.options()); + + // Set config_id for enabling LWS tuning + const size_t number_of_batches = output->tensor_shape().total_size() / (m * n); + + _config_id = kernel_name; + _config_id += "_"; + _config_id += lower_string(string_from_data_type(lhs->data_type())); + _config_id += "_"; + _config_id += support::cpp11::to_string(m); + _config_id += "_"; + _config_id += support::cpp11::to_string(n); + _config_id += "_"; + _config_id += support::cpp11::to_string(k); + _config_id += "_"; + _config_id += support::cpp11::to_string(number_of_batches); + _config_id += "_"; + _config_id += support::cpp11::to_string(m0); + _config_id += "_"; + _config_id += support::cpp11::to_string(n0); + _config_id += "_"; + _config_id += support::cpp11::to_string(matmul_kernel_info.k0); +} + +void ClMatMulLowpNativeKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) +{ + ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); + ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window); + + const ICLTensor *lhs = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0)); + const ICLTensor *rhs = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1)); + ICLTensor *output = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST)); + ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, output); + ARM_COMPUTE_LOG_PARAMS(lhs, rhs, output); + + unsigned int idx = 0; + Window window_collapsed = window.collapse(ICLKernel::window(), Window::DimZ); + + add_3d_tensor_nhw_argument(idx, lhs); + add_3d_tensor_nhw_argument(idx, rhs); + add_3d_tensor_nhw_argument(idx, output); + + enqueue(queue, *this, window_collapsed, lws_hint()); +} + +} // namespace kernels +} // namespace opencl +} // namespace arm_compute diff --git a/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.h b/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.h new file mode 100644 index 0000000000..13a33fbd62 --- /dev/null +++ b/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+#ifndef ACL_SRC_GPU_CL_KERNELS_CLMATMULLOWPNATIVEKERNEL
+#define ACL_SRC_GPU_CL_KERNELS_CLMATMULLOWPNATIVEKERNEL
+
+#include "src/core/common/Macros.h"
+#include "src/gpu/cl/ClCompileContext.h"
+#include "src/gpu/cl/IClKernel.h"
+
+namespace arm_compute
+{
+// Forward declarations
+struct MatMulKernelInfo;
+namespace opencl
+{
+namespace kernels
+{
+class ClMatMulLowpNativeKernel : public IClKernel
+{
+public:
+    ClMatMulLowpNativeKernel();
+    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClMatMulLowpNativeKernel);
+    /** Initialise the kernel's input and output.
+     *
+     * @param[in]  compile_context The compile context to be used.
+     * @param[in]  lhs             Input tensor for the LHS matrix. Data type supported: QASYMM8_SIGNED/QASYMM8.
+     *                             Dimensions above 2 are collapsed onto dimension 2 and represent the batch.
+     * @param[in]  rhs             Input tensor for the RHS matrix. Data type supported: same as @p lhs.
+     *                             Dimensions above 2 are collapsed onto dimension 2 and represent the batch.
+     * @param[out] output          Output tensor info. Data type supported: same as @p lhs
+     * @param[in]  matmul_info     Attributes for Batch MatMul Kernel
+     */
+    void configure(const ClCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *output, const MatMulKernelInfo &matmul_info);
+    /** Static function to check if given info will lead to a valid configuration
+     *
+     * Similar to @ref ClMatMulLowpNativeKernel::configure()
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *output, const MatMulKernelInfo &matmul_info);
+
+    // Inherited methods overridden:
+    void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
+};
+} // namespace kernels
+} // namespace opencl
+} // namespace arm_compute
+#endif /* ACL_SRC_GPU_CL_KERNELS_CLMATMULLOWPNATIVEKERNEL */
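For reference, the -DDST_MULTIPLIER/-DDST_SHIFT/-DDST_OFFSET options computed in ClMatMulLowpNativeKernel::configure() encode the rescaling lhs_scale * rhs_scale / dst_scale as a fixed-point multiplier/shift pair consumed by T_QUANTIZE8_ASYMMETRIC. The float-domain sketch below only illustrates what that requantization is meant to compute for the QASYMM8 case; the function name is hypothetical and the actual kernel uses the fixed-point path rather than floating point:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>

uint8_t requantize_reference(int32_t acc, float lhs_scale, float rhs_scale, float dst_scale, int32_t dst_offset)
{
    // Rescale the int32 accumulator into the destination quantized domain...
    const float scaled = acc * (lhs_scale * rhs_scale / dst_scale);
    // ...add the destination zero point and clamp to the 8-bit range.
    const int32_t q = static_cast<int32_t>(std::lround(scaled)) + dst_offset;
    return static_cast<uint8_t>(std::min(255, std::max(0, q)));
}
```

The QASYMM8_SIGNED case is identical apart from clamping to [-128, 127].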