From 9d0c4deb760efc2ca07e5e0b8218995201ad8a1f Mon Sep 17 00:00:00 2001 From: Gunes Bayir Date: Thu, 13 Apr 2023 18:22:58 +0100 Subject: Add quantized CL MatMul kernels for Lhs NT/T, Rhs NT Implement OpenCL kernels for batched Matrix Multiplication for the quantized data types QASYMM8 and QASYMM8_SIGNED. Quantized MatMul is supported with the following MatMul attributes: * adj_x = false, adj_y = false * adj_x = true, adj_y = false We consider native format kernels only. In other words, no reshaping of the operand matrices is done. Resolves: COMPMID-5921, COMPMID-5922 Change-Id: I99e0f68054a2bd635c60ec2641acc2e7ff398473 Signed-off-by: Omar Al Khatib Signed-off-by: Gunes Bayir Signed-off-by: Jakub Sujak Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9435 Reviewed-by: SiCong Li Reviewed-by: Viet-Hoa Do Comments-Addressed: Arm Jenkins Tested-by: Arm Jenkins Benchmark: Arm Jenkins --- Android.bp | 2 + SConscript | 1 + filelist.json | 1 + src/core/CL/cl_kernels/common/mat_mul_quantized.cl | 387 +++++++++++++++++++++ src/core/CL/cl_kernels/tile_helpers.h | 99 ++++++ src/gpu/cl/ClKernelLibrary.cpp | 6 + src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp | 224 ++++++++++++ src/gpu/cl/kernels/ClMatMulLowpNativeKernel.h | 69 ++++ tests/validation/CL/MatMulKernel.cpp | 286 +++++++-------- tests/validation/CL/MatMulLowpNativeKernel.cpp | 337 ++++++++++++++++++ tests/validation/Helpers.cpp | 102 +++++- tests/validation/Helpers.h | 13 +- tests/validation/fixtures/MatMulKernelFixture.h | 130 +++++-- 13 files changed, 1486 insertions(+), 171 deletions(-) create mode 100644 src/core/CL/cl_kernels/common/mat_mul_quantized.cl create mode 100644 src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp create mode 100644 src/gpu/cl/kernels/ClMatMulLowpNativeKernel.h create mode 100644 tests/validation/CL/MatMulLowpNativeKernel.cpp diff --git a/Android.bp b/Android.bp index 4bd307447b..32651b539c 100644 --- a/Android.bp +++ b/Android.bp @@ -51,6 +51,7 @@ opencl_srcs = [ "src/core/CL/cl_kernels/common/instance_normalization.cl", "src/core/CL/cl_kernels/common/l2_normalize.cl", "src/core/CL/cl_kernels/common/mat_mul.cl", + "src/core/CL/cl_kernels/common/mat_mul_quantized.cl", "src/core/CL/cl_kernels/common/mean_stddev_normalization.cl", "src/core/CL/cl_kernels/common/memset.cl", "src/core/CL/cl_kernels/common/minmax_layer.cl", @@ -695,6 +696,7 @@ cc_library_static { "src/gpu/cl/kernels/ClIm2ColKernel.cpp", "src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.cpp", "src/gpu/cl/kernels/ClIndirectConv2dKernel.cpp", + "src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp", "src/gpu/cl/kernels/ClMatMulNativeKernel.cpp", "src/gpu/cl/kernels/ClMulKernel.cpp", "src/gpu/cl/kernels/ClPermuteKernel.cpp", diff --git a/SConscript b/SConscript index 03b94b6bd1..e6ef73cc34 100644 --- a/SConscript +++ b/SConscript @@ -390,6 +390,7 @@ if env['opencl'] and env['embed_kernels']: 'src/core/CL/cl_kernels/common/instance_normalization.cl', 'src/core/CL/cl_kernels/common/l2_normalize.cl', 'src/core/CL/cl_kernels/common/mat_mul.cl', + 'src/core/CL/cl_kernels/common/mat_mul_quantized.cl', 'src/core/CL/cl_kernels/common/mean_stddev_normalization.cl', 'src/core/CL/cl_kernels/common/memset.cl', 'src/core/CL/cl_kernels/common/minmax_layer.cl', diff --git a/filelist.json b/filelist.json index 5418c2bfd0..ed12dca8b3 100644 --- a/filelist.json +++ b/filelist.json @@ -512,6 +512,7 @@ "MatMul": { "files": { "common": [ + "src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp", "src/gpu/cl/kernels/ClMatMulNativeKernel.cpp", 
"src/gpu/cl/operators/ClMatMul.cpp", "src/runtime/CL/functions/CLMatMul.cpp" diff --git a/src/core/CL/cl_kernels/common/mat_mul_quantized.cl b/src/core/CL/cl_kernels/common/mat_mul_quantized.cl new file mode 100644 index 0000000000..c250b4b988 --- /dev/null +++ b/src/core/CL/cl_kernels/common/mat_mul_quantized.cl @@ -0,0 +1,387 @@ +/* + * Copyright (c) 2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "helpers.h" +#include "tile_helpers.h" + +#if defined(MAT_MUL_NATIVE_QUANTIZED_NT_NT) +/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS non-transposed, RHS non-transposed - buffer only + * + * @note the "batch" here expresses the number of matrix multiplications to run in parallel. However, it + * should NOT be confused with the batch size of the model. For NHWC the "batch" is the "H" dimension + * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=uchar) + * @note The block's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=4). + * @note The number of leftover outputs rows/columns must be passed using -DPARTIAL_STORE_N0 and -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_N0=2, -DPARTIAL_STORE_M0=3) + * @note The dimension K must be passed at compile time using -DK (e.g. -DK=6) + * @note The kernel name in uppercase must be passed at compile time (e.g. -DMAT_MUL_NATIVE_QUANTIZED_NT_NT) + * @note Only the following configurations of M0, N0 and K0 are currently supported: + * - M0 > 0 + * - N0 = 1, 2, 3, 4, 8, 16 + * - K0 = 1, 2, 3, 4, 8, 16 + * @note Values > 8 for M0 are not expected to be efficient + * + * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: QASYMM8_SIGNED/QASYMM8 + * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) + * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) + * @param[in] lhs_w The width of the lhs tensor + * @param[in] lhs_h The height of the lhs tensor + * @param[in] lhs_n Number of the matrices (buffers) in the batch + * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix + * @param[in] rhs_ptr Pointer to the rhs matrix. 
Supported data types: same as @p lhs_ptr + * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) + * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) + * @param[in] rhs_w The width of the rhs tensor + * @param[in] rhs_h The height of the rhs tensor + * @param[in] rhs_n Number of the matrices (buffers) in the batch + * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix + * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr + * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) + * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) + * @param[in] dst_w The width of the dst tensor + * @param[in] dst_h The height of the dst tensor + * @param[in] dst_n Number of the matrices (buffers) in the batch + * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix + */ +__kernel void mat_mul_native_quantized_nt_nt( + TENSOR3D_T(lhs, BUFFER), + TENSOR3D_T(rhs, BUFFER), + TENSOR3D_T(dst, BUFFER)) +{ + const uint x = GET_SPATIAL_IDX(0, N0, PARTIAL_STORE_N0); + const uint y = GET_SPATIAL_IDX(1, M0, PARTIAL_STORE_M0); + const uint z = GET_SPATIAL_IDX(2, 1, 0); + + // Compute LHS/RHS/DST matrix address + lhs_offset_first_element_in_bytes += y * lhs_stride_y + z * lhs_stride_z; + rhs_offset_first_element_in_bytes += x * sizeof(DATA_TYPE) + z * rhs_stride_z; + dst_offset_first_element_in_bytes += x * sizeof(DATA_TYPE) + y * dst_stride_y + z * dst_stride_z; + + // Initialize the accumulators + TILE(int, M0, N0, acc); + LOOP_UNROLLING(int, i, 0, 1, M0, + { + acc[i].v = K * ((int)LHS_OFFSET) * ((int)RHS_OFFSET); + }) + + TILE(int, 1, N0, b_sum); + b_sum[0].v = 0; + + TILE(int, 1, M0, a_sum); + a_sum[0].v = 0; + + int k; + for(k = 0; k <= K - K0; k += K0) + { + TILE(DATA_TYPE, M0, K0, a); + TILE(DATA_TYPE, N0, K0, b); + + LOOP_UNROLLING(int, i, 0, 1, M0, + { + a[i].v = 0; + }) + + LOOP_UNROLLING(int, i, 0, 1, N0, + { + b[i].v = 0; + }) + + // Load tile from the lhs tensor + T_LOAD(DATA_TYPE, M0, K0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a); + + // Load tile from the rhs tensor in a transposed fashion + // in order to use T_MMUL_NT_T macro because only this macro + // can utilize dot product instruction for Int8/UInt8 by + // directly multiplying the rows of Lhs and Rhs tensors. + T_LOAD_TRANSPOSED(DATA_TYPE, K0, N0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b); + + T_MMUL(DATA_TYPE, DATA_TYPE, int, M0, N0, K0, NT, T, a, b, acc); + + LOOP_UNROLLING(int, i, 0, 1, M0, + { + LOOP_UNROLLING(int, j, 0, 1, K0, + { + a_sum[0].s[i] += (int)a[i].s[j]; + }) + }) + + LOOP_UNROLLING(int, i, 0, 1, K0, + { + LOOP_UNROLLING(int, j, 0, 1, N0, + { + b_sum[0].s[j] += (int)b[j].s[i]; + }) + }) + + lhs_offset_first_element_in_bytes += K0 * sizeof(DATA_TYPE); + rhs_offset_first_element_in_bytes += K0 * rhs_stride_y; + } + +#if((K % K0) != 0) + /* Leftover Loop */ + for(; k < K; ++k) + { + TILE(DATA_TYPE, M0, 1, a); + TILE(DATA_TYPE, N0, 1, b); + + LOOP_UNROLLING(int, i, 0, 1, M0, + { + a[i].v = 0; + }) + + LOOP_UNROLLING(int, i, 0, 1, N0, + { + b[i].v = 0; + }) + + // Load tile from the lhs tensor + T_LOAD(DATA_TYPE, M0, 1, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a); + + // Load tile from the rhs tensor in a transposed fashion. 
+ // See the main loop for more explanation + T_LOAD_TRANSPOSED(DATA_TYPE, 1, N0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b); + + T_MMUL(DATA_TYPE, DATA_TYPE, int, M0, N0, 1, NT, T, a, b, acc); + + LOOP_UNROLLING(int, i, 0, 1, M0, + { + LOOP_UNROLLING(int, j, 0, 1, 1, + { + a_sum[0].s[i] += (int)a[i].s[j]; + }) + }) + + LOOP_UNROLLING(int, i, 0, 1, 1, + { + LOOP_UNROLLING(int, j, 0, 1, N0, + { + b_sum[0].s[j] += (int)b[j].s[i]; + }) + }) + + lhs_offset_first_element_in_bytes += 1 * sizeof(DATA_TYPE); + rhs_offset_first_element_in_bytes += 1 * rhs_stride_y; + } +#endif // ((K % K0) != 0) + + LOOP_UNROLLING(int, i, 0, 1, M0, + { + LOOP_UNROLLING(int, j, 0, 1, N0, + { + acc[i].s[j] += ((int)RHS_OFFSET) * a_sum[0].s[i] + ((int)(LHS_OFFSET)) * b_sum[0].s[j]; + }) + }) + + const bool x_cond = PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0; + const bool y_cond = PARTIAL_STORE_M0 != 0 && get_global_id(1) == 0; + + // Quantize the tile + TILE(DATA_TYPE, M0, N0, accq); + T_QUANTIZE8_ASYMMETRIC(int, DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, acc, accq); + + TILE(int, M0, 1, indirect_buffer); + LOOP_UNROLLING(int, _i, 0, 1, M0, + { + indirect_buffer[_i].v = min(_i, select(M0 - 1, PARTIAL_STORE_M0 - 1, y_cond)); + }); + + T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, 0, dst_stride_y, x_cond, accq, indirect_buffer); +} +#endif // defined(MAT_MUL_NATIVE_QUANTIZED_NT_NT) + +#if defined(MAT_MUL_NATIVE_QUANTIZED_T_NT) +/** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS transposed, RHS non-transposed + * + * @note the "batch" here expresses the number of matrix multiplications to run in parallel. However, it + * should NOT be confused with the batch size of the model. For NHWC the "batch" is the "H" dimension + * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=uchar) + * @note The block's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=4). + * @note The number of leftover outputs rows/columns must be passed using -DPARTIAL_STORE_N0 and -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_N0=2, -DPARTIAL_STORE_M0=3) + * @note The dimension K must be passed at compile time using -DK (e.g. -DK=6) + * @note The kernel name in uppercase must be passed at compile time (e.g. -DMAT_MUL_NATIVE_QUANTIZED_T_NT) + * @note Only the following configurations of M0, N0 and K0 are currently supported: + * - M0 > 0 + * - N0 = 1, 2, 3, 4, 8, 16 + * - K0 = 1, 2, 3, 4, 8, 16 + * @note Values > 8 for M0, N0 and K0 are not expected to be efficient + * + * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: QASYMM8/QASYMM8_SIGNED + * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) + * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) + * @param[in] lhs_w The width of the lhs tensor + * @param[in] lhs_h The height of the lhs tensor + * @param[in] lhs_n Number of the matrices (buffers) in the batch + * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix + * @param[in] rhs_ptr Pointer to the rhs matrix. 
Supported data types: same as @p lhs_ptr + * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) + * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) + * @param[in] rhs_w The width of the rhs tensor + * @param[in] rhs_h The height of the rhs tensor + * @param[in] rhs_n Number of the matrices (buffers) in the batch + * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix + * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr + * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) + * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) + * @param[in] dst_w The width of the dst tensor + * @param[in] dst_h The height of the dst tensor + * @param[in] dst_n Number of the matrices (buffers) in the batch + * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix + */ +__kernel void mat_mul_native_quantized_t_nt( + TENSOR3D_T(lhs, BUFFER), + TENSOR3D_T(rhs, BUFFER), + TENSOR3D_T(dst, BUFFER)) +{ + const uint x = GET_SPATIAL_IDX(0, N0, PARTIAL_STORE_N0); + const uint y = GET_SPATIAL_IDX(1, M0, PARTIAL_STORE_M0); + const uint z = GET_SPATIAL_IDX(2, 1, 0); + + // Compute LHS/RHS/DST matrix address + lhs_offset_first_element_in_bytes += y * sizeof(DATA_TYPE) + z * lhs_stride_z; + rhs_offset_first_element_in_bytes += x * sizeof(DATA_TYPE) + z * rhs_stride_z; + dst_offset_first_element_in_bytes += x * sizeof(DATA_TYPE) + y * dst_stride_y + z * dst_stride_z; + + // Initialize the accumulators + TILE(int, M0, N0, acc); + LOOP_UNROLLING(int, i, 0, 1, M0, + { + acc[i].v = K * ((int)LHS_OFFSET) * ((int)RHS_OFFSET); + }) + + TILE(int, 1, N0, b_sum); + b_sum[0].v = 0; + + TILE(int, 1, M0, a_sum); + a_sum[0].v = 0; + + int k; + for(k = 0; k <= K - K0; k += K0) + { + TILE(DATA_TYPE, M0, K0, a); + TILE(DATA_TYPE, N0, K0, b); + + LOOP_UNROLLING(int, i, 0, 1, M0, + { + a[i].v = 0; + }) + + LOOP_UNROLLING(int, i, 0, 1, N0, + { + b[i].v = 0; + }) + + // Load tile from the lhs/rhs tensors in a transposed fashion + // see mat_mul_native_quantized_nt_nt main loop for more explanation + T_LOAD_TRANSPOSED(DATA_TYPE, K0, M0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a); + T_LOAD_TRANSPOSED(DATA_TYPE, K0, N0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b); + + T_MMUL(DATA_TYPE, DATA_TYPE, int, M0, N0, K0, NT, T, a, b, acc); + + LOOP_UNROLLING(int, i, 0, 1, K0, + { + LOOP_UNROLLING(int, j, 0, 1, M0, + { + a_sum[0].s[j] += (int)a[j].s[i]; + }) + }) + + LOOP_UNROLLING(int, i, 0, 1, K0, + { + LOOP_UNROLLING(int, j, 0, 1, N0, + { + b_sum[0].s[j] += (int)b[j].s[i]; + }) + }) + + lhs_offset_first_element_in_bytes += K0 * lhs_stride_y; + rhs_offset_first_element_in_bytes += K0 * rhs_stride_y; + } + +#if((K % K0) != 0) + /* Leftover Loop */ + for(; k < K; ++k) + { + TILE(DATA_TYPE, M0, 1, a); + TILE(DATA_TYPE, N0, 1, b); + + LOOP_UNROLLING(int, i, 0, 1, M0, + { + a[i].v = 0; + }) + + LOOP_UNROLLING(int, i, 0, 1, N0, + { + b[i].v = 0; + }) + + // Load tile from the lhs/rhs tensors in a transposed fashion + // see mat_mul_native_quantized_nt_nt main loop for more explanation + T_LOAD_TRANSPOSED(DATA_TYPE, 1, M0, BUFFER, lhs, 0, 0, 1, lhs_stride_y, a); + T_LOAD_TRANSPOSED(DATA_TYPE, 1, N0, BUFFER, rhs, 0, 0, 1, rhs_stride_y, b); + + T_MMUL(DATA_TYPE, DATA_TYPE, int, M0, N0, 1, NT, T, a, b, acc); + + LOOP_UNROLLING(int, i, 0, 1, 1, + { + LOOP_UNROLLING(int, j, 0, 1, M0, + { + a_sum[0].s[j] += (int)a[j].s[i]; + }) + }) 
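+
+        // a_sum (above) holds, for each output row, the sum of the Lhs values read along K;
+        // b_sum (below) holds the same per output column for the Rhs. Combined with the
+        // accumulator initialisation to K * LHS_OFFSET * RHS_OFFSET, they complete the expansion
+        //   sum_k (a + LHS_OFFSET) * (b + RHS_OFFSET)
+        //     = sum_k a * b + RHS_OFFSET * sum_k a + LHS_OFFSET * sum_k b + K * LHS_OFFSET * RHS_OFFSET
+        // which is applied after the K loops, just before the result is re-quantized.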
+ + LOOP_UNROLLING(int, i, 0, 1, 1, + { + LOOP_UNROLLING(int, j, 0, 1, N0, + { + b_sum[0].s[j] += (int)b[j].s[i]; + }) + }) + + lhs_offset_first_element_in_bytes += 1 * lhs_stride_y; + rhs_offset_first_element_in_bytes += 1 * rhs_stride_y; + } +#endif // ((K % K0) != 0) + + LOOP_UNROLLING(int, i, 0, 1, M0, + { + LOOP_UNROLLING(int, j, 0, 1, N0, + { + acc[i].s[j] += ((int)(RHS_OFFSET)) * a_sum[0].s[i] + ((int)(LHS_OFFSET)) * b_sum[0].s[j]; + }) + }) + + const bool x_cond = PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0; + const bool y_cond = PARTIAL_STORE_M0 != 0 && get_global_id(1) == 0; + + // Quantize the tile + TILE(DATA_TYPE, M0, N0, accq); + T_QUANTIZE8_ASYMMETRIC(int, DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, acc, accq); + + TILE(int, M0, 1, indirect_buffer); + LOOP_UNROLLING(int, _i, 0, 1, M0, + { + indirect_buffer[_i].v = min(_i, select(M0 - 1, PARTIAL_STORE_M0 - 1, y_cond)); + }); + + T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, 0, dst_stride_y, x_cond, accq, indirect_buffer); +} +#endif // defined(MAT_MUL_NATIVE_QUANTIZED_T_NT) diff --git a/src/core/CL/cl_kernels/tile_helpers.h b/src/core/CL/cl_kernels/tile_helpers.h index 872f4c0b57..c9b5370dea 100644 --- a/src/core/CL/cl_kernels/tile_helpers.h +++ b/src/core/CL/cl_kernels/tile_helpers.h @@ -536,6 +536,100 @@ }) \ }) +/** Store a VECTOR variable (e.g. int4, int8, char2 etc.) to a specified column in the TILE object + * + * @param[in] VECTOR Vector variable to store + * @param[in, out] TILE Tile variable to store to + * @param[in] WIDTH Width of the vector variable, also height of the tile (e.g. 2 if char2) + * @param[in] COLUMN Column index of the tile + */ +#define COPY_VECTOR_TO_TILE_COLUMN(VECTOR, TILE, WIDTH, COLUMN) COPY_VECTOR_TO_TILE_COLUMN_STR(VECTOR, TILE, WIDTH, COLUMN) +#define COPY_VECTOR_TO_TILE_COLUMN_STR(VECTOR, TILE, WIDTH, COLUMN) COPY_##WIDTH##_VECTOR_TO_TILE_COLUMN(VECTOR, TILE, COLUMN) +#define COPY_1_VECTOR_TO_TILE_COLUMN(VECTOR, TILE, COLUMN) \ + ({ \ + TILE[0].s[COLUMN] = VECTOR; \ + }) + +#define COPY_2_VECTOR_TO_TILE_COLUMN(VECTOR, TILE, COLUMN) \ + ({ \ + TILE[0].s[COLUMN] = VECTOR.s0; \ + TILE[1].s[COLUMN] = VECTOR.s1; \ + }) + +#define COPY_3_VECTOR_TO_TILE_COLUMN(VECTOR, TILE, COLUMN) \ + ({ \ + TILE[0].s[COLUMN] = VECTOR.s0; \ + TILE[1].s[COLUMN] = VECTOR.s1; \ + TILE[2].s[COLUMN] = VECTOR.s2; \ + }) + +#define COPY_4_VECTOR_TO_TILE_COLUMN(VECTOR, TILE, COLUMN) \ + ({ \ + TILE[0].s[COLUMN] = VECTOR.s0; \ + TILE[1].s[COLUMN] = VECTOR.s1; \ + TILE[2].s[COLUMN] = VECTOR.s2; \ + TILE[3].s[COLUMN] = VECTOR.s3; \ + }) + +#define COPY_8_VECTOR_TO_TILE_COLUMN(VECTOR, TILE, COLUMN) \ + ({ \ + TILE[0].s[COLUMN] = VECTOR.s0; \ + TILE[1].s[COLUMN] = VECTOR.s1; \ + TILE[2].s[COLUMN] = VECTOR.s2; \ + TILE[3].s[COLUMN] = VECTOR.s3; \ + TILE[4].s[COLUMN] = VECTOR.s4; \ + TILE[5].s[COLUMN] = VECTOR.s5; \ + TILE[6].s[COLUMN] = VECTOR.s6; \ + TILE[7].s[COLUMN] = VECTOR.s7; \ + }) + +#define COPY_16_VECTOR_TO_TILE_COLUMN(VECTOR, TILE, COLUMN) \ + ({ \ + TILE[0].s[COLUMN] = VECTOR.s0; \ + TILE[1].s[COLUMN] = VECTOR.s1; \ + TILE[2].s[COLUMN] = VECTOR.s2; \ + TILE[3].s[COLUMN] = VECTOR.s3; \ + TILE[4].s[COLUMN] = VECTOR.s4; \ + TILE[5].s[COLUMN] = VECTOR.s5; \ + TILE[6].s[COLUMN] = VECTOR.s6; \ + TILE[7].s[COLUMN] = VECTOR.s7; \ + TILE[8].s[COLUMN] = VECTOR.s8; \ + TILE[9].s[COLUMN] = VECTOR.s9; \ + TILE[10].s[COLUMN] = VECTOR.sA; \ + TILE[11].s[COLUMN] = VECTOR.sB; \ + TILE[12].s[COLUMN] = VECTOR.sC; \ + TILE[13].s[COLUMN] = VECTOR.sD; \ + TILE[14].s[COLUMN] = VECTOR.sE; 
\ + TILE[15].s[COLUMN] = VECTOR.sF; \ + }) + +/** Load SRC_HEIGHT x SRC_WIDTH elements from global memory (tensor), and store them in a SRC_WIDTH x SRC_HEIGHT tile + * + * @param[in] DATA_TYPE Data type + * @param[in] SRC_HEIGHT Number of source rows, or number of columns of the output tile + * @param[in] SRC_WIDTH Number of source columns, or number of tile rows + * @param[in] TENSOR_TYPE Type of cl_type used to store the tensor in global memory (BUFFER=cl_buffer, IMAGE=cl_image). + * In case of cl_image, only WIDTH multiples of 4 are supported (4, 8, 16) + * @param[in] TENSOR Tensor basename + * @param[in] X Starting X position + * @param[in] Y Starting Y position + * @param[in] YI_MULTIPLIER Parameter used to multiply the internal row increment (_i). + * In common cases should be 1 but it becomes useful when we want to load rows which are multiple of STRIDE_Y. + * (e.g. loading the weights of convolution layer). + * In this case the address calculation is performed as: (Y + _i * Y_MULTIPLIER) * STRIDE_Y + * @param[in] STRIDE_Y Stride Y (in bytes) used to load each row. + * @param[out] dst Output tile + */ +#define T_LOAD_TRANSPOSED(DATA_TYPE, SRC_HEIGHT, SRC_WIDTH, TENSOR_TYPE, TENSOR, X, Y, YI_MULTIPLIER, STRIDE_Y, dst) \ + ({ \ + LOOP_UNROLLING(int, _i, 0, 1, SRC_HEIGHT, \ + { \ + VEC_DATA_TYPE(DATA_TYPE, SRC_WIDTH) \ + tmp = V_LOAD(DATA_TYPE, SRC_WIDTH, TENSOR_TYPE, TENSOR, X, ((Y) + _i * (int)(YI_MULTIPLIER)), STRIDE_Y); \ + COPY_VECTOR_TO_TILE_COLUMN(tmp, dst, SRC_WIDTH, _i); \ + }) \ + }) + /** Load a tile from global memory (tensor) using an indirect Y index tile * * @param[in] DATA_TYPE Data type @@ -1259,6 +1353,11 @@ * @param[in] lhs LHS tile * @param[in] rhs RHS tile * @param[in, out] dst DST tile + * + * @note For Int8/UInt8 multiplications, we only have T_MMUL_NT_T because we need + * the multiply the rows of Lhs and Rhs tensors to utilize dot product extension. + * Addition of other versions requires dealing with on the fly transposition of + * these tile elements and therefore is not favored. 
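+ *
+ * For example, with the NT_T layout each output element is a plain dot product of an
+ * Lhs tile row and an Rhs tile row:
+ *   dst[m0].s[n0] += sum_k0 (lhs[m0].s[k0] * rhs[n0].s[k0])
+ * i.e. the row-by-row access pattern that the Int8/UInt8 dot product path relies on.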
*/ #define T_MMUL(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, LHS_LAYOUT, RHS_LAYOUT, lhs, rhs, dst) T_MMUL_##LHS_LAYOUT##_##RHS_LAYOUT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) #define T_MMUL_NT_T(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_##LHS_DATA_TYPE##_##RHS_DATA_TYPE##_##DST_DATA_TYPE(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) diff --git a/src/gpu/cl/ClKernelLibrary.cpp b/src/gpu/cl/ClKernelLibrary.cpp index 44b086f2fc..e657687887 100644 --- a/src/gpu/cl/ClKernelLibrary.cpp +++ b/src/gpu/cl/ClKernelLibrary.cpp @@ -323,6 +323,8 @@ const std::map ClKernelLibrary::_kernel_program_map = { "mat_mul_native_nt_t", "common/mat_mul.cl" }, { "mat_mul_native_t_nt", "common/mat_mul.cl" }, { "mat_mul_native_t_t", "common/mat_mul.cl" }, + { "mat_mul_native_quantized_nt_nt", "common/mat_mul_quantized.cl" }, + { "mat_mul_native_quantized_t_nt", "common/mat_mul_quantized.cl" }, { "max_unpooling_layer_2", "common/unpooling_layer.cl" }, { "mean_stddev_normalization", "common/mean_stddev_normalization.cl" }, { "memset", "common/memset.cl" }, @@ -794,6 +796,10 @@ const std::map ClKernelLibrary::_program_source_map = "common/mat_mul.cl", #include "./cl_kernels/common/mat_mul.clembed" }, + { + "common/mat_mul_quantized.cl", +#include "./cl_kernels/common/mat_mul_quantized.clembed" + }, #ifdef ENABLE_NCHW_KERNELS { "nchw/batch_to_space.cl", diff --git a/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp b/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp new file mode 100644 index 0000000000..d5ecdf7dd2 --- /dev/null +++ b/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp @@ -0,0 +1,224 @@ +/* + * Copyright (c) 2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "src/gpu/cl/kernels/ClMatMulLowpNativeKernel.h" + +#include "arm_compute/core/CL/CLHelpers.h" +#include "arm_compute/core/CL/ICLTensor.h" +#include "arm_compute/core/ITensorPack.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/utils/misc/ShapeCalculator.h" +#include "arm_compute/core/utils/quantization/AsymmHelpers.h" + +#include "src/common/utils/Log.h" +#include "src/core/helpers/AutoConfiguration.h" +#include "src/core/helpers/WindowHelpers.h" +#include "src/gpu/cl/ClCompileContext.h" + +#include "support/Cast.h" +#include "support/StringSupport.h" + +namespace arm_compute +{ +namespace opencl +{ +namespace kernels +{ +namespace +{ +Status validate_matmul_kernel_info(const MatMulKernelInfo &matmul_kernel_info) +{ + const bool adj_lhs = matmul_kernel_info.adj_lhs; + const bool adj_rhs = matmul_kernel_info.adj_rhs; + const int m0 = matmul_kernel_info.m0; + const int n0 = matmul_kernel_info.n0; + const int k0 = matmul_kernel_info.k0; + + // Validate M0 + ARM_COMPUTE_RETURN_ERROR_ON_MSG(m0 < 1, "Only positive integers are supported for M0"); + + if(adj_lhs) + { + ARM_COMPUTE_RETURN_ERROR_ON_MSG(((m0 & (m0 - 1)) && (m0 != 3)) || (m0 > 16), "Only 1,2,3,4,8,16 are supported for M0 for Lhs transposed"); + } + + // Validate N0 + ARM_COMPUTE_RETURN_ERROR_ON_MSG(n0 < 1, "Only positive integers are supported for N0"); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(((n0 & (n0 - 1)) && (n0 != 3)) || (n0 > 16), "Only 1,2,3,4,8,16 are supported for N0"); + + // Validate K0 + ARM_COMPUTE_RETURN_ERROR_ON_MSG(k0 < 1, "Only positive integers are supported for K0"); + if(!adj_lhs || adj_rhs) + { + ARM_COMPUTE_RETURN_ERROR_ON_MSG(((k0 & (k0 - 1)) && (k0 != 3)) || (k0 > 16), "Only 1,2,3,4,8,16 are supported for K0"); + } + + return Status{}; +} + +Status validate_input_shapes(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const MatMulKernelInfo &matmul_kernel_info) +{ + const size_t lhs_k = matmul_kernel_info.adj_lhs ? lhs_shape.y() : lhs_shape.x(); + const size_t rhs_k = matmul_kernel_info.adj_rhs ? 
rhs_shape.x() : rhs_shape.y(); + + ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_k != rhs_k, "K dimension in Lhs and Rhs matrices must match."); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_shape.total_size() == 0, "Lhs tensor can't be empty"); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(rhs_shape.total_size() == 0, "Rhs tensor can't be empty"); + + constexpr size_t batch_dim_start = 2; + for(size_t i = batch_dim_start; i < Coordinates::num_max_dimensions; ++i) + { + ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_shape[i] != rhs_shape[i], "Batch dimension broadcasting is not supported"); + } + + return Status{}; +} +} +ClMatMulLowpNativeKernel::ClMatMulLowpNativeKernel() +{ + _type = CLKernelType::GEMM; +} +Status ClMatMulLowpNativeKernel::validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *output, const MatMulKernelInfo &matmul_kernel_info) +{ + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs, output); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, rhs); + ARM_COMPUTE_RETURN_ON_ERROR(validate_matmul_kernel_info(matmul_kernel_info)); + ARM_COMPUTE_RETURN_ON_ERROR(validate_input_shapes(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info)); + + if(output->total_size() != 0) + { + const TensorInfo tensor_info_output = output->clone()->set_tensor_shape(misc::shape_calculator::compute_matmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info)); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, output); + } + + return Status{}; +} +void ClMatMulLowpNativeKernel::configure(const ClCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *output, const MatMulKernelInfo &matmul_kernel_info) +{ + ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, output, &compile_context, &matmul_kernel_info); + ARM_COMPUTE_LOG_PARAMS(lhs, rhs, output, matmul_kernel_info); + ARM_COMPUTE_ERROR_THROW_ON(validate(lhs, rhs, output, matmul_kernel_info)); + + // output tensor auto initialization if not yet initialized + auto_init_if_empty(*output, lhs->clone()->set_tensor_shape(misc::shape_calculator::compute_matmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info))); + + const int m = output->dimension(1); + const int n = output->dimension(0); + const int k = matmul_kernel_info.adj_lhs ? lhs->tensor_shape().y() : lhs->tensor_shape().x(); + const bool adj_lhs = matmul_kernel_info.adj_lhs; + + int m0 = adj_lhs ? adjust_vec_size(matmul_kernel_info.m0, m) : std::min(matmul_kernel_info.m0, m); + int n0 = adjust_vec_size(matmul_kernel_info.n0, n); + + // Configure kernel window + Window win = calculate_max_window(*output, Steps(n0, m0)); + win = win.collapse(win, Window::DimZ); + IClKernel::configure_internal(win); + + // Calculate partial (store instead of load) M0 and partial N0 for the partial blocks at the end of a row/column if any. This is to avoid padding. 
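+    // e.g. with m = 7 and m0 = 4 the final block along M covers only 3 rows, so
+    // partial_store_m0 = 7 % 4 = 3 and the kernel clamps its stores via PARTIAL_STORE_M0/N0
+    // rather than requiring the output to be padded.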
+ const unsigned int partial_store_m0 = m % m0; + const unsigned int partial_store_n0 = n % n0; + + CLBuildOptions build_opts; + build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(lhs->data_type())); + build_opts.add_option("-DM0=" + support::cpp11::to_string(m0)); + build_opts.add_option("-DN0=" + support::cpp11::to_string(n0)); + build_opts.add_option("-DK0=" + support::cpp11::to_string(matmul_kernel_info.k0)); + build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0)); + build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0)); + build_opts.add_option("-DK=" + support::cpp11::to_string(k)); + + const UniformQuantizationInfo lqinfo = lhs->quantization_info().uniform(); + const UniformQuantizationInfo rqinfo = rhs->quantization_info().uniform(); + const UniformQuantizationInfo dqinfo = output->quantization_info().uniform(); + + float multiplier = lqinfo.scale * rqinfo.scale / dqinfo.scale; + int output_multiplier = 0; + int output_shift = 0; + arm_compute::quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift); + + build_opts.add_option("-DDST_MULTIPLIER=" + support::cpp11::to_string(output_multiplier)); + build_opts.add_option("-DDST_SHIFT=" + support::cpp11::to_string(output_shift)); + + build_opts.add_option("-DLHS_OFFSET=" + support::cpp11::to_string(-lqinfo.offset)); // Note this is passed as negative to maintain similarity with CLDirectConv2D + build_opts.add_option("-DRHS_OFFSET=" + support::cpp11::to_string(-rqinfo.offset)); // Note this is passed as negative to maintain similarity with CLDirectConv2D + build_opts.add_option("-DDST_OFFSET=" + support::cpp11::to_string(dqinfo.offset)); // Passed as positive (unlike the above two) + + std::string kernel_name("mat_mul_native_quantized"); + kernel_name += matmul_kernel_info.adj_lhs ? "_t" : "_nt"; + kernel_name += matmul_kernel_info.adj_rhs ? 
"_t" : "_nt"; + + // A macro guard to compile ONLY the kernel of interest + build_opts.add_option("-D" + upper_string(kernel_name)); + + // Create kernel + _kernel = create_kernel(compile_context, kernel_name, build_opts.options()); + + // Set config_id for enabling LWS tuning + const size_t number_of_batches = output->tensor_shape().total_size() / (m * n); + + _config_id = kernel_name; + _config_id += "_"; + _config_id += lower_string(string_from_data_type(lhs->data_type())); + _config_id += "_"; + _config_id += support::cpp11::to_string(m); + _config_id += "_"; + _config_id += support::cpp11::to_string(n); + _config_id += "_"; + _config_id += support::cpp11::to_string(k); + _config_id += "_"; + _config_id += support::cpp11::to_string(number_of_batches); + _config_id += "_"; + _config_id += support::cpp11::to_string(m0); + _config_id += "_"; + _config_id += support::cpp11::to_string(n0); + _config_id += "_"; + _config_id += support::cpp11::to_string(matmul_kernel_info.k0); +} + +void ClMatMulLowpNativeKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) +{ + ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); + ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window); + + const ICLTensor *lhs = utils::cast::polymorphic_downcast(tensors.get_const_tensor(TensorType::ACL_SRC_0)); + const ICLTensor *rhs = utils::cast::polymorphic_downcast(tensors.get_const_tensor(TensorType::ACL_SRC_1)); + ICLTensor *output = utils::cast::polymorphic_downcast(tensors.get_tensor(TensorType::ACL_DST)); + ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, output); + ARM_COMPUTE_LOG_PARAMS(lhs, rhs, output); + + unsigned int idx = 0; + Window window_collapsed = window.collapse(ICLKernel::window(), Window::DimZ); + + add_3d_tensor_nhw_argument(idx, lhs); + add_3d_tensor_nhw_argument(idx, rhs); + add_3d_tensor_nhw_argument(idx, output); + + enqueue(queue, *this, window_collapsed, lws_hint()); +} + +} // namespace kernels +} // namespace opencl +} // namespace arm_compute diff --git a/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.h b/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.h new file mode 100644 index 0000000000..13a33fbd62 --- /dev/null +++ b/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef ACL_SRC_GPU_CL_KERNELS_CLMATMULLOWPNATIVEKERNEL +#define ACL_SRC_GPU_CL_KERNELS_CLMATMULLOWPNATIVEKERNEL + +#include "src/core/common/Macros.h" +#include "src/gpu/cl/ClCompileContext.h" +#include "src/gpu/cl/IClKernel.h" + +namespace arm_compute +{ +// Forward declerations +struct MatMulKernelInfo; +namespace opencl +{ +namespace kernels +{ +class ClMatMulLowpNativeKernel : public IClKernel +{ +public: + ClMatMulLowpNativeKernel(); + ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClMatMulLowpNativeKernel); + /** Initialise the kernel's input and output. + * + * @param[in] compile_context The compile context to be used. + * @param[in] lhs Input tensor for the LHS matrix. Data type supported: QASYMM8_SIGNED/QASYMM8. + * Dimensions above 2 are collapsed onto dimension 2 and represent the batch. + * @param[in] rhs Input tensor for the RHS matrix. Data type supported: same as @p lhs. + * Dimensions above 2 are collapsed onto dimension 2 and represent the batch. + * @param[out] output Output tensor info. Data type supported: same as @p lhs + * @param[in] matmul_info Attributes for Batch MatMul Kernel + */ + void configure(const ClCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *output, const MatMulKernelInfo &matmul_info); + /** Static function to check if given info will lead to a valid configuration + * + * Similar to @ref ClMatMulLowpNativeKernel::configure() + * + * @return a status + */ + static Status validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *output, const MatMulKernelInfo &matmul_info); + + // Inherited methods overridden: + void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override; +}; +} // namespace kernels +} // namespace opencl +} // namespace arm_compute +#endif /* ACL_SRC_GPU_CL_KERNELS_CLMATMULLOWPNATIVEKERNEL */ diff --git a/tests/validation/CL/MatMulKernel.cpp b/tests/validation/CL/MatMulKernel.cpp index 9c19e42d04..ff872aaa0a 100644 --- a/tests/validation/CL/MatMulKernel.cpp +++ b/tests/validation/CL/MatMulKernel.cpp @@ -73,7 +73,7 @@ const auto k0_values_nightly_rhs_t = framework::dataset::make("K0", { 1, const auto k0_values_nightly_lhs_t_rhs_nt = framework::dataset::make("K0", { 1, 2, 3, 4, 5, 6, 7, 8 }); template -using CLMatMulKernelFixture = MatMulKernelValidationFixture; +using CLMatMulKernelFixture = MatMulKernelValidationFixture; TEST_SUITE(CL) TEST_SUITE(MatMulKernel) @@ -95,8 +95,8 @@ TEST_CASE(SupportedBlockSizes, framework::DatasetMode::ALL) { MatMulKernelInfo(false, false, 9, 1, 2), true }, { MatMulKernelInfo(false, false, 3, 16, 3), true }, { MatMulKernelInfo(false, false, 7, 3, 4), true }, - { MatMulKernelInfo(false, false, 7, 3, 4, true), false }, // N0 not in {4, 8, 16} - { MatMulKernelInfo(false, false, 7, 1, 4, true), false }, // N0 not in {4, 8, 16} + { MatMulKernelInfo(false, false, 7, 3, 4, true), false }, // N0 not in {4, 8, 16} + { MatMulKernelInfo(false, false, 7, 1, 4, true), false }, // N0 not in {4, 8, 16} { MatMulKernelInfo(false, false, 7, 12, 4, true), false }, // N0 not in {4, 8, 16} { MatMulKernelInfo(false, false, 7, 4, 4, true), true }, { MatMulKernelInfo(false, false, 7, 8, 4, true), true }, @@ -166,7 +166,7 @@ TEST_CASE(SupportedBlockSizes, framework::DatasetMode::ALL) if(!pair.first.export_rhs_to_cl_image || export_to_cl_image_supported) { - ARM_COMPUTE_EXPECT(bool(status) == pair.second, framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(bool(status) == pair.second, framework::LogLevel::ERRORS); } } } @@ -176,9 +176,9 @@ 
TEST_CASE(ExportToCLImage, framework::DatasetMode::ALL) // We skip this test if the hardware does not support exporting to CL Image if(image2d_from_buffer_supported(CLKernelLibrary::get().get_device())) { - constexpr size_t pixel_size = 4; - const size_t max_image_w = pixel_size * CLKernelLibrary::get().get_device().getInfo(); - const size_t max_image_h = CLKernelLibrary::get().get_device().getInfo(); + constexpr size_t pixel_size = 4; + const size_t max_image_w = pixel_size * CLKernelLibrary::get().get_device().getInfo(); + const size_t max_image_h = CLKernelLibrary::get().get_device().getInfo(); using ShapeConfigurationTuple = std::tuple; const std::vector shape_configurations = @@ -186,18 +186,18 @@ TEST_CASE(ExportToCLImage, framework::DatasetMode::ALL) // lhs_shape, rhs_shape, adj_lhs, adj_rhs, expected // Lhs t/Nt, Rhs Nt // Transposition of Lhs doesn't add any value to the tests, therefore always assumed false below - { TensorShape(5U, 1U), TensorShape(3U, 5U), false, false, false }, // N should be multiple of 4 + { TensorShape(5U, 1U), TensorShape(3U, 5U), false, false, false }, // N should be multiple of 4 { TensorShape(5U, 1U), TensorShape(14U, 5U), false, false, false }, // N should be multiple of 4 { TensorShape(5U, 1U), TensorShape(12U, 5U), false, false, true }, { TensorShape(5U, 1U), TensorShape(8U, 5U), false, false, true }, { TensorShape(5U, 1U), TensorShape(4U, 5U), false, false, true }, { TensorShape(max_image_h + 1, 1U), TensorShape(4U, max_image_h + 1), false, false, false }, // Cannot fit into CL Image memory's height - { TensorShape(5U, 1U), TensorShape(max_image_w + 1, 5U), false, false, false }, // Cannot fit into CL Image memory's width - { TensorShape(max_image_h, 1U), TensorShape(4U, max_image_h), false, false, true }, // Barely fits into CL Image memory's height - { TensorShape(5U, 1U), TensorShape(max_image_w, 5U), false, false, true }, // Barely fits into CL Image memory's width + { TensorShape(5U, 1U), TensorShape(max_image_w + 1, 5U), false, false, false }, // Cannot fit into CL Image memory's width + { TensorShape(max_image_h, 1U), TensorShape(4U, max_image_h), false, false, true }, // Barely fits into CL Image memory's height + { TensorShape(5U, 1U), TensorShape(max_image_w, 5U), false, false, true }, // Barely fits into CL Image memory's width // Lhs Nt/T , Rhs T - { TensorShape(5U, 1U), TensorShape(5U, 3U), false, true, false }, // K should be multiple of 4 + { TensorShape(5U, 1U), TensorShape(5U, 3U), false, true, false }, // K should be multiple of 4 { TensorShape(5U, 1U), TensorShape(5U, 14U), false, true, false }, // K should be multiple of 4 { TensorShape(4U, 1U), TensorShape(4U, 10U), false, true, true }, { TensorShape(8U, 1U), TensorShape(8U, 9U), false, true, true }, @@ -216,7 +216,10 @@ TEST_CASE(ExportToCLImage, framework::DatasetMode::ALL) const bool adj_rhs = std::get<3>(tuple); // We choose M0, N0, K0 equal to 4 so that they're always valid for CLImage in any combination - const MatMulKernelInfo matmul_kernel_info {adj_lhs, adj_rhs, 4, 4, 4, true /* export_rhs_to_cl_image */}; + const MatMulKernelInfo matmul_kernel_info + { + adj_lhs, adj_rhs, 4, 4, 4, true /* export_rhs_to_cl_image */ + }; TensorInfo output_info; Status status = ClMatMulNativeKernel::validate(&lhs_info, &rhs_info, &output_info, matmul_kernel_info); @@ -330,60 +333,60 @@ TEST_SUITE(Float) TEST_SUITE(FP32) TEST_SUITE(Buffer) FIXTURE_DATA_TEST_CASE(RunTiny, CLMatMulKernelFixture, framework::DatasetMode::ALL, 
combine(combine(combine(combine(combine(combine(combine(datasets::TinyMatMulDataset(), - framework::dataset::make("pretransose_A", { false, true })), - framework::dataset::make("pretransose_B", { false, true })), - m0_values_precommit), - n0_values_precommit), - k0_values_precommit), - framework::dataset::make("export_rhs_to_cl_image", { false })), - framework::dataset::make("DataType", DataType::F32))) + framework::dataset::make("TransposeA", { false, true })), + framework::dataset::make("TransposeB", { false, true })), + m0_values_precommit), + n0_values_precommit), + k0_values_precommit), + framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::F32))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32); } FIXTURE_DATA_TEST_CASE(RunSmall, CLMatMulKernelFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDataset(), - framework::dataset::make("pretransose_A", { false, true })), - framework::dataset::make("pretransose_B", { false, true })), - m0_values_precommit), - n0_values_precommit), - k0_values_precommit), - framework::dataset::make("export_rhs_to_cl_image", { false })), - framework::dataset::make("DataType", DataType::F32))) + framework::dataset::make("TransposeA", { false, true })), + framework::dataset::make("TransposeB", { false, true })), + m0_values_precommit), + n0_values_precommit), + k0_values_precommit), + framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::F32))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32); } FIXTURE_DATA_TEST_CASE(RunLargeNoTranspose, CLMatMulKernelFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(), - framework::dataset::make("pretransose_A", { false })), - framework::dataset::make("pretransose_B", { false })), + framework::dataset::make("TransposeA", { false })), + framework::dataset::make("TransposeB", { false })), m0_values_nightly_lhs_nt), n0_values_nightly_rhs_nt), k0_values_nightly_lhs_nt_rhs_nt), - framework::dataset::make("export_rhs_to_cl_image", { false })), + framework::dataset::make("ExportRhsToCLImage", { false })), framework::dataset::make("DataType", DataType::F32))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32); } FIXTURE_DATA_TEST_CASE(RunLargeRhsTransposed, CLMatMulKernelFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(), - framework::dataset::make("pretransose_A", { false })), - framework::dataset::make("pretransose_B", { true })), + framework::dataset::make("TransposeA", { false })), + framework::dataset::make("TransposeB", { true })), m0_values_nightly_lhs_nt), n0_values_nightly_rhs_t), k0_values_nightly_rhs_t), - framework::dataset::make("export_rhs_to_cl_image", { false })), + framework::dataset::make("ExportRhsToCLImage", { false })), framework::dataset::make("DataType", DataType::F32))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32); } FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposed, CLMatMulKernelFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(), - framework::dataset::make("pretransose_A", { true })), - 
framework::dataset::make("pretransose_B", { false })), + framework::dataset::make("TransposeA", { true })), + framework::dataset::make("TransposeB", { false })), m0_values_nightly_lhs_t), n0_values_nightly_rhs_nt), k0_values_nightly_lhs_t_rhs_nt), - framework::dataset::make("export_rhs_to_cl_image", { false })), + framework::dataset::make("ExportRhsToCLImage", { false })), framework::dataset::make("DataType", DataType::F32))) { // Validate output @@ -391,12 +394,12 @@ FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposed, CLMatMulKernelFixture, fram } FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposedRhsTransposed, CLMatMulKernelFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(), - framework::dataset::make("pretransose_A", { true })), - framework::dataset::make("pretransose_B", { true })), - m0_values_nightly_lhs_t), - n0_values_nightly_rhs_t), - k0_values_nightly_rhs_t), - framework::dataset::make("export_rhs_to_cl_image", { false })), + framework::dataset::make("TransposeA", { true })), + framework::dataset::make("TransposeB", { true })), + m0_values_nightly_lhs_t), + n0_values_nightly_rhs_t), + k0_values_nightly_rhs_t), + framework::dataset::make("ExportRhsToCLImage", { false })), framework::dataset::make("DataType", DataType::F32))) { // Validate output @@ -405,13 +408,13 @@ FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposedRhsTransposed, CLMatMulKernelFixture // Running High Dimensional test is enough for FP32, because we're stressing the number of dimensions, not data type or M0/N0/K0 // It's a good idea to test for each Lhs/Rhs T/NT combinations because they're different CL kernels FIXTURE_DATA_TEST_CASE(RunHighDimensional, CLMatMulKernelFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::HighDimensionalMatMulDataset(), - framework::dataset::make("pretransose_A", { false, true })), - framework::dataset::make("pretransose_B", { false, true })), - framework::dataset::make("M0", { 2 })), - framework::dataset::make("N0", { 2 })), - framework::dataset::make("K0", { 2 })), - framework::dataset::make("export_rhs_to_cl_image", { false })), - framework::dataset::make("DataType", DataType::F32))) + framework::dataset::make("TransposeA", { false, true })), + framework::dataset::make("TransposeB", { false, true })), + framework::dataset::make("M0", { 2 })), + framework::dataset::make("N0", { 2 })), + framework::dataset::make("K0", { 2 })), + framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::F32))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32); @@ -419,14 +422,15 @@ FIXTURE_DATA_TEST_CASE(RunHighDimensional, CLMatMulKernelFixture, framewo TEST_SUITE_END() // Buffer TEST_SUITE(ExportRhsToCLImage) -FIXTURE_DATA_TEST_CASE(RunSmallRhsNotTransposed, CLMatMulKernelFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDatasetRhsExportToCLImageRhsNT(), - framework::dataset::make("pretransose_A", { true, false })), - framework::dataset::make("pretransose_B", { false })), - framework::dataset::make("M0", { 2 })), - framework::dataset::make("N0", { 4, 8, 16 })), - framework::dataset::make("K0", { 2, 4 })), - framework::dataset::make("export_rhs_to_cl_image", { true })), - framework::dataset::make("DataType", DataType::F32))) +FIXTURE_DATA_TEST_CASE(RunSmallRhsNotTransposed, CLMatMulKernelFixture, framework::DatasetMode::ALL, + 
combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDatasetRhsExportToCLImageRhsNT(), + framework::dataset::make("TransposeA", { true, false })), + framework::dataset::make("TransposeB", { false })), + framework::dataset::make("M0", { 2 })), + framework::dataset::make("N0", { 4, 8, 16 })), + framework::dataset::make("K0", { 2, 4 })), + framework::dataset::make("ExportRhsToCLImage", { true })), + framework::dataset::make("DataType", DataType::F32))) { // Validate output if(_device_supports_export_to_cl_image) @@ -434,14 +438,15 @@ FIXTURE_DATA_TEST_CASE(RunSmallRhsNotTransposed, CLMatMulKernelFixture, f validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32); } } -FIXTURE_DATA_TEST_CASE(RunLargeRhsNotTransposed, CLMatMulKernelFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDatasetRhsExportToCLImageRhsNT(), - framework::dataset::make("pretransose_A", { true, false })), - framework::dataset::make("pretransose_B", { false })), - framework::dataset::make("M0", { 2 })), // Choices of M0 does not matter much because it's related to Lhs tensor - framework::dataset::make("N0", { 4, 8, 16 })), - framework::dataset::make("K0", { 1, 2, 3, 4 })), - framework::dataset::make("export_rhs_to_cl_image", { true })), - framework::dataset::make("DataType", DataType::F32))) +FIXTURE_DATA_TEST_CASE(RunLargeRhsNotTransposed, CLMatMulKernelFixture, framework::DatasetMode::NIGHTLY, + combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDatasetRhsExportToCLImageRhsNT(), + framework::dataset::make("TransposeA", { true, false })), + framework::dataset::make("TransposeB", { false })), + framework::dataset::make("M0", { 2 })), // Choices of M0 does not matter much because it's related to Lhs tensor + framework::dataset::make("N0", { 4, 8, 16 })), + framework::dataset::make("K0", { 1, 2, 3, 4 })), + framework::dataset::make("ExportRhsToCLImage", { true })), + framework::dataset::make("DataType", DataType::F32))) { // Validate output if(_device_supports_export_to_cl_image) @@ -449,14 +454,15 @@ FIXTURE_DATA_TEST_CASE(RunLargeRhsNotTransposed, CLMatMulKernelFixture, f validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32); } } -FIXTURE_DATA_TEST_CASE(RunSmallRhsTransposed, CLMatMulKernelFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDatasetRhsExportToCLImageRhsT(), - framework::dataset::make("pretransose_A", { true, false })), - framework::dataset::make("pretransose_B", { true })), - framework::dataset::make("M0", { 2 })), - framework::dataset::make("N0", { 2, 4 })), - framework::dataset::make("K0", { 4, 8, 16 })), - framework::dataset::make("export_rhs_to_cl_image", { true })), - framework::dataset::make("DataType", DataType::F32))) +FIXTURE_DATA_TEST_CASE(RunSmallRhsTransposed, CLMatMulKernelFixture, framework::DatasetMode::ALL, + combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDatasetRhsExportToCLImageRhsT(), + framework::dataset::make("TransposeA", { true, false })), + framework::dataset::make("TransposeB", { true })), + framework::dataset::make("M0", { 2 })), + framework::dataset::make("N0", { 2, 4 })), + framework::dataset::make("K0", { 4, 8, 16 })), + framework::dataset::make("ExportRhsToCLImage", { true })), + framework::dataset::make("DataType", DataType::F32))) { // Validate output if(_device_supports_export_to_cl_image) @@ -464,14 +470,15 @@ 
FIXTURE_DATA_TEST_CASE(RunSmallRhsTransposed, CLMatMulKernelFixture, fram validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32); } } -FIXTURE_DATA_TEST_CASE(RunLargeRhsTransposed, CLMatMulKernelFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDatasetRhsExportToCLImageRhsT(), - framework::dataset::make("pretransose_A", { true, false })), - framework::dataset::make("pretransose_B", { true })), - framework::dataset::make("M0", { 2 })), // Choices of M0 does not matter much because it's related to Lhs tensor - framework::dataset::make("N0", { 1, 2, 3, 4 })), - framework::dataset::make("K0", { 4, 8, 16 })), - framework::dataset::make("export_rhs_to_cl_image", { true })), - framework::dataset::make("DataType", DataType::F32))) +FIXTURE_DATA_TEST_CASE(RunLargeRhsTransposed, CLMatMulKernelFixture, framework::DatasetMode::NIGHTLY, + combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDatasetRhsExportToCLImageRhsT(), + framework::dataset::make("TransposeA", { true, false })), + framework::dataset::make("TransposeB", { true })), + framework::dataset::make("M0", { 2 })), // Choices of M0 does not matter much because it's related to Lhs tensor + framework::dataset::make("N0", { 1, 2, 3, 4 })), + framework::dataset::make("K0", { 4, 8, 16 })), + framework::dataset::make("ExportRhsToCLImage", { true })), + framework::dataset::make("DataType", DataType::F32))) { // Validate output if(_device_supports_export_to_cl_image) @@ -485,61 +492,62 @@ TEST_SUITE_END() // FP32 TEST_SUITE(FP16) TEST_SUITE(Buffer) FIXTURE_DATA_TEST_CASE(RunSmall, CLMatMulKernelFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDataset(), - framework::dataset::make("pretransose_A", { false, true })), - framework::dataset::make("pretransose_B", { false, true })), - m0_values_precommit), - n0_values_precommit), - k0_values_precommit), - framework::dataset::make("export_rhs_to_cl_image", { false })), - framework::dataset::make("DataType", DataType::F16))) + framework::dataset::make("TransposeA", { false, true })), + framework::dataset::make("TransposeB", { false, true })), + m0_values_precommit), + n0_values_precommit), + k0_values_precommit), + framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::F16))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f16, 0.f, abs_tolerance_f16); } FIXTURE_DATA_TEST_CASE(RunLargeNoTranspose, CLMatMulKernelFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(), - framework::dataset::make("pretransose_A", { false })), - framework::dataset::make("pretransose_B", { false })), + framework::dataset::make("TransposeA", { false })), + framework::dataset::make("TransposeB", { false })), m0_values_nightly_lhs_nt), n0_values_nightly_rhs_nt), k0_values_nightly_lhs_nt_rhs_nt), - framework::dataset::make("export_rhs_to_cl_image", { false })), + framework::dataset::make("ExportRhsToCLImage", { false })), framework::dataset::make("DataType", DataType::F16))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f16, 0.f, abs_tolerance_f16); } FIXTURE_DATA_TEST_CASE(RunLargeRhsTransposed, CLMatMulKernelFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(), - 
framework::dataset::make("pretransose_A", { false })), - framework::dataset::make("pretransose_B", { true })), + framework::dataset::make("TransposeA", { false })), + framework::dataset::make("TransposeB", { true })), m0_values_nightly_lhs_nt), n0_values_nightly_rhs_t), k0_values_nightly_rhs_t), - framework::dataset::make("export_rhs_to_cl_image", { false })), + framework::dataset::make("ExportRhsToCLImage", { false })), framework::dataset::make("DataType", DataType::F16))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f16, 0.f, abs_tolerance_f16); } FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposed, CLMatMulKernelFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(), - framework::dataset::make("pretransose_A", { true })), - framework::dataset::make("pretransose_B", { false })), + framework::dataset::make("TransposeA", { true })), + framework::dataset::make("TransposeB", { false })), m0_values_nightly_lhs_t), n0_values_nightly_rhs_nt), k0_values_nightly_lhs_t_rhs_nt), - framework::dataset::make("export_rhs_to_cl_image", { false })), + framework::dataset::make("ExportRhsToCLImage", { false })), framework::dataset::make("DataType", DataType::F16))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f16, 0.f, abs_tolerance_f16); } -FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposedRhsTransposed, CLMatMulKernelFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(), - framework::dataset::make("pretransose_A", { true })), - framework::dataset::make("pretransose_B", { true })), - m0_values_nightly_lhs_t), - n0_values_nightly_rhs_t), - k0_values_nightly_rhs_t), - framework::dataset::make("export_rhs_to_cl_image", { false })), - framework::dataset::make("DataType", DataType::F16))) +FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposedRhsTransposed, CLMatMulKernelFixture, framework::DatasetMode::NIGHTLY, + combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(), + framework::dataset::make("TransposeA", { true })), + framework::dataset::make("TransposeB", { true })), + m0_values_nightly_lhs_t), + n0_values_nightly_rhs_t), + k0_values_nightly_rhs_t), + framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::F16))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f16, 0.f, abs_tolerance_f16); @@ -547,14 +555,15 @@ FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposedRhsTransposed, CLMatMulKernelFixture TEST_SUITE_END() // Buffer TEST_SUITE(ExportRhsToCLImage) -FIXTURE_DATA_TEST_CASE(RunSmallRhsNotTransposed, CLMatMulKernelFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDatasetRhsExportToCLImageRhsNT(), - framework::dataset::make("pretransose_A", { true, false })), - framework::dataset::make("pretransose_B", { false })), - framework::dataset::make("M0", { 2 })), - framework::dataset::make("N0", { 4, 8, 16 })), - framework::dataset::make("K0", { 2, 4 })), - framework::dataset::make("export_rhs_to_cl_image", { true })), - framework::dataset::make("DataType", DataType::F16))) +FIXTURE_DATA_TEST_CASE(RunSmallRhsNotTransposed, CLMatMulKernelFixture, framework::DatasetMode::ALL, + combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDatasetRhsExportToCLImageRhsNT(), + framework::dataset::make("TransposeA", { true, false })), + 
framework::dataset::make("TransposeB", { false })), + framework::dataset::make("M0", { 2 })), + framework::dataset::make("N0", { 4, 8, 16 })), + framework::dataset::make("K0", { 2, 4 })), + framework::dataset::make("ExportRhsToCLImage", { true })), + framework::dataset::make("DataType", DataType::F16))) { // Validate output if(_device_supports_export_to_cl_image) @@ -562,14 +571,15 @@ FIXTURE_DATA_TEST_CASE(RunSmallRhsNotTransposed, CLMatMulKernelFixture, fr validate(CLAccessor(_target), _reference, tolerance_f16, 0.f, abs_tolerance_f16); } } -FIXTURE_DATA_TEST_CASE(RunLargeRhsNotTransposed, CLMatMulKernelFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDatasetRhsExportToCLImageRhsNT(), - framework::dataset::make("pretransose_A", { true, false })), - framework::dataset::make("pretransose_B", { false })), - framework::dataset::make("M0", { 2 })), // Choices of M0 does not matter much because it's related to Lhs tensor - framework::dataset::make("N0", { 4, 8, 16 })), - framework::dataset::make("K0", { 1, 2, 3, 4 })), - framework::dataset::make("export_rhs_to_cl_image", { true })), - framework::dataset::make("DataType", DataType::F16))) +FIXTURE_DATA_TEST_CASE(RunLargeRhsNotTransposed, CLMatMulKernelFixture, framework::DatasetMode::NIGHTLY, + combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDatasetRhsExportToCLImageRhsNT(), + framework::dataset::make("TransposeA", { true, false })), + framework::dataset::make("TransposeB", { false })), + framework::dataset::make("M0", { 2 })), // Choices of M0 does not matter much because it's related to Lhs tensor + framework::dataset::make("N0", { 4, 8, 16 })), + framework::dataset::make("K0", { 1, 2, 3, 4 })), + framework::dataset::make("ExportRhsToCLImage", { true })), + framework::dataset::make("DataType", DataType::F16))) { // Validate output if(_device_supports_export_to_cl_image) @@ -577,14 +587,15 @@ FIXTURE_DATA_TEST_CASE(RunLargeRhsNotTransposed, CLMatMulKernelFixture, fr validate(CLAccessor(_target), _reference, tolerance_f16, 0.f, abs_tolerance_f16); } } -FIXTURE_DATA_TEST_CASE(RunSmallRhsTransposed, CLMatMulKernelFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDatasetRhsExportToCLImageRhsT(), - framework::dataset::make("pretransose_A", { true, false })), - framework::dataset::make("pretransose_B", { true })), - framework::dataset::make("M0", { 2 })), - framework::dataset::make("N0", { 2, 4 })), - framework::dataset::make("K0", { 4, 8, 16 })), - framework::dataset::make("export_rhs_to_cl_image", { true })), - framework::dataset::make("DataType", DataType::F16))) +FIXTURE_DATA_TEST_CASE(RunSmallRhsTransposed, CLMatMulKernelFixture, framework::DatasetMode::ALL, + combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDatasetRhsExportToCLImageRhsT(), + framework::dataset::make("TransposeA", { true, false })), + framework::dataset::make("TransposeB", { true })), + framework::dataset::make("M0", { 2 })), + framework::dataset::make("N0", { 2, 4 })), + framework::dataset::make("K0", { 4, 8, 16 })), + framework::dataset::make("ExportRhsToCLImage", { true })), + framework::dataset::make("DataType", DataType::F16))) { // Validate output if(_device_supports_export_to_cl_image) @@ -592,14 +603,15 @@ FIXTURE_DATA_TEST_CASE(RunSmallRhsTransposed, CLMatMulKernelFixture, frame validate(CLAccessor(_target), _reference, tolerance_f16, 0.f, abs_tolerance_f16); } } 
-FIXTURE_DATA_TEST_CASE(RunLargeRhsTransposed, CLMatMulKernelFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDatasetRhsExportToCLImageRhsT(), - framework::dataset::make("pretransose_A", { true, false })), - framework::dataset::make("pretransose_B", { true })), - framework::dataset::make("M0", { 2 })), // Choices of M0 does not matter much because it's related to Lhs tensor - framework::dataset::make("N0", { 1, 2, 3, 4 })), - framework::dataset::make("K0", { 4, 8, 16 })), - framework::dataset::make("export_rhs_to_cl_image", { true })), - framework::dataset::make("DataType", DataType::F16))) +FIXTURE_DATA_TEST_CASE(RunLargeRhsTransposed, CLMatMulKernelFixture, framework::DatasetMode::NIGHTLY, + combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDatasetRhsExportToCLImageRhsT(), + framework::dataset::make("TransposeA", { true, false })), + framework::dataset::make("TransposeB", { true })), + framework::dataset::make("M0", { 2 })), // Choices of M0 does not matter much because it's related to Lhs tensor + framework::dataset::make("N0", { 1, 2, 3, 4 })), + framework::dataset::make("K0", { 4, 8, 16 })), + framework::dataset::make("ExportRhsToCLImage", { true })), + framework::dataset::make("DataType", DataType::F16))) { // Validate output if(_device_supports_export_to_cl_image) diff --git a/tests/validation/CL/MatMulLowpNativeKernel.cpp b/tests/validation/CL/MatMulLowpNativeKernel.cpp new file mode 100644 index 0000000000..5932fa7c21 --- /dev/null +++ b/tests/validation/CL/MatMulLowpNativeKernel.cpp @@ -0,0 +1,337 @@ +/* + * Copyright (c) 2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "arm_compute/runtime/CL/CLTensor.h" + +#include "src/gpu/cl/kernels/ClMatMulLowpNativeKernel.h" + +#include "tests/datasets/LargeMatMulDataset.h" +#include "tests/datasets/SmallMatMulDataset.h" +#include "tests/framework/Macros.h" +#include "tests/framework/datasets/Datasets.h" +#include "tests/validation/Validation.h" +#include "tests/validation/fixtures/MatMulKernelFixture.h" +#include "tests/validation/reference/Permute.h" + +#include + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +namespace +{ +constexpr AbsoluteTolerance tolerance_quant(1); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */ +} +template +using CLMatMulLowpNativeKernelFixture = MatMulKernelValidationFixture; + +/** M0 values to test --precommit*/ +const auto m0_values_precommit = framework::dataset::make("M0", { 1, 3 }); + +/** N0 values to test --precommit*/ +const auto n0_values_precommit = framework::dataset::make("N0", { 2, 4 }); + +/** K0 values to test --precommit*/ +const auto k0_values_precommit = framework::dataset::make("K0", { 2, 3 }); + +/** M0 values to test --nightly*/ +const auto m0_values_nightly_lhs_nt = framework::dataset::make("M0", { 1, 2, 3, 4, 5, 6, 7, 8 }); +const auto m0_values_nightly_lhs_t = framework::dataset::make("M0", { 1, 2, 3, 4, 8 }); + +/** N0 values to test --nightly*/ +const auto n0_values_nightly_rhs_nt = framework::dataset::make("N0", { 1, 2, 3, 4, 8, 16 }); +// const auto n0_values_nightly_rhs_t = framework::dataset::make("N0", { 1, 2, 3, 4, 8 }); + +/** K0 values to test --nightly*/ +const auto k0_values_nightly_lhs_nt_rhs_nt = framework::dataset::make("K0", { 1, 2, 3, 4, 8, 16 }); +// const auto k0_values_nightly_rhs_t = framework::dataset::make("K0", { 1, 2, 3, 4, 8 }); +const auto k0_values_nightly_lhs_t_rhs_nt = framework::dataset::make("K0", { 1, 2, 3, 4, 5, 6, 7, 8 }); + +TEST_SUITE(CL) +TEST_SUITE(MatMulLowpNativeKernel) +TEST_SUITE(Validate) + +TEST_CASE(SupportedKernelConfigurations, framework::DatasetMode::ALL) +{ + using MatMulConfigurationPair = std::pair; + + const std::vector supported_block_sizes = + { + // MatMulKernelInfo(adj_lhs, adj_rhs, M0, N0, K0, export_rhs_to_cl_image = false) + // Lhs not-transposed, Rhs-not-transposed + { MatMulKernelInfo(false, false, 0, 1, 1), false }, // M0 should be > 0 + { MatMulKernelInfo(false, false, 3, 5, 1), false }, // N0 not in {1, 2, 3, 4, 8, 16} + { MatMulKernelInfo(false, false, 3, 6, 1), false }, // N0 not in {1, 2, 3, 4, 8, 16} + { MatMulKernelInfo(false, false, 3, 3, 17), false }, // K0 not in {1, 2, 3, 4, 8, 16} + { MatMulKernelInfo(false, false, 3, 3, 7), false }, // K0 not in {1, 2, 3, 4, 8, 16} + { MatMulKernelInfo(false, false, 9, 1, 2), true }, + { MatMulKernelInfo(false, false, 3, 16, 3), true }, + { MatMulKernelInfo(false, false, 7, 3, 4), true }, + { MatMulKernelInfo(false, false, 7, 3, 4, true), true }, // export to CLImage is unsupported for quantized types + }; + + // Set big enough shapes so that block sizes are not truncated. Also, set all dimensions equal + // so that it doesn't fail for different NT/T configurations. We aim to test the block sizes here, + // not the shapes themselves. 
+ const TensorInfo lhs_info = TensorInfo(TensorShape(100U, 100U), 1, DataType::QASYMM8_SIGNED); + const TensorInfo rhs_info = TensorInfo(TensorShape(100U, 100U), 1, DataType::QASYMM8_SIGNED); + + for(auto &pair : supported_block_sizes) + { + TensorInfo output_info; + Status status = ClMatMulLowpNativeKernel::validate(&lhs_info, &rhs_info, &output_info, pair.first); + + ARM_COMPUTE_EXPECT(bool(status) == pair.second, framework::LogLevel::ERRORS); + } +} + +TEST_CASE(ValidateInputShapes, framework::DatasetMode::ALL) +{ + // Configurations are assumed to be Nt/Nt, but will be transposed inside the test to test other configurations + using ShapeConfigurationTuple = std::tuple; + const std::vector shape_configurations = + { + { TensorShape(5U, 1U), TensorShape(3U, 5U), true }, + { TensorShape(10U, 12U), TensorShape(3U, 10U), true }, + { TensorShape(8U, 4U), TensorShape(2U, 8U), true }, + { TensorShape(8U, 4U), TensorShape(2U, 5U), false }, // Mismatch in the K dimension + { TensorShape(5U, 0U), TensorShape(2U, 5U), false }, // Invalid dimension + { TensorShape(5U, 4U, 3U, 4U, 5U, 6U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), true }, + { TensorShape(5U, 4U, 3U, 4U, 5U, 1U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), false }, // no batch broadcasting + { TensorShape(5U, 4U, 3U, 4U, 9U, 6U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), false }, // mismatch in batch dimension + }; + + for(auto &tuple : shape_configurations) + { + const bool expected = std::get<2>(tuple); + + for(bool adj_lhs : + { + false, true + }) + { + for(bool adj_rhs : + { + false, true + }) + { + TensorShape lhs_shape = std::get<0>(tuple); + TensorShape rhs_shape = std::get<1>(tuple); + + if(adj_lhs) + { + permute(lhs_shape, PermutationVector(1U, 0U)); + } + + if(adj_rhs) + { + permute(rhs_shape, PermutationVector(1U, 0U)); + } + + const TensorInfo lhs_info = TensorInfo(lhs_shape, 1, DataType::QASYMM8_SIGNED); + const TensorInfo rhs_info = TensorInfo(rhs_shape, 1, DataType::QASYMM8_SIGNED); + TensorInfo output_info; + + MatMulKernelInfo matmul_kernel_info{ adj_lhs, adj_rhs, 1, 1, 1, false /* export_rhs_to_cl_image */ }; + + Status status = ClMatMulLowpNativeKernel::validate(&lhs_info, &rhs_info, &output_info, matmul_kernel_info); + ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS); + } + } + } +} + +TEST_CASE(ValidateDataTypes, framework::DatasetMode::ALL) +{ + using DataTypeConfigurationTuple = std::tuple; + const std::vector data_type_configurations = + { + { DataType::F32, DataType::F32, DataType::F32, false }, // no floating point types + { DataType::F16, DataType::F16, DataType::F16, false }, // no floating point types + { DataType::F64, DataType::F64, DataType::F64, false }, // no double precision + { DataType::QASYMM8, DataType::QASYMM8, DataType::QASYMM8, true }, + { DataType::QASYMM8_SIGNED, DataType::QASYMM8_SIGNED, DataType::QASYMM8_SIGNED, true }, + { DataType::QSYMM8_PER_CHANNEL, DataType::QSYMM8_PER_CHANNEL, DataType::QSYMM8_PER_CHANNEL, false }, // only qasymm8/qasymm8_signed is supported + { DataType::QASYMM16, DataType::QASYMM16, DataType::QASYMM16, false }, // only qasymm8/qasymm8_signed is supported + { DataType::QSYMM16, DataType::QSYMM16, DataType::QSYMM16, false }, // only qasymm8/qasymm8_signed is supported + { DataType::QSYMM8, DataType::QSYMM8, DataType::QSYMM8, false }, // only qasymm8/qasymm8_signed is supported + { DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QASYMM8, false }, // no mixed data types + { DataType::S64, DataType::S64, DataType::S64, false }, // no integral types + { 
DataType::S32, DataType::S32, DataType::S32, false }, // no integral types + { DataType::S16, DataType::S16, DataType::S16, false }, // no integral types + { DataType::S8, DataType::S8, DataType::S8, false }, // no integral types + { DataType::U64, DataType::U64, DataType::U64, false }, // no integral types + { DataType::U32, DataType::U32, DataType::U32, false }, // no integral types + { DataType::U16, DataType::U16, DataType::U16, false }, // no integral types + { DataType::U8, DataType::U8, DataType::U8, false }, // no integral types + }; + + // It's enough to test a single shape and block size configuration while checking data types + const TensorShape shape = TensorShape(10U, 10U); + const MatMulKernelInfo matmul_kernel_info{ false, false, 1, 1, 1, false }; + for(auto &tuple : data_type_configurations) + { + const bool expected = std::get<3>(tuple); + + const TensorInfo lhs_info(shape, 1, std::get<0>(tuple)); + const TensorInfo rhs_info(shape, 1, std::get<1>(tuple)); + TensorInfo output_info(shape, 1, std::get<2>(tuple)); + + Status status = ClMatMulLowpNativeKernel::validate(&lhs_info, &rhs_info, &output_info, matmul_kernel_info); + ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS); + } +} + +TEST_SUITE_END() // Validate + +TEST_SUITE(Quantized) +TEST_SUITE(QASYMM8_SIGNED) +FIXTURE_DATA_TEST_CASE(RunTiny, CLMatMulLowpNativeKernelFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::TinyMatMulDataset(), + framework::dataset::make("TransposeA", { true, false })), + framework::dataset::make("TransposeB", { false })), + m0_values_precommit), + n0_values_precommit), + k0_values_precommit), + framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_quant); +} +FIXTURE_DATA_TEST_CASE(RunSmall, CLMatMulLowpNativeKernelFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDataset(), + framework::dataset::make("TransposeA", { true, false })), + framework::dataset::make("TransposeB", { false })), + m0_values_precommit), + n0_values_precommit), + k0_values_precommit), + framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_quant); +} +FIXTURE_DATA_TEST_CASE(RunLargeNoTranspose, CLMatMulLowpNativeKernelFixture, framework::DatasetMode::NIGHTLY, + combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(), + framework::dataset::make("TransposeA", { false })), + framework::dataset::make("TransposeB", { false })), + m0_values_nightly_lhs_nt), + n0_values_nightly_rhs_nt), + k0_values_nightly_lhs_nt_rhs_nt), + framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_quant); +} +FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposed, CLMatMulLowpNativeKernelFixture, framework::DatasetMode::NIGHTLY, + combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(), + framework::dataset::make("TransposeA", { true })), + framework::dataset::make("TransposeB", { false })), + m0_values_nightly_lhs_t), + n0_values_nightly_rhs_nt), + k0_values_nightly_lhs_t_rhs_nt), + 
framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_quant); +} +// Running High Dimensional test is enough for qasymm8_signed, because we're stressing the number of dimensions, not data type or M0/N0/K0 +// It's a good idea to test for each Lhs/Rhs T/NT combinations because they're different CL kernels +FIXTURE_DATA_TEST_CASE(RunHighDimensional, CLMatMulLowpNativeKernelFixture, framework::DatasetMode::ALL, + combine(combine(combine(combine(combine(combine(combine(datasets::HighDimensionalMatMulDataset(), + framework::dataset::make("TransposeA", { true, false })), + framework::dataset::make("TransposeB", { false })), + framework::dataset::make("M0", { 2 })), + framework::dataset::make("N0", { 2 })), + framework::dataset::make("K0", { 2 })), + framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_quant); +} +TEST_SUITE_END() // QASYMM8_SIGNED + +TEST_SUITE(QASYMM8) +FIXTURE_DATA_TEST_CASE(RunTiny, CLMatMulLowpNativeKernelFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::TinyMatMulDataset(), + framework::dataset::make("TransposeA", { true, false })), + framework::dataset::make("TransposeB", { false })), + m0_values_precommit), + n0_values_precommit), + k0_values_precommit), + framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::QASYMM8))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_quant); +} +FIXTURE_DATA_TEST_CASE(RunSmall, CLMatMulLowpNativeKernelFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDataset(), + framework::dataset::make("TransposeA", { true, false })), + framework::dataset::make("TransposeB", { false })), + m0_values_precommit), + n0_values_precommit), + k0_values_precommit), + framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::QASYMM8))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_quant); +} +FIXTURE_DATA_TEST_CASE(RunLargeNoTranspose, CLMatMulLowpNativeKernelFixture, framework::DatasetMode::NIGHTLY, + combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(), + framework::dataset::make("TransposeA", { false })), + framework::dataset::make("TransposeB", { false })), + m0_values_nightly_lhs_nt), + n0_values_nightly_rhs_nt), + k0_values_nightly_lhs_nt_rhs_nt), + framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::QASYMM8))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_quant); +} +FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposed, CLMatMulLowpNativeKernelFixture, framework::DatasetMode::NIGHTLY, + combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(), + framework::dataset::make("TransposeA", { true })), + framework::dataset::make("TransposeB", { false })), + m0_values_nightly_lhs_t), + n0_values_nightly_rhs_nt), + k0_values_nightly_lhs_t_rhs_nt), + framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::QASYMM8))) +{ + // Validate output + validate(CLAccessor(_target), _reference, 
tolerance_quant);
+}
+TEST_SUITE_END() // QASYMM8
+TEST_SUITE_END() // Quantized
+TEST_SUITE_END() // MatMulLowpNativeKernel
+TEST_SUITE_END() // CL
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/Helpers.cpp b/tests/validation/Helpers.cpp
index be194dd266..110325c5a0 100644
--- a/tests/validation/Helpers.cpp
+++ b/tests/validation/Helpers.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2022 Arm Limited.
+ * Copyright (c) 2017-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -22,6 +22,7 @@
 * SOFTWARE.
 */
 #include "tests/validation/Helpers.h"
+#include "tests/framework/Asserts.h"
 #include
 #include
@@ -373,6 +374,105 @@ void add_padding_y(std::initializer_list tensors, const DataLayout &d
 }
 }
+QuantizationInfo calculate_mat_mul_dst_q_info(const QuantizationInfo &a_q_info, const QuantizationInfo &b_q_info, int m, int n, int k, DataType data_type)
+{
+    ARM_COMPUTE_UNUSED(m, n);
+    QuantizationInfo c_q_info;
+
+    ARM_COMPUTE_ASSERT(data_type == DataType::QASYMM8 || data_type == DataType::QASYMM8_SIGNED);
+
+    const int32_t t_max = static_cast<int32_t>(data_type == DataType::QASYMM8 ? std::numeric_limits<uint8_t>::max() : std::numeric_limits<int8_t>::max());
+    const int32_t t_min = static_cast<int32_t>(data_type == DataType::QASYMM8 ? std::numeric_limits<uint8_t>::min() : std::numeric_limits<int8_t>::min());
+
+    /** Quantization setup of the matrix multiplication
+     *
+     * We have a matrix multiplication of the form C = A * B
+     * where A is (M x K), B is (K x N) and C is therefore (M x N).
+     *
+     * If we have some distribution statistics of A and B, i.e. mean and variance,
+     * we can estimate the mean and variance of a single value in the C matrix and
+     * pick scale and offset values for the output that keep the tests non-saturated.
+     *
+     * Each element in the output matrix can be calculated as follows:
+     *      C_ij = sum_k(A_ik * B_kj)
+     *
+     * All values above are float, i.e. dequantized.
+     *
+     * Note: all A_ik, B_kj random variables are assumed to be mutually independent.
+     *
+     * Terminology:
+     *      E[X]:   Mean of the random variable X (sometimes referred to as mu_x)
+     *      var(X): Variance of the random variable X (sometimes referred to as sigma^2_x)
+     *      std(X): sqrt(var(X)), standard deviation of X
+     *
+     * 1) Calculate the mean:
+     *      E[C_ij] = sum_k( E[A_ik] * E[B_kj] ) = K * mean_a * mean_b
+     *
+     *    Since the elements of A and B are uniformly distributed random variables, we have
+     *      mean_a = (max_a + min_a) / 2, mean_b = (max_b + min_b) / 2
+     *    max_a/b and min_a/b can be calculated from scale_a/b and offset_a/b by substituting
+     *    the data type minimum and maximum into the dequantization equation.
+     *
+     * 2) Calculate the variance:
+     *      var(C_ij) = sum_k( var(A_ik * B_kj) )
+     *                = sum_k( E[A_ik^2 * B_kj^2] - E[A_ik]^2 * E[B_kj]^2 )
+     *                = ...
+     *                = K * (var_a * var_b + var_a * mean^2_b + var_b * mean^2_a)
+     *
+     *    Similarly, due to the properties of uniform random variables, we have
+     *      var_a = (max_a - min_a)^2 / 12
+     *      var_b = (max_b - min_b)^2 / 12
+     *
+     * 3) Now we have an idea of what an average C_ij will look like and how much deviation
+     *    is present around it. The exact distribution of C is not easy to derive and depends on K.
+     *    However, as K increases, it will look more and more like a bell-shaped curve,
+     *    approaching a normal distribution, due to the Central Limit Theorem.
+     *
+     *    This is useful because, for a normal distribution, values within +- 2 standard deviations
+     *    of the mean constitute about 95% of the values. Therefore, we can set a plausible range for the output:
+     *      C_range = [C_min, C_max] = [mean_c - 2 * std_c, mean_c + 2 * std_c]
+     *
+     * 4) If we map this [C_min, C_max] to [0, 255] or [-128, 127] depending on the signedness of the
+     *    data type, we can find a suitable scale and offset for the output. On average, it's expected
+     *    that about 5% of the output values will saturate and 95% will remain in range.
+     *
+     *    The equations to be solved for offset_c and scale_c are:
+     *      C_min = scale_c * (type_min - offset_c)
+     *      C_max = scale_c * (type_max - offset_c)
+     */
+
+    const int32_t a_offset = a_q_info.uniform().offset;
+    const float   a_scale  = a_q_info.uniform().scale;
+    const int32_t b_offset = b_q_info.uniform().offset;
+    const float   b_scale  = b_q_info.uniform().scale;
+
+    // Lhs/A stats
+    const float max_a  = (t_max - a_offset) * a_scale;
+    const float min_a  = (t_min - a_offset) * a_scale;
+    const float mean_a = (max_a + min_a) / 2;
+    const float var_a  = (max_a - min_a) * (max_a - min_a) / 12;
+
+    // Rhs/B stats
+    const float max_b  = (t_max - b_offset) * b_scale;
+    const float min_b  = (t_min - b_offset) * b_scale;
+    const float mean_b = (max_b + min_b) / 2;
+    const float var_b  = (max_b - min_b) * (max_b - min_b) / 12;
+
+    // Output stats
+    const float mean_out = k * mean_a * mean_b;
+    const float var_out  = k * (var_a * var_b + var_a * mean_b * mean_b + var_b * mean_a * mean_a);
+    const float std_out  = sqrt(var_out);
+
+    // Output quantization setup
+    const float   scale_out  = 4 * std_out / 255;
+    const int32_t offset_out = static_cast<int32_t>(t_min - (mean_out - 2.f * std_out) / scale_out);
+
+    c_q_info = QuantizationInfo(scale_out, offset_out);
+    return c_q_info;
+}
+
 template void get_tile(const SimpleTensor &in, SimpleTensor &roi, const Coordinates &coord);
 template void get_tile(const SimpleTensor &in, SimpleTensor &roi, const Coordinates &coord);
 template void get_tile(const SimpleTensor &in, SimpleTensor &roi, const Coordinates &coord);
diff --git a/tests/validation/Helpers.h b/tests/validation/Helpers.h
index 2e48a6b8c6..3449239e45 100644
--- a/tests/validation/Helpers.h
+++ b/tests/validation/Helpers.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2022 Arm Limited.
+ * Copyright (c) 2017-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -21,8 +21,8 @@
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
-#ifndef ARM_COMPUTE_TEST_VALIDATION_HELPERS_H
-#define ARM_COMPUTE_TEST_VALIDATION_HELPERS_H
+#ifndef ACL_TESTS_VALIDATION_HELPERS
+#define ACL_TESTS_VALIDATION_HELPERS
 #include "arm_compute/core/Types.h"
 #include "arm_compute/core/Utils.h"
@@ -250,7 +250,12 @@ void add_padding_x(std::initializer_list tensors, const DataLayout &d
 * @note This function adds padding to the input tensors only if data_layout == DataLayout::NHWC
 */
 void add_padding_y(std::initializer_list tensors, const DataLayout &data_layout = DataLayout::NHWC);
+
+/** For MatMulLowp, given the Lhs/Rhs matrix quantization information and the matrix multiplication dimensions,
+ * calculate a suitable output quantization for obtaining non-saturated outputs with high probability.
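To make the derivation above concrete, here is a small standalone sketch (illustrative only, not part of the patch; the scales, offsets and K below are made-up example values) that mirrors the arithmetic of calculate_mat_mul_dst_q_info() for QASYMM8_SIGNED and prints the resulting output scale and offset:

// Standalone illustration of the statistics used above (hypothetical inputs).
#include <cmath>
#include <cstdint>
#include <cstdio>

int main()
{
    // Example input quantization parameters and accumulation depth (assumed values)
    const float   a_scale  = 0.5f, b_scale = 0.25f;
    const int32_t a_offset = 10,   b_offset = -20;
    const int     k        = 32;

    const int32_t t_min = -128, t_max = 127; // QASYMM8_SIGNED range

    // Uniform-distribution statistics of the dequantized Lhs/Rhs values
    const float max_a = (t_max - a_offset) * a_scale, min_a = (t_min - a_offset) * a_scale;
    const float max_b = (t_max - b_offset) * b_scale, min_b = (t_min - b_offset) * b_scale;
    const float mean_a = (max_a + min_a) / 2, var_a = (max_a - min_a) * (max_a - min_a) / 12;
    const float mean_b = (max_b + min_b) / 2, var_b = (max_b - min_b) * (max_b - min_b) / 12;

    // Mean/std of one output element; map [mean - 2*std, mean + 2*std] onto the 256 levels
    const float mean_out  = k * mean_a * mean_b;
    const float std_out   = std::sqrt(k * (var_a * var_b + var_a * mean_b * mean_b + var_b * mean_a * mean_a));
    const float scale_out = 4 * std_out / 255;
    const int32_t offset_out = static_cast<int32_t>(t_min - (mean_out - 2.f * std_out) / scale_out);

    std::printf("scale_out = %f, offset_out = %d\n", scale_out, offset_out);
    return 0;
}

With numbers like these, most accumulated products fall inside the representable output range after quantization, which is the non-saturation property the validation tests rely on.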
+ */ +QuantizationInfo calculate_mat_mul_dst_q_info(const QuantizationInfo &lhs_q_info, const QuantizationInfo &rhs_q_info, int m, int n, int k, DataType data_type); } // namespace validation } // namespace test } // namespace arm_compute -#endif /* ARM_COMPUTE_TEST_VALIDATION_HELPERS_H */ +#endif /* ACL_TESTS_VALIDATION_HELPERS */ diff --git a/tests/validation/fixtures/MatMulKernelFixture.h b/tests/validation/fixtures/MatMulKernelFixture.h index 10e2a0659a..7d0b1a40a9 100644 --- a/tests/validation/fixtures/MatMulKernelFixture.h +++ b/tests/validation/fixtures/MatMulKernelFixture.h @@ -25,11 +25,15 @@ #define ACL_TESTS_VALIDATION_FIXTURES_MATMULKERNELFIXTURE #include "arm_compute/core/KernelDescriptors.h" -#include "src/gpu/cl/kernels/ClMatMulNativeKernel.h" +#include "arm_compute/core/Utils.h" +#include "arm_compute/core/utils/quantization/AsymmHelpers.h" + #include "tests/CL/CLAccessor.h" #include "tests/CL/Helper.h" #include "tests/framework/Fixture.h" +#include "tests/validation/Helpers.h" #include "tests/validation/reference/GEMM.h" +#include "tests/validation/reference/GEMMLowp.h" #include "tests/validation/reference/Permute.h" #include "tests/validation/reference/ReshapeLayer.h" @@ -43,14 +47,43 @@ namespace validation { using namespace arm_compute::opencl::kernels; -template +template class MatMulKernelValidationFixture : public framework::Fixture { public: template - void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool pretranspose_a, bool pretranspose_b, const int M0, const int N0, const int K0, bool export_rhs_to_cl_image, DataType data_type) + void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool pretranspose_a, bool pretranspose_b, int M0, int N0, int K0, bool export_rhs_to_cl_image, DataType data_type) { // For brevity, the input shapes are assumed to be not-transposed for both Lhs and Rhs matrices. 
+ QuantizationInfo lhs_q_info; + QuantizationInfo rhs_q_info; + QuantizationInfo dst_q_info; + + if(is_data_type_quantized(data_type)) + { + const int32_t t_max = static_cast(std::numeric_limits::max()); + const int32_t t_min = static_cast(std::numeric_limits::min()); + + std::mt19937 generator(library->seed()); + std::uniform_real_distribution distribution_float(-5.0f, 3.0f); + std::uniform_int_distribution distribution_t(t_min, t_max); + + const float scale_lhs = pow(2, distribution_float(generator)); // [2^-5, 2^3] + const float scale_rhs = pow(2, distribution_float(generator)); // [2^-5, 2^3] + + const int32_t offset_lhs = distribution_t(generator); + const int32_t offset_rhs = distribution_t(generator); + + lhs_q_info = QuantizationInfo(scale_lhs, offset_lhs); + rhs_q_info = QuantizationInfo(scale_rhs, offset_rhs); + + const int m = shape_a.y(); + const int n = shape_b.x(); + const int k = shape_a.x(); + + dst_q_info = calculate_mat_mul_dst_q_info(lhs_q_info, rhs_q_info, m, n, k, data_type); + } + if(pretranspose_a) { permute(shape_a, PermutationVector(1U, 0U)); @@ -65,8 +98,8 @@ public: if(!export_rhs_to_cl_image || _device_supports_export_to_cl_image) { - _target = compute_target(shape_a, shape_b, output_shape, pretranspose_a, pretranspose_b, M0, N0, K0, export_rhs_to_cl_image, data_type); - _reference = compute_reference(shape_a, shape_b, output_shape, pretranspose_a, pretranspose_b, data_type); + _target = compute_target(shape_a, shape_b, output_shape, pretranspose_a, pretranspose_b, M0, N0, K0, export_rhs_to_cl_image, data_type, lhs_q_info, rhs_q_info, dst_q_info); + _reference = compute_reference(shape_a, shape_b, output_shape, pretranspose_a, pretranspose_b, data_type, lhs_q_info, rhs_q_info, dst_q_info); } } @@ -93,23 +126,29 @@ protected: } } + template + void fill_constant(U &&tensor, D value) + { + library->fill_tensor_value(tensor, value); + } + CLTensor compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &output_shape, bool pretranspose_a, bool pretranspose_b, const int M0, const int N0, const int K0, - bool export_rhs_to_cl_image, DataType data_type) + bool export_rhs_to_cl_image, DataType data_type, const QuantizationInfo &lhs_q_info, const QuantizationInfo &rhs_q_info, const QuantizationInfo &dst_q_info) { - // Create tensors - CLTensor a = create_tensor(shape_a, data_type, 1); - CLTensor b = create_tensor(shape_b, data_type, 1); - CLTensor dst = create_tensor(output_shape, data_type, 1); - - CLSynthetizeOperator matMul{}; - MatMulKernelInfo matmul_info; - matmul_info.adj_lhs = pretranspose_a; - matmul_info.adj_rhs = pretranspose_b; - matmul_info.m0 = M0; - matmul_info.n0 = N0; - matmul_info.k0 = K0; + CLSynthetizeOperator matMul{}; + MatMulKernelInfo matmul_info; + matmul_info.adj_lhs = pretranspose_a; + matmul_info.adj_rhs = pretranspose_b; + matmul_info.m0 = M0; + matmul_info.n0 = N0; + matmul_info.k0 = K0; matmul_info.export_rhs_to_cl_image = export_rhs_to_cl_image; + // Create tensors + CLTensor a = create_tensor(shape_a, data_type, 1, lhs_q_info); + CLTensor b = create_tensor(shape_b, data_type, 1, rhs_q_info); + CLTensor dst = create_tensor(output_shape, data_type, 1, dst_q_info); + matMul.configure(a.info(), b.info(), dst.info(), matmul_info); ARM_COMPUTE_ASSERT(a.info()->is_resizable()); ARM_COMPUTE_ASSERT(b.info()->is_resizable()); @@ -138,18 +177,19 @@ protected: return dst; } - SimpleTensor compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &output_shape, bool pretranspose_a, bool 
pretranspose_b, DataType data_type) + SimpleTensor compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &output_shape, bool pretranspose_a, bool pretranspose_b, DataType data_type, + const QuantizationInfo &lhs_q_info, const QuantizationInfo &rhs_q_info, const QuantizationInfo &dst_q_info) { // We collapse dimensions > 3 onto dimension 3, i.e. 5D+ tensors will look like 4D // This is necessary unless we choose to extend gemm reference for 5D+ tensors - TensorShape output_shape_collapsed = output_shape.collapsed_from(Window::DimW); - TensorShape shape_a_collapsed = shape_a.collapsed_from(Window::DimW); - TensorShape shape_b_collapsed = shape_b.collapsed_from(Window::DimW); + TensorShape output_shape_collapsed = output_shape.collapsed_from(Window::DimZ); + TensorShape shape_a_collapsed = shape_a.collapsed_from(Window::DimZ); + TensorShape shape_b_collapsed = shape_b.collapsed_from(Window::DimZ); // Create reference - SimpleTensor a{ shape_a_collapsed, data_type, 1 }; - SimpleTensor b{ shape_b_collapsed, data_type, 1 }; - SimpleTensor c{ output_shape_collapsed, data_type, 1 }; + SimpleTensor a{ shape_a_collapsed, data_type, 1, lhs_q_info }; + SimpleTensor b{ shape_b_collapsed, data_type, 1, rhs_q_info }; + SimpleTensor c{ output_shape_collapsed, data_type, 1, dst_q_info }; // Fill reference fill(a, 0); @@ -185,10 +225,8 @@ protected: b_transposed = reference::permute(b, PermutationVector(1U, 0U)); } - // Setting beta to 0 will effectively disable C for the - // computation of the reference: alpha * A * B + 0 * C // Use transposed tensors if boolean enabled else use original tensors - SimpleTensor result = reference::gemm((pretranspose_a) ? a_transposed : a, (pretranspose_b) ? b_transposed : b, c, 1.0f, 0.f); + SimpleTensor result = gemm_reference((pretranspose_a) ? a_transposed : a, (pretranspose_b) ? 
b_transposed : b, c); // We reshape the gemm output back if the tensor is high dimensional if(output_shape_collapsed != output_shape) @@ -199,9 +237,43 @@ protected: return result; } + template + typename std::enable_if < std::is_same::value || std::is_same::value, SimpleTensor>::type gemm_reference(SimpleTensor &a, SimpleTensor &b, SimpleTensor &c) + { + // Setting beta to 0 will effectively disable C for the + // computation of the reference: alpha * A * B + 0 * C + return reference::gemm(a, b, c, 1.0f, 0.f); + } + + template + typename std::enable_if < std::is_same::value || std::is_same::value, SimpleTensor>::type gemm_reference(SimpleTensor &a, SimpleTensor &b, SimpleTensor &c) + { + const UniformQuantizationInfo aq = a.quantization_info().uniform(); + const UniformQuantizationInfo bq = b.quantization_info().uniform(); + const UniformQuantizationInfo cq = c.quantization_info().uniform(); + + const SimpleTensor result = reference::gemmlowp_matrix_multiply_core(a, b, c.shape(), -aq.offset, -bq.offset); + + std::vector gemmlowp_multipliers{ 1 }; + std::vector gemmlowp_shifts{ 1 }; + const int gemmlowp_offset = cq.offset; + const float scale = aq.scale * bq.scale / cq.scale; + + quantization::calculate_quantized_multiplier(scale, &gemmlowp_multipliers[0], &gemmlowp_shifts[0]); + constexpr int32_t gemmlowp_min_bound = std::numeric_limits::min(); + constexpr int32_t gemmlowp_max_bound = std::numeric_limits::max(); + + SimpleTensor bias{ c.shape(), DataType::S32 }; + fill_constant(bias, static_cast(0)); + + const SimpleTensor final_result = reference::gemmlowp_quantize_down_scale_by_fixedpoint(result, bias, + gemmlowp_multipliers, gemmlowp_shifts, gemmlowp_offset, gemmlowp_min_bound, gemmlowp_max_bound); + return final_result; + } + CLTensor _target{}; SimpleTensor _reference{}; - bool _device_supports_export_to_cl_image { true }; + bool _device_supports_export_to_cl_image{ true }; }; } // namespace validation -- cgit v1.2.1
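As a side note on the quantized reference path added above: gemmlowp_matrix_multiply_core() accumulates (a - a_offset) * (b - b_offset) into s32 (the offsets are passed negated), and the fixed-point stage then rescales by a_scale * b_scale / c_scale, adds the destination offset and clamps to the output type's range. A float-domain sketch of that final per-element step, using a hypothetical helper rather than the ACL API, might look like this:

// Sketch of the per-element requantization performed by the fixed-point stage
// (the real code uses an integer multiplier + shift from calculate_quantized_multiplier).
#include <algorithm>
#include <cmath>
#include <cstdint>

int8_t requantize_s8(int32_t acc, float a_scale, float b_scale, float c_scale, int32_t c_offset)
{
    const float   rescale = a_scale * b_scale / c_scale;                      // combined multiplier
    const int32_t value   = static_cast<int32_t>(std::lround(acc * rescale)) + c_offset;
    return static_cast<int8_t>(std::min(127, std::max(-128, value)));         // clamp to int8 range
}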