From 8e2dedea8550b1c18c3bbeead8c972f661dcfac8 Mon Sep 17 00:00:00 2001 From: Mohammed Suhail Munshi Date: Tue, 27 Jun 2023 14:25:58 +0100 Subject: Add Bias to MatMul Kernels and add support for use in Fully Connected Layer Resolves: [COMPMID-6316] Signed-off-by: Mohammed Suhail Munshi Change-Id: I08e6bac9e6b46b76978da0dc6a48ccfe3dde5086 Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9833 Tested-by: Arm Jenkins Reviewed-by: Gunes Bayir Comments-Addressed: Arm Jenkins Benchmark: Arm Jenkins --- src/core/CL/cl_kernels/common/mat_mul.cl | 247 ++++++++++++------- src/core/CL/cl_kernels/common/mat_mul_mmul.cl | 271 +++++++++++++-------- src/core/CL/cl_kernels/common/mat_mul_quantized.cl | 239 +++++++++++------- src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp | 37 ++- src/gpu/cl/kernels/ClMatMulLowpNativeKernel.h | 12 +- src/gpu/cl/kernels/ClMatMulNativeKernel.cpp | 34 ++- src/gpu/cl/kernels/ClMatMulNativeKernel.h | 12 +- src/gpu/cl/kernels/ClMatMulNativeMMULKernel.cpp | 39 ++- src/gpu/cl/kernels/ClMatMulNativeMMULKernel.h | 9 +- src/gpu/cl/operators/ClFullyConnected.cpp | 144 ++++++----- src/gpu/cl/operators/ClFullyConnected.h | 11 +- src/gpu/cl/operators/ClMatMul.cpp | 8 +- .../matmul_native/ClMatMulNativeHelpers.cpp | 2 +- tests/validation/CL/MatMulKernel.cpp | 49 ++-- tests/validation/CL/MatMulLowpNativeKernel.cpp | 92 ++++--- tests/validation/CL/MatMulNativeMMULKernel.cpp | 154 +++++++----- tests/validation/fixtures/MatMulKernelFixture.h | 103 +++++++- 17 files changed, 942 insertions(+), 521 deletions(-) diff --git a/src/core/CL/cl_kernels/common/mat_mul.cl b/src/core/CL/cl_kernels/common/mat_mul.cl index 9656a59728..c7ef8ae52b 100644 --- a/src/core/CL/cl_kernels/common/mat_mul.cl +++ b/src/core/CL/cl_kernels/common/mat_mul.cl @@ -25,6 +25,21 @@ #include "helpers.h" #include "tile_helpers.h" +#ifdef BIAS +// This function performs in-place bias addition for float/half datatype when bias is enabled. +// Note The tile's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 (e.g. -DN0=8, -DM0=4). +inline void perform_bias_addition(uchar *bias_ptr, uint bias_offset_first_element_in_bytes, TILE(DATA_TYPE, M0, N0, acc), uint x) +{ + TILE(DATA_TYPE, 1, N0, bias_tile); + + // below expands to use bias_ptr and bias_offset_first_element_in_bytes + T_LOAD(DATA_TYPE, 1, N0, BUFFER, bias, x, 0, 1, 0, bias_tile); + + // c = c + bias[broadcasted] + T_ELTWISE_BROADCAST_ADD_X(DATA_TYPE, M0, N0, acc, bias_tile, acc); +} +#endif // defined(BIAS) + #if defined(MAT_MUL_NATIVE_NT_NT) /** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS non-transposed, RHS non-transposed - buffer only * @@ -43,32 +58,42 @@ * - K0 = 1, 2, 3, 4, 8, 16 * @note Values > 8 for M0 are not expected to be efficient * - * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: F32/F16 - * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) - * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) - * @param[in] lhs_w The width of the lhs tensor - * @param[in] lhs_h The height of the lhs tensor - * @param[in] lhs_n Number of the matrices (buffers) in the batch - * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix - * @param[in] rhs_img (Optional) Read only cl_image object for the rhs tensor. Included when RHS_TENSOR_TYPE=IMAGE - * @param[in] rhs_ptr Pointer to the rhs matrix. 
Supported data types: same as @p lhs_ptr - * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) - * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) - * @param[in] rhs_w The width of the rhs tensor - * @param[in] rhs_h The height of the rhs tensor - * @param[in] rhs_n Number of the matrices (buffers) in the batch - * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix - * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr - * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) - * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) - * @param[in] dst_w The width of the dst tensor - * @param[in] dst_h The height of the dst tensor - * @param[in] dst_n Number of the matrices (buffers) in the batch - * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix + * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: F32/F16 + * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) + * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) + * @param[in] lhs_w The width of the lhs tensor + * @param[in] lhs_h The height of the lhs tensor + * @param[in] lhs_n Number of the matrices (buffers) in the batch + * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix + * @param[in] rhs_img (Optional) Read only cl_image object for the rhs tensor. Included when RHS_TENSOR_TYPE=IMAGE + * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr + * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) + * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) + * @param[in] rhs_w The width of the rhs tensor + * @param[in] rhs_h The height of the rhs tensor + * @param[in] rhs_n Number of the matrices (buffers) in the batch + * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix + * @param[in] bias_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p lhs_ptr + * @param[in] bias_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes) + * @param[in] bias_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes) + * @param[in] bias_w (Optional) The size of the width dimension of the bias tensor + * @param[in] bias_h (Optional) The size of the height dimension of the bias tensor + * @param[in] bias_n (Optional) The size of the depth dimension of the bias tensor + * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor + * @param[out] dst_ptr Pointer to the dst matrix. 
Supported data types: same as @p lhs_ptr + * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) + * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) + * @param[in] dst_w The width of the dst tensor + * @param[in] dst_h The height of the dst tensor + * @param[in] dst_n Number of the matrices (buffers) in the batch + * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix */ __kernel void mat_mul_native_nt_nt( TENSOR3D_T(lhs, BUFFER), TENSOR3D_T(rhs, RHS_TENSOR_TYPE), +#ifdef BIAS + TENSOR3D_T(bias, BUFFER), +#endif // defined(BIAS) TENSOR3D_T(dst, BUFFER)) { const uint x = GET_SPATIAL_IDX(0, N0, PARTIAL_STORE_N0); @@ -149,6 +174,10 @@ __kernel void mat_mul_native_nt_nt( indirect_buffer[_i].v = min(_i, select(M0 - 1, PARTIAL_STORE_M0 - 1, y_cond)); }); +#ifdef BIAS + perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, acc, x); +#endif // defined(BIAS) + T_ACTIVATION(DATA_TYPE, M0, N0, ACTIVATION_TYPE, A_VAL, B_VAL, acc, acc); T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, 0, dst_stride_y, x_cond, acc, indirect_buffer); @@ -173,31 +202,41 @@ __kernel void mat_mul_native_nt_nt( * - K0 = 1, 2, 3, 4, 8, 16 (only 4, 8, 16 if RHS_TENSOR_TYPE=IMAGE) * @note Values > 8 for M0, N0 and K0 are not expected to be efficient * - * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: F32/F16 - * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) - * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) - * @param[in] lhs_w The width of the lhs tensor - * @param[in] lhs_h The height of the lhs tensor - * @param[in] lhs_n Number of the matrices (buffers) in the batch - * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix - * @param[in] rhs_img (Optional) Read only cl_image object for the rhs tensor. Included when RHS_TENSOR_TYPE=IMAGE - * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr - * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) - * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) - * @param[in] rhs_w The width of the rhs tensor - * @param[in] rhs_h The height of the rhs tensor - * @param[in] rhs_n Number of the matrices (buffers) in the batch - * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix - * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr - * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) - * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) - * @param[in] dst_w The width of the dst tensor - * @param[in] dst_h The height of the dst tensor - * @param[in] dst_n Number of the matrices (buffers) in the batch - * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix + * @param[in] lhs_ptr Pointer to the lhs matrix. 
Supported data types: F32/F16 + * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) + * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) + * @param[in] lhs_w The width of the lhs tensor + * @param[in] lhs_h The height of the lhs tensor + * @param[in] lhs_n Number of the matrices (buffers) in the batch + * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix + * @param[in] rhs_img (Optional) Read only cl_image object for the rhs tensor. Included when RHS_TENSOR_TYPE=IMAGE + * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr + * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) + * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) + * @param[in] rhs_w The width of the rhs tensor + * @param[in] rhs_h The height of the rhs tensor + * @param[in] rhs_n Number of the matrices (buffers) in the batch + * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix + * @param[in] bias_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p lhs_ptr + * @param[in] bias_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes) + * @param[in] bias_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes) + * @param[in] bias_w (Optional) The size of the width dimension of the bias tensor + * @param[in] bias_h (Optional) The size of the height dimension of the bias tensor + * @param[in] bias_n (Optional) The size of the depth dimension of the bias tensor + * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor + * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr + * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) + * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) + * @param[in] dst_w The width of the dst tensor + * @param[in] dst_h The height of the dst tensor + * @param[in] dst_n Number of the matrices (buffers) in the batch + * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix */ __kernel void mat_mul_native_nt_t(TENSOR3D_T(lhs, BUFFER), TENSOR3D_T(rhs, RHS_TENSOR_TYPE), +#ifdef BIAS + TENSOR3D_T(bias, BUFFER), +#endif // defined(BIAS) TENSOR3D_T(dst, BUFFER)) { @@ -306,6 +345,10 @@ __kernel void mat_mul_native_nt_t(TENSOR3D_T(lhs, BUFFER), indirect_buffer[_i].v = min(_i, select(M0 - 1, PARTIAL_STORE_M0 - 1, y_cond)); }); +#ifdef BIAS + perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, acc, x); +#endif // defined(BIAS) + T_ACTIVATION(DATA_TYPE, M0, N0, ACTIVATION_TYPE, A_VAL, B_VAL, acc, acc); T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, 0, dst_stride_y, x_cond, acc, indirect_buffer); @@ -330,32 +373,42 @@ __kernel void mat_mul_native_nt_t(TENSOR3D_T(lhs, BUFFER), * - K0 > 0 * * @note Values > 8 for M0, and K0 are not expected to be efficient * - * @param[in] lhs_ptr Pointer to the lhs matrix. 
Supported data types: F32/F16 - * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) - * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) - * @param[in] lhs_w The width of the lhs tensor - * @param[in] lhs_h The height of the lhs tensor - * @param[in] lhs_n Number of the matrices (buffers) in the batch - * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix - * @param[in] rhs_img (Optional) Read only cl_image object for the rhs tensor. Included when RHS_TENSOR_TYPE=IMAGE - * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr - * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) - * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) - * @param[in] rhs_w The width of the rhs tensor - * @param[in] rhs_h The height of the rhs tensor - * @param[in] rhs_n Number of the matrices (buffers) in the batch - * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix - * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr - * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) - * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) - * @param[in] dst_w The width of the dst tensor - * @param[in] dst_h The height of the dst tensor - * @param[in] dst_n Number of the matrices (buffers) in the batch - * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix + * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: F32/F16 + * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) + * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) + * @param[in] lhs_w The width of the lhs tensor + * @param[in] lhs_h The height of the lhs tensor + * @param[in] lhs_n Number of the matrices (buffers) in the batch + * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix + * @param[in] rhs_img (Optional) Read only cl_image object for the rhs tensor. Included when RHS_TENSOR_TYPE=IMAGE + * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr + * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) + * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) + * @param[in] rhs_w The width of the rhs tensor + * @param[in] rhs_h The height of the rhs tensor + * @param[in] rhs_n Number of the matrices (buffers) in the batch + * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix + * @param[in] bias_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p lhs_ptr + * @param[in] bias_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes) + * @param[in] bias_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes) + * @param[in] bias_w (Optional) The size of the width dimension of the bias tensor + * @param[in] bias_h (Optional) The size of the height dimension of the bias tensor + * @param[in] bias_n (Optional) The size of the depth dimension of the bias tensor + * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor + * @param[out] dst_ptr Pointer to the dst matrix. 
Supported data types: same as @p lhs_ptr + * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) + * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) + * @param[in] dst_w The width of the dst tensor + * @param[in] dst_h The height of the dst tensor + * @param[in] dst_n Number of the matrices (buffers) in the batch + * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix */ __kernel void mat_mul_native_t_nt( TENSOR3D_T(lhs, BUFFER), TENSOR3D_T(rhs, RHS_TENSOR_TYPE), +#ifdef BIAS + TENSOR3D_T(bias, BUFFER), +#endif // defined(BIAS) TENSOR3D_T(dst, BUFFER)) { const uint x = GET_SPATIAL_IDX(0, N0, PARTIAL_STORE_N0); @@ -459,6 +512,10 @@ __kernel void mat_mul_native_t_nt( indirect_buffer[_i].v = min(_i, select(M0 - 1, PARTIAL_STORE_M0 - 1, y_cond)); }); +#ifdef BIAS + perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, acc, x); +#endif // defined(BIAS) + T_ACTIVATION(DATA_TYPE, M0, N0, ACTIVATION_TYPE, A_VAL, B_VAL, acc, acc); T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, 0, dst_stride_y, x_cond, acc, indirect_buffer); @@ -483,32 +540,42 @@ __kernel void mat_mul_native_t_nt( * - K0 = 1, 2, 3, 4, 8, 16 (only 4, 8, 16 if RHS_TENSOR_TYPE=IMAGE) * @note Values > 8 for M0, N0 and K0 are not expected to be efficient * - * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: F32/F16 - * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) - * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) - * @param[in] lhs_w The width of the lhs tensor - * @param[in] lhs_h The height of the lhs tensor - * @param[in] lhs_n Number of the matrices (buffers) in the batch - * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix - * @param[in] rhs_img (Optional) Read only cl_image object for the rhs tensor. Included when RHS_TENSOR_TYPE=IMAGE - * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr - * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) - * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) - * @param[in] rhs_w The width of the rhs tensor - * @param[in] rhs_h The height of the rhs tensor - * @param[in] rhs_n Number of the matrices (buffers) in the batch - * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix - * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr - * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) - * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) - * @param[in] dst_w The width of the dst tensor - * @param[in] dst_h The height of the dst tensor - * @param[in] dst_n Number of the matrices (buffers) in the batch - * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix + * @param[in] lhs_ptr Pointer to the lhs matrix. 
Supported data types: F32/F16 + * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) + * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) + * @param[in] lhs_w The width of the lhs tensor + * @param[in] lhs_h The height of the lhs tensor + * @param[in] lhs_n Number of the matrices (buffers) in the batch + * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix + * @param[in] rhs_img (Optional) Read only cl_image object for the rhs tensor. Included when RHS_TENSOR_TYPE=IMAGE + * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr + * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) + * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) + * @param[in] rhs_w The width of the rhs tensor + * @param[in] rhs_h The height of the rhs tensor + * @param[in] rhs_n Number of the matrices (buffers) in the batch + * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix + * @param[in] bias_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p lhs_ptr + * @param[in] bias_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes) + * @param[in] bias_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes) + * @param[in] bias_w (Optional) The size of the width dimension of the bias tensor + * @param[in] bias_h (Optional) The size of the height dimension of the bias tensor + * @param[in] bias_n (Optional) The size of the depth dimension of the bias tensor + * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor + * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr, + * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) + * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) + * @param[in] dst_w The width of the dst tensor + * @param[in] dst_h The height of the dst tensor + * @param[in] dst_n Number of the matrices (buffers) in the batch + * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix */ __kernel void mat_mul_native_t_t( TENSOR3D_T(lhs, BUFFER), TENSOR3D_T(rhs, RHS_TENSOR_TYPE), +#ifdef BIAS + TENSOR3D_T(bias, BUFFER), +#endif // defined(BIAS) TENSOR3D_T(dst, BUFFER)) { const uint x = GET_SPATIAL_IDX(0, N0, PARTIAL_STORE_N0); @@ -630,6 +697,10 @@ __kernel void mat_mul_native_t_t( indirect_buffer[_i].v = min(_i, select(M0 - 1, PARTIAL_STORE_M0 - 1, y_cond)); }); +#ifdef BIAS + perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, acc, x); +#endif // defined(BIAS) + T_ACTIVATION(DATA_TYPE, M0, N0, ACTIVATION_TYPE, A_VAL, B_VAL, acc, acc); T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, 0, dst_stride_y, x_cond, acc, indirect_buffer); diff --git a/src/core/CL/cl_kernels/common/mat_mul_mmul.cl b/src/core/CL/cl_kernels/common/mat_mul_mmul.cl index a53db27fb8..e549da86d4 100644 --- a/src/core/CL/cl_kernels/common/mat_mul_mmul.cl +++ b/src/core/CL/cl_kernels/common/mat_mul_mmul.cl @@ -24,6 +24,21 @@ #include "helpers.h" #include "tile_helpers.h" +#ifdef BIAS +// This function performs in-place bias addition for float and half datatypes when bias is enabled. +// Note The tile's dimensions used for the LHS and RHS matrices (M0, N0) must be passed at compile time using -DN0, -DM0 (e.g. 
-DN0=8, -DM0=4). +inline void perform_bias_addition(uchar *bias_ptr, uint bias_offset_first_element_in_bytes, TILE(DATA_TYPE, M0, N0, acc), uint x) +{ + TILE(DATA_TYPE, 1, N0, bias_tile); + + // below expands to use bias_ptr and bias_offset_first_element_in_bytes + T_LOAD(DATA_TYPE, 1, N0, BUFFER, bias, x, 0, 1, 0, bias_tile); + + // c = c + bias[broadcasted] + T_ELTWISE_BROADCAST_ADD_X(DATA_TYPE, M0, N0, acc, bias_tile, acc); +} +#endif // defined(BIAS) + #if defined(MAT_MUL_NATIVE_MMUL_NT_NT) /** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul) using MMUL: LHS non-transposed, RHS non-transposed - buffer only * @@ -40,34 +55,44 @@ * - K0 = 1 * @note Values > 8 for M0 are not expected to be efficient * - * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: F32/F16 - * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) - * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) - * @param[in] lhs_w The width of the lhs tensor - * @param[in] lhs_h The height of the lhs tensor - * @param[in] lhs_n Number of the matrices (buffers) in the batch - * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix - * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr - * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) - * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) - * @param[in] rhs_w The width of the rhs tensor - * @param[in] rhs_h The height of the rhs tensor - * @param[in] rhs_n Number of the matrices (buffers) in the batch - * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix - * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr - * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) - * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) - * @param[in] dst_w The width of the dst tensor - * @param[in] dst_h The height of the dst tensor - * @param[in] dst_n Number of the matrices (buffers) in the batch - * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix - * @param[in] M Number of rows in LHS matrix - * @param[in] N Number of columns in RHS matrix - * @param[in] K Number of columns in LHS matrix and rows in RHS matrix, which is multiple of MMUL_K0. + * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: F32/F16 + * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) + * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) + * @param[in] lhs_w The width of the lhs tensor + * @param[in] lhs_h The height of the lhs tensor + * @param[in] lhs_n Number of the matrices (buffers) in the batch + * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix + * @param[in] rhs_ptr Pointer to the rhs matrix. 
Supported data types: same as @p lhs_ptr + * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) + * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) + * @param[in] rhs_w The width of the rhs tensor + * @param[in] rhs_h The height of the rhs tensor + * @param[in] rhs_n Number of the matrices (buffers) in the batch + * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix + * @param[in] bias_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p lhs_ptr + * @param[in] bias_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes) + * @param[in] bias_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes) + * @param[in] bias_w (Optional) The size of the width dimension of the bias tensor + * @param[in] bias_h (Optional) The size of the height dimension of the bias tensor + * @param[in] bias_n (Optional) The size of the depth dimension of the bias tensor + * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor + * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr + * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) + * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) + * @param[in] dst_w The width of the dst tensor + * @param[in] dst_h The height of the dst tensor + * @param[in] dst_n Number of the matrices (buffers) in the batch + * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix + * @param[in] M Number of rows in LHS matrix + * @param[in] N Number of columns in RHS matrix + * @param[in] K Number of columns in LHS matrix and rows in RHS matrix, which is multiple of MMUL_K0. */ __kernel void mat_mul_native_mmul_nt_nt( TENSOR3D_T(lhs, BUFFER), TENSOR3D_T(rhs, BUFFER), +#ifdef BIAS + TENSOR3D_T(bias, BUFFER), +#endif // defined(BIAS) TENSOR3D_T(dst, BUFFER), const int M, const int N, @@ -90,7 +115,7 @@ __kernel void mat_mul_native_mmul_nt_nt( // x = [0, ((N / N0) / MMUL_N0) * MMUL_N0 * MMUL_M0) // x = [0, (N / N0) * MMUL_MO) const uint x0 = get_global_id(0); // [0, (N / N0) * MMUL_M0) - // The upper limit is a simplified version of (N / N0) / MMUL_N0) * MMUL_BLOCK_SIZE) + // The upper limit is a simplified version of (N / N0) / MMUL_N0) * MMUL_BLOCK_SIZE) const uint y0 = get_global_id(1); // [0, (M / M0) / MMUL_M0) const uint z = get_global_id(2); // Batch @@ -347,6 +372,10 @@ __kernel void mat_mul_native_mmul_nt_nt( #define c c_f32 #endif // defined(HALF_PRECISION) +#ifdef BIAS + perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, c, dst_x); +#endif // defined(BIAS) + if(dst_x + N0 <= N || N0_LEFTOVER == 0) { LOOP_UNROLLING(int, m0, 0, 1, M0, @@ -391,34 +420,44 @@ __kernel void mat_mul_native_mmul_nt_nt( * - K0 = 1 * @note Values > 8 for M0 are not expected to be efficient * - * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: F32/F16 - * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) - * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) - * @param[in] lhs_w The width of the lhs tensor - * @param[in] lhs_h The height of the lhs tensor - * @param[in] lhs_n Number of the matrices (buffers) in the batch - * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix - * @param[in] rhs_ptr Pointer to the rhs matrix. 
Supported data types: same as @p lhs_ptr - * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) - * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) - * @param[in] rhs_w The width of the rhs tensor - * @param[in] rhs_h The height of the rhs tensor - * @param[in] rhs_n Number of the matrices (buffers) in the batch - * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix - * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr - * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) - * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) - * @param[in] dst_w The width of the dst tensor - * @param[in] dst_h The height of the dst tensor - * @param[in] dst_n Number of the matrices (buffers) in the batch - * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix - * @param[in] M Number of rows in DST matrix - * @param[in] N Number of columns in DST matrix - * @param[in] K Number of rows in LHS and RHS matrices, which is multiple of MMUL_K0. + * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: F32/F16 + * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) + * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) + * @param[in] lhs_w The width of the lhs tensor + * @param[in] lhs_h The height of the lhs tensor + * @param[in] lhs_n Number of the matrices (buffers) in the batch + * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix + * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr + * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) + * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) + * @param[in] rhs_w The width of the rhs tensor + * @param[in] rhs_h The height of the rhs tensor + * @param[in] rhs_n Number of the matrices (buffers) in the batch + * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix + * @param[in] bias_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p lhs_ptr + * @param[in] bias_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes) + * @param[in] bias_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes) + * @param[in] bias_w (Optional) The size of the width dimension of the bias tensor + * @param[in] bias_h (Optional) The size of the height dimension of the bias tensor + * @param[in] bias_n (Optional) The size of the depth dimension of the bias tensor + * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor + * @param[out] dst_ptr Pointer to the dst matrix. 
Supported data types: same as @p lhs_ptr + * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) + * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) + * @param[in] dst_w The width of the dst tensor + * @param[in] dst_h The height of the dst tensor + * @param[in] dst_n Number of the matrices (buffers) in the batch + * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix + * @param[in] M Number of rows in DST matrix + * @param[in] N Number of columns in DST matrix + * @param[in] K Number of rows in LHS and RHS matrices, which is multiple of MMUL_K0. */ __kernel void mat_mul_native_mmul_t_nt( TENSOR3D_T(lhs, BUFFER), TENSOR3D_T(rhs, BUFFER), +#ifdef BIAS + TENSOR3D_T(bias, BUFFER), +#endif // defined(BIAS) TENSOR3D_T(dst, BUFFER), const int M, const int N, @@ -428,7 +467,7 @@ __kernel void mat_mul_native_mmul_t_nt( // For explanations on how this kernel works, please refer to NT/NT kernel. This kernel makes little modifications to it. const uint x0 = get_global_id(0); // [0, (N / N0) * MMUL_M0) - // The upper limit is a simplified version of (N / N0) / MMUL_N0) * MMUL_BLOCK_SIZE) + // The upper limit is a simplified version of (N / N0) / MMUL_N0) * MMUL_BLOCK_SIZE) const uint y0 = get_global_id(1); // [0, (M / M0) / MMUL_M0) const uint z = get_global_id(2); // Batch @@ -511,6 +550,10 @@ __kernel void mat_mul_native_mmul_t_nt( #define c c_f32 #endif // defined(HALF_PRECISION) +#ifdef BIAS + perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, c, dst_x); +#endif // defined(BIAS) + if(dst_x + N0 <= N || N0_LEFTOVER == 0) { LOOP_UNROLLING(int, m0, 0, 1, M0, @@ -554,34 +597,44 @@ __kernel void mat_mul_native_mmul_t_nt( * - K0 = 1 * @note Values > 8 for M0 are not expected to be efficient * - * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: F32/F16 - * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) - * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) - * @param[in] lhs_w The width of the lhs tensor - * @param[in] lhs_h The height of the lhs tensor - * @param[in] lhs_n Number of the matrices (buffers) in the batch - * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix - * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr - * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) - * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) - * @param[in] rhs_w The width of the rhs tensor - * @param[in] rhs_h The height of the rhs tensor - * @param[in] rhs_n Number of the matrices (buffers) in the batch - * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix - * @param[out] dst_ptr Pointer to the dst matrix. 
Supported data types: same as @p lhs_ptr - * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) - * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) - * @param[in] dst_w The width of the dst tensor - * @param[in] dst_h The height of the dst tensor - * @param[in] dst_n Number of the matrices (buffers) in the batch - * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix - * @param[in] M Number of rows in LHS matrix - * @param[in] N Number of columns in RHS matrix - * @param[in] K Number of columns in LHS matrix and columns in RHS matrix, which is multiple of MMUL_K0. + * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: F32/F16 + * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) + * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) + * @param[in] lhs_w The width of the lhs tensor + * @param[in] lhs_h The height of the lhs tensor + * @param[in] lhs_n Number of the matrices (buffers) in the batch + * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix + * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr + * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) + * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) + * @param[in] rhs_w The width of the rhs tensor + * @param[in] rhs_h The height of the rhs tensor + * @param[in] rhs_n Number of the matrices (buffers) in the batch + * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix + * @param[in] bias_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p lhs_ptr + * @param[in] bias_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes) + * @param[in] bias_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes) + * @param[in] bias_w (Optional) The size of the width dimension of the bias tensor + * @param[in] bias_h (Optional) The size of the height dimension of the bias tensor + * @param[in] bias_n (Optional) The size of the depth dimension of the bias tensor + * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor + * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr + * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) + * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) + * @param[in] dst_w The width of the dst tensor + * @param[in] dst_h The height of the dst tensor + * @param[in] dst_n Number of the matrices (buffers) in the batch + * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix + * @param[in] M Number of rows in LHS matrix + * @param[in] N Number of columns in RHS matrix + * @param[in] K Number of columns in LHS matrix and columns in RHS matrix, which is multiple of MMUL_K0. */ __kernel void mat_mul_native_mmul_nt_t( TENSOR3D_T(lhs, BUFFER), TENSOR3D_T(rhs, BUFFER), +#ifdef BIAS + TENSOR3D_T(bias, BUFFER), +#endif // defined(BIAS) TENSOR3D_T(dst, BUFFER), const int M, const int N, @@ -591,7 +644,7 @@ __kernel void mat_mul_native_mmul_nt_t( // For explanations on how this kernel works, please refer to NT/NT kernel. This kernel makes little modifications to it. 
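    // Bias handling is likewise identical to the NT/NT kernel: when BIAS is
    // defined, perform_bias_addition() loads an N0-wide slice of the bias
    // vector at column dst_x and broadcast-adds it to every M0 row of the
    // accumulator tile c, after the MMUL loop (and after the conversion back
    // to half precision, when enabled) and before the bound-checked store.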
const uint x0 = get_global_id(0); // [0, (N / N0) * MMUL_M0) - // The upper limit is a simplified version of (N / N0) / MMUL_N0) * MMUL_BLOCK_SIZE) + // The upper limit is a simplified version of (N / N0) / MMUL_N0) * MMUL_BLOCK_SIZE) const uint y0 = get_global_id(1); // [0, (M / M0) / MMUL_M0) const uint z = get_global_id(2); // Batch @@ -679,6 +732,10 @@ __kernel void mat_mul_native_mmul_nt_t( #define c c_f32 #endif // defined(HALF_PRECISION) +#ifdef BIAS + perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, c, dst_x); +#endif // defined(BIAS) + if(dst_x + N0 <= N || N0_LEFTOVER == 0) { LOOP_UNROLLING(int, m0, 0, 1, M0, @@ -722,34 +779,44 @@ __kernel void mat_mul_native_mmul_nt_t( * - K0 = 1 * @note Values > 8 for M0 are not expected to be efficient * - * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: F32/F16 - * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) - * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) - * @param[in] lhs_w The width of the lhs tensor - * @param[in] lhs_h The height of the lhs tensor - * @param[in] lhs_n Number of the matrices (buffers) in the batch - * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix - * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr - * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) - * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) - * @param[in] rhs_w The width of the rhs tensor - * @param[in] rhs_h The height of the rhs tensor - * @param[in] rhs_n Number of the matrices (buffers) in the batch - * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix - * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr - * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) - * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) - * @param[in] dst_w The width of the dst tensor - * @param[in] dst_h The height of the dst tensor - * @param[in] dst_n Number of the matrices (buffers) in the batch - * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix - * @param[in] M Number of rows in LHS matrix - * @param[in] N Number of columns in RHS matrix - * @param[in] K Number of rows in LHS matrix and columns in RHS matrix, which is multiple of MMUL_K0. + * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: F32/F16 + * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) + * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) + * @param[in] lhs_w The width of the lhs tensor + * @param[in] lhs_h The height of the lhs tensor + * @param[in] lhs_n Number of the matrices (buffers) in the batch + * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix + * @param[in] rhs_ptr Pointer to the rhs matrix. 
Supported data types: same as @p lhs_ptr + * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) + * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) + * @param[in] rhs_w The width of the rhs tensor + * @param[in] rhs_h The height of the rhs tensor + * @param[in] rhs_n Number of the matrices (buffers) in the batch + * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix + * @param[in] bias_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p lhs_ptr + * @param[in] bias_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes) + * @param[in] bias_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes) + * @param[in] bias_w (Optional) The size of the width dimension of the bias tensor + * @param[in] bias_h (Optional) The size of the height dimension of the bias tensor + * @param[in] bias_n (Optional) The size of the depth dimension of the bias tensor + * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor + * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr + * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) + * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) + * @param[in] dst_w The width of the dst tensor + * @param[in] dst_h The height of the dst tensor + * @param[in] dst_n Number of the matrices (buffers) in the batch + * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix + * @param[in] M Number of rows in LHS matrix + * @param[in] N Number of columns in RHS matrix + * @param[in] K Number of rows in LHS matrix and columns in RHS matrix, which is multiple of MMUL_K0. */ __kernel void mat_mul_native_mmul_t_t( TENSOR3D_T(lhs, BUFFER), TENSOR3D_T(rhs, BUFFER), +#ifdef BIAS + TENSOR3D_T(bias, BUFFER), +#endif // defined(BIAS) TENSOR3D_T(dst, BUFFER), const int M, const int N, @@ -759,7 +826,7 @@ __kernel void mat_mul_native_mmul_t_t( // For explanations on how this kernel works, please refer to NT/NT kernel. This kernel makes little modifications to it. const uint x0 = get_global_id(0); // [0, (N / N0) * MMUL_M0) - // The upper limit is a simplified version of (N / N0) / MMUL_N0) * MMUL_BLOCK_SIZE) + // The upper limit is a simplified version of (N / N0) / MMUL_N0) * MMUL_BLOCK_SIZE) const uint y0 = get_global_id(1); // [0, (M / M0) / MMUL_M0) const uint z = get_global_id(2); // Batch @@ -847,6 +914,10 @@ __kernel void mat_mul_native_mmul_t_t( #define c c_f32 #endif // defined(HALF_PRECISION) +#ifdef BIAS + perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, c, dst_x); +#endif // defined(BIAS) + if(dst_x + N0 <= N || N0_LEFTOVER == 0) { LOOP_UNROLLING(int, m0, 0, 1, M0, diff --git a/src/core/CL/cl_kernels/common/mat_mul_quantized.cl b/src/core/CL/cl_kernels/common/mat_mul_quantized.cl index 7029af2188..7f81ac4549 100644 --- a/src/core/CL/cl_kernels/common/mat_mul_quantized.cl +++ b/src/core/CL/cl_kernels/common/mat_mul_quantized.cl @@ -25,6 +25,21 @@ #include "helpers.h" #include "tile_helpers.h" +#ifdef BIAS +// This function performs in-place bias addition for integer datatype when bias is enabled. +// Note The tile's dimensions used for the LHS and RHS matrices (M0, N0) must be passed at compile time using -DN0, -DM0 (e.g. -DN0=8, -DM0=4). 
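+// Unlike the float/half variant in mat_mul.cl, the accumulator of the
+// quantized kernels is int, so the bias values are loaded and added as int
+// here, before the tile is requantized to the destination data type.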
+inline void perform_bias_addition(uchar *bias_ptr, uint bias_offset_first_element_in_bytes, TILE(int, M0, N0, acc), uint x) +{ + TILE(int, 1, N0, bias_tile); + + // below expands to use bias_ptr and bias_offset_first_element_in_bytes + T_LOAD(int, 1, N0, BUFFER, bias, x, 0, 1, 0, bias_tile); + + // c = c + bias[broadcasted] + T_ELTWISE_BROADCAST_ADD_X(int, M0, N0, acc, bias_tile, acc); +} +#endif // defined(BIAS) + #if defined(MAT_MUL_NATIVE_QUANTIZED_NT_NT) /** This OpenCL kernel performs the batch matrix multiplication (BatchMatMul): LHS non-transposed, RHS non-transposed - buffer only * @@ -43,31 +58,41 @@ * - K0 = 1, 2, 3, 4, 8, 16 * @note Values > 8 for M0 are not expected to be efficient * - * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: QASYMM8_SIGNED/QASYMM8 - * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) - * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) - * @param[in] lhs_w The width of the lhs tensor - * @param[in] lhs_h The height of the lhs tensor - * @param[in] lhs_n Number of the matrices (buffers) in the batch - * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix - * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr - * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) - * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) - * @param[in] rhs_w The width of the rhs tensor - * @param[in] rhs_h The height of the rhs tensor - * @param[in] rhs_n Number of the matrices (buffers) in the batch - * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix - * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr - * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) - * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) - * @param[in] dst_w The width of the dst tensor - * @param[in] dst_h The height of the dst tensor - * @param[in] dst_n Number of the matrices (buffers) in the batch - * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix + * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: QASYMM8_SIGNED/QASYMM8 + * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) + * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) + * @param[in] lhs_w The width of the lhs tensor + * @param[in] lhs_h The height of the lhs tensor + * @param[in] lhs_n Number of the matrices (buffers) in the batch + * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix + * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr + * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) + * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) + * @param[in] rhs_w The width of the rhs tensor + * @param[in] rhs_h The height of the rhs tensor + * @param[in] rhs_n Number of the matrices (buffers) in the batch + * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix + * @param[in] bias_ptr (Optional) Pointer to the bias tensor. 
Supported data type: same as @p lhs_ptr + * @param[in] bias_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes) + * @param[in] bias_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes) + * @param[in] bias_w (Optional) The size of the width dimension of the bias tensor + * @param[in] bias_h (Optional) The size of the height dimension of the bias tensor + * @param[in] bias_n (Optional) The size of the depth dimension of the bias tensor + * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor + * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr + * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) + * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) + * @param[in] dst_w The width of the dst tensor + * @param[in] dst_h The height of the dst tensor + * @param[in] dst_n Number of the matrices (buffers) in the batch + * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix */ __kernel void mat_mul_native_quantized_nt_nt( TENSOR3D_T(lhs, BUFFER), TENSOR3D_T(rhs, BUFFER), +#ifdef BIAS + TENSOR3D_T(bias, BUFFER), +#endif // defined(BIAS) TENSOR3D_T(dst, BUFFER)) { const uint x = GET_SPATIAL_IDX(0, N0, PARTIAL_STORE_N0); @@ -197,6 +222,10 @@ __kernel void mat_mul_native_quantized_nt_nt( const bool x_cond = PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0; const bool y_cond = PARTIAL_STORE_M0 != 0 && get_global_id(1) == 0; +#ifdef BIAS + perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, acc, x); +#endif // defined(BIAS) + // Quantize the tile TILE(DATA_TYPE, M0, N0, accq); T_QUANTIZE8_ASYMMETRIC(int, DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, acc, accq); @@ -231,31 +260,41 @@ __kernel void mat_mul_native_quantized_nt_nt( * - K0 = 1, 2, 3, 4, 8, 16 * @note Values > 8 for M0, N0, K0 are not expected to be efficient * - * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: QASYMM8/QASYMM8_SIGNED - * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) - * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) - * @param[in] lhs_w The width of the lhs tensor - * @param[in] lhs_h The height of the lhs tensor - * @param[in] lhs_n Number of the matrices (buffers) in the batch - * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix - * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr - * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) - * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) - * @param[in] rhs_w The width of the rhs tensor - * @param[in] rhs_h The height of the rhs tensor - * @param[in] rhs_n Number of the matrices (buffers) in the batch - * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix - * @param[out] dst_ptr Pointer to the dst matrix. 
Supported data types: same as @p lhs_ptr - * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) - * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) - * @param[in] dst_w The width of the dst tensor - * @param[in] dst_h The height of the dst tensor - * @param[in] dst_n Number of the matrices (buffers) in the batch - * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix + * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: QASYMM8/QASYMM8_SIGNED + * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) + * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) + * @param[in] lhs_w The width of the lhs tensor + * @param[in] lhs_h The height of the lhs tensor + * @param[in] lhs_n Number of the matrices (buffers) in the batch + * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix + * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr + * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) + * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) + * @param[in] rhs_w The width of the rhs tensor + * @param[in] rhs_h The height of the rhs tensor + * @param[in] rhs_n Number of the matrices (buffers) in the batch + * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix + * @param[in] bias_ptr (Optional) Pointer to the bias tensor. Supported data type: same as @p lhs_ptr + * @param[in] bias_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes) + * @param[in] bias_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes) + * @param[in] bias_w (Optional) The size of the width dimension of the bias tensor + * @param[in] bias_h (Optional) The size of the height dimension of the bias tensor + * @param[in] bias_n (Optional) The size of the depth dimension of the bias tensor + * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor + * @param[out] dst_ptr Pointer to the dst matrix. 
Supported data types: same as @p lhs_ptr + * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) + * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) + * @param[in] dst_w The width of the dst tensor + * @param[in] dst_h The height of the dst tensor + * @param[in] dst_n Number of the matrices (buffers) in the batch + * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix */ __kernel void mat_mul_native_quantized_nt_t( TENSOR3D_T(lhs, BUFFER), TENSOR3D_T(rhs, BUFFER), +#ifdef BIAS + TENSOR3D_T(bias, BUFFER), +#endif // defined(BIAS) TENSOR3D_T(dst, BUFFER)) { const uint x = GET_SPATIAL_IDX(0, N0, PARTIAL_STORE_N0); @@ -377,6 +416,10 @@ __kernel void mat_mul_native_quantized_nt_t( const bool x_cond = PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0; const bool y_cond = PARTIAL_STORE_M0 != 0 && get_global_id(1) == 0; +#ifdef BIAS + perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, acc, x); +#endif // defined(BIAS) + // Quantize the tile TILE(DATA_TYPE, M0, N0, accq); T_QUANTIZE8_ASYMMETRIC(int, DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, acc, accq); @@ -411,31 +454,41 @@ __kernel void mat_mul_native_quantized_nt_t( * - K0 = 1, 2, 3, 4, 8, 16 * @note Values > 8 for M0, N0 and K0 are not expected to be efficient * - * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: QASYMM8/QASYMM8_SIGNED - * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) - * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) - * @param[in] lhs_w The width of the lhs tensor - * @param[in] lhs_h The height of the lhs tensor - * @param[in] lhs_n Number of the matrices (buffers) in the batch - * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix - * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr - * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) - * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) - * @param[in] rhs_w The width of the rhs tensor - * @param[in] rhs_h The height of the rhs tensor - * @param[in] rhs_n Number of the matrices (buffers) in the batch - * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix - * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr - * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) - * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) - * @param[in] dst_w The width of the dst tensor - * @param[in] dst_h The height of the dst tensor - * @param[in] dst_n Number of the matrices (buffers) in the batch - * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix + * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: QASYMM8/QASYMM8_SIGNED + * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) + * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) + * @param[in] lhs_w The width of the lhs tensor + * @param[in] lhs_h The height of the lhs tensor + * @param[in] lhs_n Number of the matrices (buffers) in the batch + * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix + * @param[in] rhs_ptr Pointer to the rhs matrix. 
Supported data types: same as @p lhs_ptr + * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) + * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) + * @param[in] rhs_w The width of the rhs tensor + * @param[in] rhs_h The height of the rhs tensor + * @param[in] rhs_n Number of the matrices (buffers) in the batch + * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix + * @param[in] bias_ptr (Optional) Pointer to the bias tensor. Supported data type: S32 + * @param[in] bias_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes) + * @param[in] bias_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes) + * @param[in] bias_w (Optional) The size of the width dimension of the bias tensor + * @param[in] bias_h (Optional) The size of the height dimension of the bias tensor + * @param[in] bias_n (Optional) The size of the depth dimension of the bias tensor + * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor + * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr + * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) + * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) + * @param[in] dst_w The width of the dst tensor + * @param[in] dst_h The height of the dst tensor + * @param[in] dst_n Number of the matrices (buffers) in the batch + * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix */ __kernel void mat_mul_native_quantized_t_nt( TENSOR3D_T(lhs, BUFFER), TENSOR3D_T(rhs, BUFFER), +#ifdef BIAS + TENSOR3D_T(bias, BUFFER), +#endif // defined(BIAS) TENSOR3D_T(dst, BUFFER)) { const uint x = GET_SPATIAL_IDX(0, N0, PARTIAL_STORE_N0); @@ -559,6 +612,10 @@ __kernel void mat_mul_native_quantized_t_nt( const bool x_cond = PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0; const bool y_cond = PARTIAL_STORE_M0 != 0 && get_global_id(1) == 0; +#ifdef BIAS + perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, acc, x); +#endif // defined(BIAS) + // Quantize the tile TILE(DATA_TYPE, M0, N0, accq); T_QUANTIZE8_ASYMMETRIC(int, DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, acc, accq); @@ -593,31 +650,41 @@ __kernel void mat_mul_native_quantized_t_nt( * - K0 = 1, 2, 3, 4, 8, 16 * @note Values > 8 for M0, N0 and K0 are not expected to be efficient * - * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: QASYMM8/QASYMM8_SIGNED - * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) - * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) - * @param[in] lhs_w The width of the lhs tensor - * @param[in] lhs_h The height of the lhs tensor - * @param[in] lhs_n Number of the matrices (buffers) in the batch - * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix - * @param[in] rhs_ptr Pointer to the rhs matrix.
Supported data types: same as @p lhs_ptr - * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) - * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) - * @param[in] rhs_w The width of the rhs tensor - * @param[in] rhs_h The height of the rhs tensor - * @param[in] rhs_n Number of the matrices (buffers) in the batch - * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix - * @param[out] dst_ptr Pointer to the dst matrix. Supported data types: same as @p lhs_ptr - * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) - * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) - * @param[in] dst_w The width of the dst tensor - * @param[in] dst_h The height of the dst tensor - * @param[in] dst_n Number of the matrices (buffers) in the batch - * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix + * @param[in] lhs_ptr Pointer to the lhs matrix. Supported data types: QASYMM8/QASYMM8_SIGNED + * @param[in] lhs_stride_y Stride of the lhs matrix in Y (2nd) dimension (in bytes) + * @param[in] lhs_stride_z Stride of the lhs tensor in Z (3rd) dimension (in bytes) + * @param[in] lhs_w The width of the lhs tensor + * @param[in] lhs_h The height of the lhs tensor + * @param[in] lhs_n Number of the matrices (buffers) in the batch + * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the lhs matrix + * @param[in] rhs_ptr Pointer to the rhs matrix. Supported data types: same as @p lhs_ptr + * @param[in] rhs_stride_y Stride of the rhs matrix in Y (2nd) dimension (in bytes) + * @param[in] rhs_stride_z Stride of the rhs tensor in Z (3rd) dimension (in bytes) + * @param[in] rhs_w The width of the rhs tensor + * @param[in] rhs_h The height of the rhs tensor + * @param[in] rhs_n Number of the matrices (buffers) in the batch + * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the rhs matrix + * @param[in] bias_ptr (Optional) Pointer to the bias tensor. Supported data type: S32 + * @param[in] bias_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes) + * @param[in] bias_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes) + * @param[in] bias_w (Optional) The size of the width dimension of the bias tensor + * @param[in] bias_h (Optional) The size of the height dimension of the bias tensor + * @param[in] bias_n (Optional) The size of the depth dimension of the bias tensor + * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor + * @param[out] dst_ptr Pointer to the dst matrix.
Supported data types: same as @p lhs_ptr + * @param[in] dst_stride_y Stride of the dst matrix in Y (2nd) dimension (in bytes) + * @param[in] dst_stride_z Stride of the dst tensor in Z (3rd) dimension (in bytes) + * @param[in] dst_w The width of the dst tensor + * @param[in] dst_h The height of the dst tensor + * @param[in] dst_n Number of the matrices (buffers) in the batch + * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the dst matrix */ __kernel void mat_mul_native_quantized_t_t( TENSOR3D_T(lhs, BUFFER), TENSOR3D_T(rhs, BUFFER), +#ifdef BIAS + TENSOR3D_T(bias, BUFFER), +#endif // defined(BIAS) TENSOR3D_T(dst, BUFFER)) { const uint x = GET_SPATIAL_IDX(0, N0, PARTIAL_STORE_N0); @@ -745,6 +812,10 @@ __kernel void mat_mul_native_quantized_t_t( const bool x_cond = PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0; const bool y_cond = PARTIAL_STORE_M0 != 0 && get_global_id(1) == 0; +#ifdef BIAS + perform_bias_addition(bias_ptr, bias_offset_first_element_in_bytes, acc, x); +#endif // defined(BIAS) + // Quantize the tile TILE(DATA_TYPE, M0, N0, accq); T_QUANTIZE8_ASYMMETRIC(int, DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, acc, accq); diff --git a/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp b/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp index 02c5754672..a0eb3f2853 100644 --- a/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp +++ b/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp @@ -100,7 +100,8 @@ ClMatMulLowpNativeKernel::ClMatMulLowpNativeKernel() { _type = CLKernelType::GEMM; } -Status ClMatMulLowpNativeKernel::validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info, const ActivationLayerInfo &act_info) +Status ClMatMulLowpNativeKernel::validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *bias, const ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info, + const ActivationLayerInfo &act_info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs, dst); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED); @@ -111,24 +112,32 @@ Status ClMatMulLowpNativeKernel::validate(const ITensorInfo *lhs, const ITensorI ARM_COMPUTE_RETURN_ERROR_ON_MSG((act_info.activation() != ActivationFunction::IDENTITY && act_info.activation() != ActivationFunction::RELU && act_info.activation() != ActivationFunction::LU_BOUNDED_RELU && act_info.activation() != ActivationFunction::BOUNDED_RELU), "Activation Function specified is unsupported."); + const TensorShape expected_output_shape = misc::shape_calculator::compute_matmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info); if(dst->total_size() != 0) { - const TensorInfo tensor_info_output = dst->clone()->set_tensor_shape(misc::shape_calculator::compute_matmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info)); + const TensorInfo tensor_info_output = dst->clone()->set_tensor_shape(expected_output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &tensor_info_output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, dst); } + if(bias != nullptr) + { + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1); + ARM_COMPUTE_RETURN_ERROR_ON(expected_output_shape[0] != bias->dimension(0)); + } + return Status{}; } -void ClMatMulLowpNativeKernel::configure(const ClCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *dst, 
const MatMulKernelInfo &matmul_kernel_info, +void ClMatMulLowpNativeKernel::configure(const ClCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *bias, ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info, const ActivationLayerInfo &act_info) { ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, dst, &compile_context, &matmul_kernel_info); - ARM_COMPUTE_LOG_PARAMS(lhs, rhs, dst, matmul_kernel_info); - ARM_COMPUTE_ERROR_THROW_ON(validate(lhs, rhs, dst, matmul_kernel_info)); + ARM_COMPUTE_LOG_PARAMS(lhs, rhs, bias, dst, matmul_kernel_info); + ARM_COMPUTE_ERROR_THROW_ON(validate(lhs, rhs, bias, dst, matmul_kernel_info)); - // output tensor auto initialization if not yet initialized + // dst tensor auto initialization if not yet initialized auto_init_if_empty(*dst, lhs->clone()->set_tensor_shape(misc::shape_calculator::compute_matmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info))); const int m = dst->dimension(1); @@ -172,7 +181,8 @@ void ClMatMulLowpNativeKernel::configure(const ClCompileContext &compile_context // Note : Offset is not negated, unlike gemmlowp kernels build_opts.add_option("-DLHS_OFFSET=" + support::cpp11::to_string(lqinfo.offset)); build_opts.add_option("-DRHS_OFFSET=" + support::cpp11::to_string(rqinfo.offset)); - build_opts.add_option("-DDST_OFFSET=" + support::cpp11::to_string(dqinfo.offset)); // Passed as positive (unlike the above two) + build_opts.add_option("-DDST_OFFSET=" + support::cpp11::to_string(dqinfo.offset)); + build_opts.add_option_if(bias != nullptr, "-DBIAS"); // Floating point boundaries are quantized prior to being passed as arguments. // Note: We expect the input and output tensors to always adopt a per-tensor quantization approach @@ -222,17 +232,22 @@ void ClMatMulLowpNativeKernel::run_op(ITensorPack &tensors, const Window &window ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window); - const ICLTensor *lhs = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0)); - const ICLTensor *rhs = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1)); - ICLTensor *dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST)); + const ICLTensor *lhs = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0)); + const ICLTensor *rhs = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1)); + const ICLTensor *bias = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_2)); + ICLTensor *dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST)); ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, dst); - ARM_COMPUTE_LOG_PARAMS(lhs, rhs, dst); + ARM_COMPUTE_LOG_PARAMS(lhs, rhs, bias, dst); unsigned int idx = 0; Window window_collapsed = window.collapse(ICLKernel::window(), Window::DimZ); add_3d_tensor_nhw_argument(idx, lhs); add_3d_tensor_nhw_argument(idx, rhs); + if(bias != nullptr) + { + add_3d_tensor_nhw_argument(idx, bias); + } add_3d_tensor_nhw_argument(idx, dst); enqueue(queue, *this, window_collapsed, lws_hint()); diff --git a/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.h b/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.h index 67d1a6601f..c90828008c 100644 --- a/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.h +++ b/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.h @@ -45,15 +45,16 @@ public: /** Initialise the kernel's input and output. * * @param[in] compile_context The compile context to be used.
- * @param[in] lhs Input tensor for the LHS matrix. Data type supported: QASYMM8_SIGNED/QASYMM8. + * @param[in] lhs Input tensor info for the LHS matrix. Data type supported: QASYMM8_SIGNED/QASYMM8. * Dimensions above 2 are collapsed onto dimension 2 and represent the batch. - * @param[in] rhs Input tensor for the RHS matrix. Data type supported: same as @p lhs. + * @param[in] rhs Input tensor info for the RHS matrix. Data type supported: same as @p lhs. * Dimensions above 2 are collapsed onto dimension 2 and represent the batch. + * @param[in] bias Bias tensor info. Can be nullptr. Data type supported: S32. * @param[out] dst Output tensor info. Data type supported: same as @p lhs * @param[in] matmul_kernel_info Attributes for Batch MatMul Kernel - * @param[in] act_info Class containing information about fused activation function. + * @param[in] act_info (Optional) Class containing information about fused activation function. */ - void configure(const ClCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info, + void configure(const ClCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *bias, ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info, const ActivationLayerInfo &act_info = ActivationLayerInfo()); /** Static function to check if given info will lead to a valid configuration * @@ -61,7 +62,8 @@ public: * * @return a status */ - static Status validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + static Status validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *bias, const ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info, + const ActivationLayerInfo &act_info = ActivationLayerInfo()); // Inherited methods overridden: void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override; diff --git a/src/gpu/cl/kernels/ClMatMulNativeKernel.cpp b/src/gpu/cl/kernels/ClMatMulNativeKernel.cpp index 205396a639..545a5b2f62 100644 --- a/src/gpu/cl/kernels/ClMatMulNativeKernel.cpp +++ b/src/gpu/cl/kernels/ClMatMulNativeKernel.cpp @@ -120,7 +120,8 @@ ClMatMulNativeKernel::ClMatMulNativeKernel() _type = CLKernelType::GEMM; } -Status ClMatMulNativeKernel::validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info, const ActivationLayerInfo &act_info) +Status ClMatMulNativeKernel::validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *bias, const ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info, + const ActivationLayerInfo &act_info) { ARM_COMPUTE_UNUSED(act_info); ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs, dst); @@ -130,21 +131,30 @@ Status ClMatMulNativeKernel::validate(const ITensorInfo *lhs, const ITensorInfo ARM_COMPUTE_RETURN_ON_ERROR(validate_input_shapes(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info)); ARM_COMPUTE_RETURN_ON_ERROR(validate_export_to_cl_image(rhs, matmul_kernel_info)); + const TensorShape expected_output_shape = misc::shape_calculator::compute_matmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info); + if(dst->total_size() != 0) { - const TensorInfo tensor_info_dst = dst->clone()->set_tensor_shape(misc::shape_calculator::compute_matmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info)); + const TensorInfo tensor_info_dst = 
dst->clone()->set_tensor_shape(expected_output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &tensor_info_dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, dst); } + if(bias != nullptr) + { + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(bias, lhs); + ARM_COMPUTE_RETURN_ERROR_ON_MSG((bias->num_dimensions() > 1), "Multi dimensional bias is unsupported."); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(bias->dimension(0) != expected_output_shape[0], "First dimension of bias and output tensors must match."); + } + return Status{}; } -void ClMatMulNativeKernel::configure(const ClCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info, +void ClMatMulNativeKernel::configure(const ClCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *bias, ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info, const ActivationLayerInfo &act_info) { ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, dst, &compile_context, &matmul_kernel_info); - ARM_COMPUTE_LOG_PARAMS(lhs, rhs, dst, matmul_kernel_info); - ARM_COMPUTE_ERROR_THROW_ON(validate(lhs, rhs, dst, matmul_kernel_info)); + ARM_COMPUTE_LOG_PARAMS(lhs, rhs, bias, dst, matmul_kernel_info); + ARM_COMPUTE_ERROR_THROW_ON(validate(lhs, rhs, bias, dst, matmul_kernel_info)); // dst tensor auto initialization if not yet initialized auto_init_if_empty(*dst, lhs->clone()->set_tensor_shape(misc::shape_calculator::compute_matmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info))); @@ -176,6 +186,7 @@ void ClMatMulNativeKernel::configure(const ClCompileContext &compile_context, IT build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0)); build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0)); build_opts.add_option("-DK=" + support::cpp11::to_string(k)); + build_opts.add_option_if(bias != nullptr, "-DBIAS"); build_opts.add_option_if_else(_export_rhs_to_cl_image, "-DRHS_TENSOR_TYPE=IMAGE", "-DRHS_TENSOR_TYPE=BUFFER"); // Define values for activation function @@ -225,11 +236,12 @@ void ClMatMulNativeKernel::run_op(ITensorPack &tensors, const Window &window, cl ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window); - const ICLTensor *lhs = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0)); - const ICLTensor *rhs = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1)); - ICLTensor *dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST)); + const ICLTensor *lhs = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0)); + const ICLTensor *rhs = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1)); + const ICLTensor *bias = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_2)); // nullptr if bias is not present + ICLTensor *dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST)); ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, dst); - ARM_COMPUTE_LOG_PARAMS(lhs, rhs, dst); + ARM_COMPUTE_LOG_PARAMS(lhs, rhs, bias, dst); unsigned int idx = 0; Window window_collapsed = window.collapse(ICLKernel::window(), Window::DimZ); @@ -250,6 +262,10 @@ void ClMatMulNativeKernel::run_op(ITensorPack &tensors, const Window &window, cl } add_3d_tensor_nhw_argument(idx, rhs); + if(bias != nullptr) + { + add_3d_tensor_nhw_argument(idx, bias); + }
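+    // Note: tensor arguments are bound positionally and must match the kernel signature order: lhs, rhs, the optional bias (present only when the program was built with -DBIAS), then dst.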
add_3d_tensor_nhw_argument(idx, dst); enqueue(queue, *this, window_collapsed, lws_hint()); diff --git a/src/gpu/cl/kernels/ClMatMulNativeKernel.h b/src/gpu/cl/kernels/ClMatMulNativeKernel.h index 02d8ac3067..fe2b787c12 100644 --- a/src/gpu/cl/kernels/ClMatMulNativeKernel.h +++ b/src/gpu/cl/kernels/ClMatMulNativeKernel.h @@ -43,15 +43,16 @@ public: /** Initialise the kernel's input and output. * * @param[in] compile_context The compile context to be used. - * @param[in] lhs Input tensor for the LHS matrix. Data type supported: F32/F16. + * @param[in] lhs Input tensor info for the LHS matrix. Data type supported: F32/F16. * Dimensions above 2 are collapsed onto dimension 2 and represent the batch. - * @param[in] rhs Input tensor for the RHS matrix. Data type supported: same as @p lhs. + * @param[in] rhs Input tensor info for the RHS matrix. Data type supported: same as @p lhs. * Dimensions above 2 are collapsed onto dimension 2 and represent the batch. + * @param[in] bias Bias tensor info for bias matrix. Can be nullptr. Data type supported: same as @p lhs. * @param[out] dst Output tensor info. Data type supported: same as @p lhs * @param[in] matmul_kernel_info Attributes for Batch MatMul Kernel - * @param[in] act_info Specifies activation function to use after Matrix multiplication. Default is Identity function. + * @param[in] act_info (Optional) Specifies activation function to use after Matrix multiplication. Default is Identity function. */ - void configure(const ClCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info, + void configure(const ClCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *bias, ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info, const ActivationLayerInfo &act_info = ActivationLayerInfo()); /** Static function to check if given info will lead to a valid configuration * @@ -59,7 +60,8 @@ public: * * @return a status */ - static Status validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + static Status validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *bias, const ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info, + const ActivationLayerInfo &act_info = ActivationLayerInfo()); // Inherited methods overridden: void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override; diff --git a/src/gpu/cl/kernels/ClMatMulNativeMMULKernel.cpp b/src/gpu/cl/kernels/ClMatMulNativeMMULKernel.cpp index 4630ec08e9..0efcfb105c 100644 --- a/src/gpu/cl/kernels/ClMatMulNativeMMULKernel.cpp +++ b/src/gpu/cl/kernels/ClMatMulNativeMMULKernel.cpp @@ -60,9 +60,9 @@ inline std::pair adjust_m0_n0(int m0, int n0, int m, int n) Status validate_matmul_kernel_info(const MatMulKernelInfo &matmul_kernel_info) { const bool adj_lhs = matmul_kernel_info.adj_lhs; - const int m0 = matmul_kernel_info.m0; - const int n0 = matmul_kernel_info.n0; - const int k0 = matmul_kernel_info.k0; + const int m0 = matmul_kernel_info.m0; + const int n0 = matmul_kernel_info.n0; + const int k0 = matmul_kernel_info.k0; // Validate M0 ARM_COMPUTE_RETURN_ERROR_ON_MSG(m0 < 1, "Only positive integers are supported for M0"); @@ -149,7 +149,7 @@ ClMatMulNativeMMULKernel::ClMatMulNativeMMULKernel() _type = CLKernelType::GEMM; } -Status ClMatMulNativeMMULKernel::validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const 
ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info) +Status ClMatMulNativeMMULKernel::validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *bias, const ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs, dst); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::F32, DataType::F16); @@ -158,20 +158,29 @@ Status ClMatMulNativeMMULKernel::validate(const ITensorInfo *lhs, const ITensorI ARM_COMPUTE_RETURN_ON_ERROR(validate_matmul_kernel_info(matmul_kernel_info)); ARM_COMPUTE_RETURN_ON_ERROR(validate_input_shapes(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info)); + const TensorShape expected_output_shape = misc::shape_calculator::compute_matmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info); + if(dst->total_size() != 0) { - const TensorInfo tensor_info_dst = dst->clone()->set_tensor_shape(misc::shape_calculator::compute_matmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info)); + const TensorInfo tensor_info_dst = dst->clone()->set_tensor_shape(expected_output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &tensor_info_dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, dst); } + if(bias != nullptr) + { + ARM_COMPUTE_RETURN_ERROR_ON_MSG((bias->num_dimensions() > 1), "Multi dimensional bias is unsupported."); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(bias->dimension(0) != expected_output_shape[0], "First dimension of bias and output tensors must match."); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, bias); + } + return Status{}; } -void ClMatMulNativeMMULKernel::configure(const ClCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info) +void ClMatMulNativeMMULKernel::configure(const ClCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *bias, ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info) { ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, dst); - ARM_COMPUTE_LOG_PARAMS(lhs, rhs, dst, matmul_kernel_info); - ARM_COMPUTE_ERROR_THROW_ON(validate(lhs, rhs, dst, matmul_kernel_info)); + ARM_COMPUTE_LOG_PARAMS(lhs, rhs, bias, dst, matmul_kernel_info); + ARM_COMPUTE_ERROR_THROW_ON(validate(lhs, rhs, bias, dst, matmul_kernel_info)); // dst tensor auto initialization if not yet initialized auto_init_if_empty(*dst, lhs->clone()->set_tensor_shape(misc::shape_calculator::compute_matmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info))); @@ -207,6 +216,7 @@ void ClMatMulNativeMMULKernel::configure(const ClCompileContext &compile_context build_opts.add_option("-DMMUL_M0=" + support::cpp11::to_string(mmul_m0)); build_opts.add_option("-DMMUL_N0=" + support::cpp11::to_string(mmul_n0)); build_opts.add_option("-DMMUL_K0=" + support::cpp11::to_string(mmul_k0)); + build_opts.add_option_if(bias != nullptr, "-DBIAS"); std::string kernel_name("mat_mul_native_mmul"); kernel_name += matmul_kernel_info.adj_lhs ? 
"_t" : "_nt"; @@ -239,15 +249,20 @@ void ClMatMulNativeMMULKernel::run_op(ITensorPack &tensors, const Window &window ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window); - const ICLTensor *lhs = utils::cast::polymorphic_downcast(tensors.get_const_tensor(TensorType::ACL_SRC_0)); - const ICLTensor *rhs = utils::cast::polymorphic_downcast(tensors.get_const_tensor(TensorType::ACL_SRC_1)); - ICLTensor *dst = utils::cast::polymorphic_downcast(tensors.get_tensor(TensorType::ACL_DST)); + const ICLTensor *lhs = utils::cast::polymorphic_downcast(tensors.get_const_tensor(TensorType::ACL_SRC_0)); + const ICLTensor *rhs = utils::cast::polymorphic_downcast(tensors.get_const_tensor(TensorType::ACL_SRC_1)); + const ICLTensor *bias = utils::cast::polymorphic_downcast(tensors.get_const_tensor(TensorType::ACL_SRC_2)); // nullptr if bias is not present + ICLTensor *dst = utils::cast::polymorphic_downcast(tensors.get_tensor(TensorType::ACL_DST)); ARM_COMPUTE_ERROR_ON_NULLPTR(lhs, rhs, dst); - ARM_COMPUTE_LOG_PARAMS(lhs, rhs, dst); + ARM_COMPUTE_LOG_PARAMS(lhs, rhs, bias, dst); unsigned int idx = 0; add_3d_tensor_nhw_argument(idx, lhs); add_3d_tensor_nhw_argument(idx, rhs); + if(bias != nullptr) + { + add_3d_tensor_nhw_argument(idx, bias); + } add_3d_tensor_nhw_argument(idx, dst); // Pass m and n at runtime as signed ints, to ensure results of any subtractions they could be operand in, would still be signed. diff --git a/src/gpu/cl/kernels/ClMatMulNativeMMULKernel.h b/src/gpu/cl/kernels/ClMatMulNativeMMULKernel.h index 79f675d03b..80448974c4 100644 --- a/src/gpu/cl/kernels/ClMatMulNativeMMULKernel.h +++ b/src/gpu/cl/kernels/ClMatMulNativeMMULKernel.h @@ -66,19 +66,20 @@ public: * - No broadcasting in batch dimensions. I.e. batch dims must be the same across lhs, rhs and dst * * @param[in] compile_context The compile context to be used. - * @param[in] lhs Input tensor for the LHS matrix. - * @param[in] rhs Input tensor for the RHS matrix. + * @param[in] lhs Input tensor info for the LHS matrix. + * @param[in] rhs Input tensor info for the RHS matrix. + * @param[in] bias Bias tensor info. Can be nullptr. Data type supported: Same as @p lhs. * @param[out] dst Output tensor info. 
* @param[in] matmul_info Attributes for Batch MatMul Kernel */ - void configure(const ClCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *dst, const MatMulKernelInfo &matmul_info); + void configure(const ClCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *bias, ITensorInfo *dst, const MatMulKernelInfo &matmul_info); /** Static function to check if given info will lead to a valid configuration * * Similar to @ref ClMatMulNativeMMULKernel::configure() * * @return a status */ - static Status validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *dst, const MatMulKernelInfo &matmul_info); + static Status validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *bias, const ITensorInfo *dst, const MatMulKernelInfo &matmul_info); // Inherited methods overridden: void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override; diff --git a/src/gpu/cl/operators/ClFullyConnected.cpp b/src/gpu/cl/operators/ClFullyConnected.cpp index b7ba8b89fa..0be3f0f87e 100644 --- a/src/gpu/cl/operators/ClFullyConnected.cpp +++ b/src/gpu/cl/operators/ClFullyConnected.cpp @@ -113,22 +113,25 @@ Status construct_gemmlowp_output_stage(const ITensorInfo &src, const ITensorInfo Status validate_mm(const ITensorInfo &src, const ITensorInfo &weights, const ITensorInfo *bias, const ITensorInfo &dst, const FullyConnectedLayerInfo &fc_info) { - // If weights are dynamic, data is not batched, and bias is nullptr validate using matmul. - const bool weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true; - const bool use_matmul = !weights.are_values_constant() && !weights_reshaped && !(dst.dimension(1) > 1) && (bias == nullptr); + // Note : If input is dynamic and data is not batched, use matmul, else use gemm + const bool transpose_weights = fc_info.transpose_weights ? !fc_info.are_weights_reshaped : false; + const bool use_matmul = !weights.are_values_constant() && !(dst.dimension(1) > 1); + const bool use_dynamic_gemm = !use_matmul && !weights.are_values_constant() && transpose_weights; // use dynamic gemm as fallback for matmul + const bool is_quantized = is_data_type_quantized_asymmetric(src.data_type()); if(use_matmul) { - MatMulInfo m_info{}; - m_info.adj_rhs(fc_info.transpose_weights); + const MatMulInfo m_info = MatMulInfo().adj_rhs(transpose_weights); - // Note: Currently, shape is [M, B0, B1] - // LHS is reshaped here to match ClMatMul expectations of batch index in format - [M, 1, B0, B1, .. ] - TensorInfo lhs_to_use{ src }; - lhs_to_use.set_tensor_shape(get_reshaped_matmul_tensor(src.tensor_shape())); + // Note: LHS is reshaped here to match ClMatMul expectations of batch index - From [M, B0, B1] to [M, 1, B0, B1] + TensorInfo lhs_to_use = src.clone()->set_tensor_shape(get_reshaped_matmul_tensor(src.tensor_shape())); - // Operator level validation. - ARM_COMPUTE_RETURN_ON_ERROR(ClMatMul::validate(&lhs_to_use, &weights, &dst, m_info, fc_info.activation_info)); + const GPUTarget gpu_target = CLScheduler::get().target(); + std::unique_ptr<cl_matmul::IClMatMulNativeKernelConfig> t = cl_matmul::ClMatMulNativeKernelConfigurationFactory::create(gpu_target); + const MatMulKernelInfo kernel_info = t->configure(&lhs_to_use, &weights, m_info); + + return is_quantized ?
kernels::ClMatMulLowpNativeKernel::validate(&lhs_to_use, &weights, bias, &dst, kernel_info, fc_info.activation_info) : + kernels::ClMatMulNativeKernel::validate(&lhs_to_use, &weights, bias, &dst, kernel_info, fc_info.activation_info); } else { @@ -137,7 +140,7 @@ Status validate_mm(const ITensorInfo &src, const ITensorInfo &weights, const ITe const GEMMInfo &gemm_info = GEMMInfo(false, // is_a_reshaped false, // is_b_reshaped - true, // reshape_b_only_on_first_run + !use_dynamic_gemm, // reshape_b_only_on_first_run 0, // depth_output_gemm3d false, // reinterpret_input_as_3d fc_info.retain_internal_weights, // retain_internal_weights @@ -147,7 +150,7 @@ Status validate_mm(const ITensorInfo &src, const ITensorInfo &weights, const ITe true, // broadcast_bias ActivationLayerInfo()); // activation_info - if(is_data_type_quantized_asymmetric(src.data_type())) + if(is_quantized) { const UniformQuantizationInfo iq_info = src.quantization_info().uniform(); const UniformQuantizationInfo wq_info = weights.quantization_info().uniform(); @@ -191,35 +194,33 @@ ClFullyConnected::~ClFullyConnected() = default; void ClFullyConnected::configure_mm(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *bias, ITensorInfo *dst, const FullyConnectedLayerInfo &fc_info) { - // If weights are dynamic, configure matmul operator - else use gemm + // If weights are dynamic and matmul is supported use matmul, else use gemm if(_use_matmul) { - // Transpose RHS as _are_weights_reshaped == false when mat_mul is used. - const MatMulInfo mat_info = MatMulInfo().adj_rhs(fc_info.transpose_weights); + // Specify whether transpose weights is necessary in matmul info + const MatMulInfo mat_info = MatMulInfo().adj_rhs(_transpose_weights); // Note: MatMul does not need offset negation unlike gemm // 1. Change shape when calling matmul to fit batch expectations. - _lhs_to_use = *src->clone(); - _lhs_to_use.set_tensor_shape(get_reshaped_matmul_tensor(_lhs_to_use.tensor_shape())); // Collapse all dims > 2 into final dimension. - _is_quantized = is_data_type_quantized_asymmetric(_lhs_to_use.data_type()); + _lhs_to_use = src->clone()->set_tensor_shape(get_reshaped_matmul_tensor(src->tensor_shape())); - // 2. Call kernel for matmul directly. + // 2. Use heuristics to get kernel info object const GPUTarget gpu_target = CLScheduler::get().target(); std::unique_ptr<cl_matmul::IClMatMulNativeKernelConfig> kernel_config = cl_matmul::ClMatMulNativeKernelConfigurationFactory::create(gpu_target); + MatMulKernelInfo kernel_info = kernel_config->configure(src, weights, mat_info); - // Configure relevant matmul kernel - MatMulKernelInfo kernel_info = kernel_config->configure(src, weights, mat_info); + // 3.
Configure relevant matmul kernel if(_is_quantized) { _matmul_lowp_native_kernel = std::make_unique<kernels::ClMatMulLowpNativeKernel>(); _matmul_lowp_native_kernel->set_target(gpu_target); - _matmul_lowp_native_kernel->configure(compile_context, src, weights, dst, kernel_info, fc_info.activation_info); + _matmul_lowp_native_kernel->configure(compile_context, src, weights, bias, dst, kernel_info, fc_info.activation_info); } else { _matmul_native_kernel = std::make_unique<kernels::ClMatMulNativeKernel>(); _matmul_native_kernel->set_target(gpu_target); - _matmul_native_kernel->configure(compile_context, src, weights, dst, kernel_info, fc_info.activation_info); + _matmul_native_kernel->configure(compile_context, src, weights, bias, dst, kernel_info, fc_info.activation_info); } } else @@ -230,7 +231,7 @@ void ClFullyConnected::configure_mm(const CLCompileContext &compile_context, ITe const GEMMInfo &gemm_info = GEMMInfo(false, // is_a_reshaped false, // is_b_reshaped - !_dynamic_weights, // reshape_b_only_on_first_run + !_dynamic_gemm, // reshape_b_only_on_first_run 0, // depth_output_gemm3d false, // reinterpret_input_as_3d fc_info.retain_internal_weights, // retain_internal_weights @@ -269,7 +270,8 @@ void ClFullyConnected::configure_mm(const CLCompileContext &compile_context, ITe void ClFullyConnected::configure_conv_fc(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *bias, ITensorInfo *dst, const FullyConnectedLayerInfo &fc_info) { - ARM_COMPUTE_ERROR_ON((weights->dimension((_use_matmul) ? 0 : 1) != (src->dimension(0) * src->dimension(1) * src->dimension(2)))); + // MatMul fuses transpose operation, so we use the first dimension for comparison where appropriate. + ARM_COMPUTE_ERROR_ON((weights->dimension((_use_matmul && _transpose_weights) ? 0 : 1) != (src->dimension(0) * src->dimension(1) * src->dimension(2)))); // If the fully connected layer is called after a convolution layer, the input tensor must be linearized @@ -288,8 +290,8 @@ void ClFullyConnected::configure_conv_fc(const CLCompileContext &compile_context void ClFullyConnected::configure_fc_fc(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *bias, ITensorInfo *dst, const FullyConnectedLayerInfo &fc_info) { - // Compare first dimension when using matmul, as it performs transpose operation - ARM_COMPUTE_ERROR_ON(src->dimension(0) != weights->dimension((_use_matmul) ? 0 : 1)); + // MatMul fuses transpose operation, so we use the first dimension for comparison where appropriate. + ARM_COMPUTE_ERROR_ON(src->dimension(0) != weights->dimension((_use_matmul && _transpose_weights) ? 0 : 1)); // Configure matrix multiply kernel configure_mm(compile_context, src, weights, bias, dst, fc_info); @@ -304,20 +306,18 @@ void ClFullyConnected::configure(const CLCompileContext &compile_context, ITenso ARM_COMPUTE_ERROR_THROW_ON(ClFullyConnected::validate(src, weights, biases, dst, fc_info)); ARM_COMPUTE_LOG_PARAMS(src, weights, biases, dst, fc_info); - _are_weights_converted = true; - _are_weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true; - _is_fc_after_conv = true; - _is_quantized = is_data_type_quantized_asymmetric(src->data_type()); - _is_prepared = fc_info.retain_internal_weights; - _weights_to_use = TensorInfo(*weights); - _weights_to_use_idx = ACL_SRC_1; + _transpose_weights = fc_info.transpose_weights ?
!fc_info.are_weights_reshaped : false; + _is_fc_after_conv = true; + _is_quantized = is_data_type_quantized_asymmetric(src->data_type()); + _is_prepared = fc_info.retain_internal_weights; + _weights_to_use = TensorInfo(*weights); + _weights_to_use_idx = ACL_SRC_1; // When using dynamic weights - use matmul kernels. - // Note: We don't appear to support dynamic weights with pre-reshaped RHS. - // Note: No matmul with biases for the moment. + // Note: MatMul does not support broadcasting batch dimension, and therefore is disabled if fc is batched. Gemm is used as fallback. const bool is_batched_fc_layer = dst->dimension(1) > 1; - _dynamic_weights = !weights->are_values_constant() && !_are_weights_reshaped; - _use_matmul = _dynamic_weights && !is_batched_fc_layer && (biases == nullptr); + _use_matmul = !weights->are_values_constant() && !is_batched_fc_layer; + _dynamic_gemm = !weights->are_values_constant() && _transpose_weights && !_use_matmul; // With the Fully Connected layer we can have 4 different cases: // 1) Convolution layer -> Fully Connected layer without batches @@ -339,9 +339,8 @@ void ClFullyConnected::configure(const CLCompileContext &compile_context, ITenso ITensorInfo *weights_used = weights; - // Reshape weights if needed - // Not needed when matmul is in use - MatMul has transpose RHS flags. - if(!_are_weights_reshaped && !_use_matmul) + // Reshape weights if needed - Not needed when matmul is in use as matmul fuses transpose op. + if(_transpose_weights && !_use_matmul) { // Reshape the weights _reshape_weights = std::make_unique<ClTranspose>(); @@ -361,9 +360,9 @@ void ClFullyConnected::configure(const CLCompileContext &compile_context, ITenso src->tensor_shape(), fc_info.weights_trained_layout); - weights_used = &_converted_weights; - _weights_to_use_idx = offset_int_vec(ConvertedWeights); - _are_weights_converted = false; + weights_used = &_converted_weights; + _weights_to_use_idx = offset_int_vec(ConvertedWeights); + _run_convert_weights = true; } if(_is_fc_after_conv) @@ -398,11 +397,11 @@ void ClFullyConnected::configure(const CLCompileContext &compile_context, ITenso // Keep all the auxiliary tensors in case of dynamic weights as they are recalculated every time _aux_mem[TransposedWeights] = MemoryInfo( offset_int_vec(TransposedWeights), - _dynamic_weights ? MemoryLifetime::Temporary : MemoryLifetime::Prepare, + _dynamic_gemm ? MemoryLifetime::Temporary : MemoryLifetime::Prepare, _reshaped_weights.total_size()); _aux_mem[ConvertedWeights] = MemoryInfo( offset_int_vec(ConvertedWeights), - _dynamic_weights ? MemoryLifetime::Temporary : MemoryLifetime::Prepare, + _dynamic_gemm ? MemoryLifetime::Temporary : MemoryLifetime::Prepare, _converted_weights.total_size()); } else @@ -413,11 +412,11 @@ void ClFullyConnected::configure(const CLCompileContext &compile_context, ITenso _aux_mem[TransposedWeights] = MemoryInfo( offset_int_vec(TransposedWeights), - _dynamic_weights ? MemoryLifetime::Temporary : transposed_wei_lft, + _dynamic_gemm ? MemoryLifetime::Temporary : transposed_wei_lft, _reshaped_weights.total_size()); _aux_mem[ConvertedWeights] = MemoryInfo( offset_int_vec(ConvertedWeights), - _dynamic_weights ? MemoryLifetime::Temporary : converted_wei_lft, + _dynamic_gemm ?
MemoryLifetime::Temporary : converted_wei_lft, _converted_weights.total_size()); } } @@ -434,19 +433,17 @@ Status ClFullyConnected::validate(const ITensorInfo *src, const ITensorInfo *wei ARM_COMPUTE_RETURN_ERROR_ON(fc_info.activation_info.enabled() && is_data_type_quantized(src->data_type()) && fc_info.activation_info.activation() != ActivationLayerInfo::ActivationFunction::RELU && fc_info.activation_info.activation() != ActivationLayerInfo::ActivationFunction::BOUNDED_RELU && fc_info.activation_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU); - const bool weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true; - bool is_fc_after_conv = true; + const bool transpose_weights = fc_info.transpose_weights ? !fc_info.are_weights_reshaped : false; + bool is_fc_after_conv = true; // When using dynamic weights - use matmul kernels. - // Note: MatMul does not support broadcasting or biases so fallback with batched cases or when biases != nullptr. - // Note: Pre-Shaped RHS is a deprecated use case and is therefore not supported with matmul. - const bool dynamic_weights = !weights->are_values_constant() && !weights_reshaped; + // Note: MatMul does not support broadcasting so fallback with batched cases. const bool is_batched_fc_layer = dst->dimension(1) > 1; - const bool use_matmul = dynamic_weights && !is_batched_fc_layer && (biases == nullptr); + const bool use_matmul = !weights->are_values_constant() && !is_batched_fc_layer; const ITensorInfo &flatten_src = TensorInfo(src->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_flatten_shape(src)).set_data_layout(DataLayout::NCHW)); const ITensorInfo &reshaped_weights = TensorInfo(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_transposed_shape(*weights))); - const ITensorInfo &converted_weights = weights_reshaped ? TensorInfo(weights->clone()->set_is_resizable(true).reset_padding()) : TensorInfo(*reshaped_weights.clone()); + const ITensorInfo &converted_weights = transpose_weights ? TensorInfo(*reshaped_weights.clone()) : TensorInfo(weights->clone()->set_is_resizable(true).reset_padding()); // With the Fully Connected layer we can have 4 different cases: // 1) Convolution layer -> Fully Connected layer without batches @@ -482,7 +479,8 @@ Status ClFullyConnected::validate(const ITensorInfo *src, const ITensorInfo *wei is_fc_after_conv = src->num_dimensions() > 1; } - if(!weights_reshaped && !use_matmul) + // Transpose kernel does not run when matmul is supported as matmul fuses transpose op. + if(transpose_weights && !use_matmul) { // Validate reshape weights kernel ARM_COMPUTE_RETURN_ON_ERROR(ClTranspose::validate(weights, &reshaped_weights)); @@ -502,14 +500,9 @@ Status ClFullyConnected::validate(const ITensorInfo *src, const ITensorInfo *wei if(is_fc_after_conv) { // Fully Connected layer after a Convolution Layer without batches - if(use_matmul) - { - ARM_COMPUTE_RETURN_ERROR_ON((weights_to_use->dimension(0) != (src->dimension(0) * src->dimension(1) * src->dimension(2)))); - } - else - { - ARM_COMPUTE_RETURN_ERROR_ON((weights_to_use->dimension(1) != (src->dimension(0) * src->dimension(1) * src->dimension(2)))); - } + // K Index of matrix multiplication. MatMul performs transpose in kernel, so index is 0 when matmul and transpose enabled + const int weight_idx = (use_matmul && transpose_weights) ? 
0 : 1; + ARM_COMPUTE_RETURN_ERROR_ON((weights_to_use->dimension(weight_idx) != (src->dimension(0) * src->dimension(1) * src->dimension(2)))); // Validate flatten kernel ARM_COMPUTE_RETURN_ON_ERROR(ClFlatten::validate(src, &flatten_src)); @@ -518,7 +511,9 @@ Status ClFullyConnected::validate(const ITensorInfo *src, const ITensorInfo *wei else { // Fully Connected layer after a Fully Connected Layer without batches - ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(0) != weights_to_use->dimension((use_matmul) ? 0 : 1)); + // K Index of matrix multiplication. MatMul performs transpose in kernel, so index is 0 when matmul and transpose enabled + const int weight_idx = (use_matmul && transpose_weights) ? 0 : 1; + ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(0) != weights_to_use->dimension(weight_idx)); } // Validate matrix multiply kernel @@ -533,7 +528,7 @@ void ClFullyConnected::run(ITensorPack &tensors) #ifdef ARM_COMPUTE_ASSERTS_ENABLED ++_asrt_run_count; - ARM_COMPUTE_ERROR_ON(_dynamic_weights && _asrt_prepare_count != _asrt_run_count); + ARM_COMPUTE_ERROR_ON(_dynamic_gemm && _asrt_prepare_count != _asrt_run_count); #endif // ARM_COMPUTE_ASSERTS_ENABLED auto src = tensors.get_const_tensor(ACL_SRC_0); @@ -584,11 +579,12 @@ void ClFullyConnected::run(ITensorPack &tensors) void ClFullyConnected::prepare(ITensorPack &tensors) { - if(!_is_prepared || _dynamic_weights) + // Note : Running prepare() each run when _use_matmul is true is unnecessary unless weights conversion is needed. + if(!_is_prepared || _dynamic_gemm || (_use_matmul && _run_convert_weights)) { #ifdef ARM_COMPUTE_ASSERTS_ENABLED ++_asrt_prepare_count; - ARM_COMPUTE_ERROR_ON(!_dynamic_weights && _asrt_prepare_count > 1); + ARM_COMPUTE_ERROR_ON(!_dynamic_gemm && !_use_matmul && _asrt_prepare_count > 1); #endif // ARM_COMPUTE_ASSERTS_ENABLED auto weights = tensors.get_const_tensor(ACL_SRC_1); @@ -599,8 +595,8 @@ void ClFullyConnected::prepare(ITensorPack &tensors) // Pointer to current weights const ITensor *cur_weights = weights; - // Reshape of the weights if needed - if(!_are_weights_reshaped && !_use_matmul) + // Reshape weights if needed. Disabled when matmul kernels are enabled as matmul fuses transpose. 
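+        // (When matmul is used instead, MatMulInfo::adj_rhs flags the kernel to read the untransposed weights directly, so no transposed copy is ever materialised.)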
+ if(_transpose_weights && !_use_matmul) { // Run reshape weights kernel and mark weights as unused ITensorPack transpose_pack{ { ACL_SRC, weights }, { ACL_DST, reshaped_weights.get() } }; @@ -611,7 +607,7 @@ void ClFullyConnected::prepare(ITensorPack &tensors) } // Convert weights if needed - if(!_are_weights_converted) + if(_run_convert_weights) { ITensorPack convert_pack{ { ACL_SRC, cur_weights }, { ACL_DST, converted_weights.get() } }; _convert_weights->run(convert_pack); @@ -623,8 +619,8 @@ void ClFullyConnected::prepare(ITensorPack &tensors) ITensorPack gemm_pack = tensors; gemm_pack.add_const_tensor(ACL_SRC_1, cur_weights); - // Prepare GEMM prepare and release unused weights (If not using matmul) - if(!_use_matmul) + // Prepare GEMM prepare and release unused weights + if(_dynamic_gemm || !_use_matmul) { if(!_is_quantized) { diff --git a/src/gpu/cl/operators/ClFullyConnected.h b/src/gpu/cl/operators/ClFullyConnected.h index 9a5ba40510..5a71bd24c7 100644 --- a/src/gpu/cl/operators/ClFullyConnected.h +++ b/src/gpu/cl/operators/ClFullyConnected.h @@ -132,17 +132,18 @@ private: TensorInfo _flattened_src{}; TensorInfo _converted_weights{}; TensorInfo _reshaped_weights{}; - TensorInfo _lhs_to_use{}; + TensorInfo _lhs_to_use{}; TensorInfo _weights_to_use{}; int _weights_to_use_idx{ ACL_SRC_1 }; - bool _are_weights_converted{ true }; - bool _are_weights_reshaped{ true }; + bool _run_convert_weights{ false }; + bool _transpose_weights{ false }; + bool _dynamic_gemm{ false }; + bool _use_matmul{ false }; + bool _is_fc_after_conv{ true }; bool _is_quantized{ false }; bool _is_prepared{ false }; - bool _dynamic_weights{ false }; - bool _use_matmul{ false }; #ifdef ARM_COMPUTE_ASSERTS_ENABLED int _asrt_run_count {}; diff --git a/src/gpu/cl/operators/ClMatMul.cpp b/src/gpu/cl/operators/ClMatMul.cpp index c453761a8e..49d14127ca 100644 --- a/src/gpu/cl/operators/ClMatMul.cpp +++ b/src/gpu/cl/operators/ClMatMul.cpp @@ -61,8 +61,8 @@ Status ClMatMul::validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const const bool is_quantized = is_data_type_quantized_asymmetric(lhs->data_type()); - return is_quantized ? ClMatMulLowpNativeKernel::validate(lhs, rhs, dst, kernel_info, act_info) : - ClMatMulNativeKernel::validate(lhs, rhs, dst, kernel_info, act_info); + return is_quantized ? 
ClMatMulLowpNativeKernel::validate(lhs, rhs, nullptr /* bias */, dst, kernel_info, act_info) : + ClMatMulNativeKernel::validate(lhs, rhs, nullptr /* bias */, dst, kernel_info, act_info); } void ClMatMul::configure(const CLCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *dst, const MatMulInfo &matmul_info, const ActivationLayerInfo &act_info) @@ -86,14 +86,14 @@ void ClMatMul::configure(const CLCompileContext &compile_context, ITensorInfo *l _matmul_lowp_native_kernel->set_target(gpu_target); // Configure the low-precision native matrix multiply kernel - _matmul_lowp_native_kernel->configure(compile_context, lhs, rhs, dst, kernel_info, act_info); + _matmul_lowp_native_kernel->configure(compile_context, lhs, rhs, nullptr /* bias */, dst, kernel_info, act_info); } else { _matmul_native_kernel->set_target(gpu_target); // Configure the native matrix multiply kernel - _matmul_native_kernel->configure(compile_context, lhs, rhs, dst, kernel_info, act_info); + _matmul_native_kernel->configure(compile_context, lhs, rhs, nullptr /* bias */, dst, kernel_info, act_info); } } diff --git a/src/runtime/heuristics/matmul_native/ClMatMulNativeHelpers.cpp b/src/runtime/heuristics/matmul_native/ClMatMulNativeHelpers.cpp index b9e0d5adf8..1e06e84d4d 100644 --- a/src/runtime/heuristics/matmul_native/ClMatMulNativeHelpers.cpp +++ b/src/runtime/heuristics/matmul_native/ClMatMulNativeHelpers.cpp @@ -52,7 +52,7 @@ MatMulKernelInfo select_info(const MatMulKernelInfo &info0, if(rhs_lock_padding == false) { - if(bool(opencl::kernels::ClMatMulNativeKernel::validate(&lhs_info, &rhs_info, &dst_info, info0))) + if(bool(opencl::kernels::ClMatMulNativeKernel::validate(&lhs_info, &rhs_info, nullptr, &dst_info, info0))) { return info0; } diff --git a/tests/validation/CL/MatMulKernel.cpp b/tests/validation/CL/MatMulKernel.cpp index ff872aaa0a..b47f8bc924 100644 --- a/tests/validation/CL/MatMulKernel.cpp +++ b/tests/validation/CL/MatMulKernel.cpp @@ -75,6 +75,9 @@ const auto k0_values_nightly_lhs_t_rhs_nt = framework::dataset::make("K0", { 1, template <typename T> using CLMatMulKernelFixture = MatMulKernelValidationFixture<T, ClMatMulNativeKernel>; +template <typename T> +using CLMatMulKernelBiasFixture = MatMulKernelWithBiasValidation<T, ClMatMulNativeKernel>; + TEST_SUITE(CL) TEST_SUITE(MatMulKernel) TEST_SUITE(Validate) @@ -162,7 +165,7 @@ TEST_CASE(SupportedBlockSizes, framework::DatasetMode::ALL) for(auto &pair : supported_block_sizes) { TensorInfo output_info; - Status status = ClMatMulNativeKernel::validate(&lhs_info, &rhs_info, &output_info, pair.first); + Status status = ClMatMulNativeKernel::validate(&lhs_info, &rhs_info, nullptr, &output_info, pair.first); if(!pair.first.export_rhs_to_cl_image || export_to_cl_image_supported) { @@ -222,7 +225,7 @@ TEST_CASE(ExportToCLImage, framework::DatasetMode::ALL) }; TensorInfo output_info; - Status status = ClMatMulNativeKernel::validate(&lhs_info, &rhs_info, &output_info, matmul_kernel_info); + Status status = ClMatMulNativeKernel::validate(&lhs_info, &rhs_info, nullptr, &output_info, matmul_kernel_info); const bool expected = std::get<4>(tuple); ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS); @@ -233,22 +236,25 @@ TEST_CASE(ExportToCLImage, framework::DatasetMode::ALL) TEST_CASE(ValidateInputShapes, framework::DatasetMode::ALL) { // Configurations are assumed to be Nt/Nt, but will be transposed inside the test to test other configurations - using ShapeConfigurationTuple = std::tuple<TensorShape, TensorShape, bool>; + using ShapeConfigurationTuple = std::tuple<TensorShape, TensorShape, TensorShape, bool>; const std::vector<ShapeConfigurationTuple> shape_configurations = { - { TensorShape(5U,
1U), TensorShape(3U, 5U), true }, - { TensorShape(10U, 12U), TensorShape(3U, 10U), true }, - { TensorShape(8U, 4U), TensorShape(2U, 8U), true }, - { TensorShape(8U, 4U), TensorShape(2U, 5U), false }, // Mismatch in the K dimension - { TensorShape(5U, 0U), TensorShape(2U, 5U), false }, // Invalid dimension - { TensorShape(5U, 4U, 3U, 4U, 5U, 6U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), true }, - { TensorShape(5U, 4U, 3U, 4U, 5U, 1U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), false }, // no batch broadcasting - { TensorShape(5U, 4U, 3U, 4U, 9U, 6U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), false }, // mismatch in batch dimension + { TensorShape(5U, 1U), TensorShape(3U, 5U), TensorShape(3U), true }, + { TensorShape(10U, 12U), TensorShape(3U, 10U), TensorShape(3U), true }, + { TensorShape(8U, 4U), TensorShape(2U, 8U), TensorShape(2U), true }, + { TensorShape(8U, 4U), TensorShape(2U, 5U), TensorShape(2U), false }, // Mismatch in the K dimension + { TensorShape(5U, 0U), TensorShape(2U, 5U), TensorShape(2U), false }, // Invalid dimension + { TensorShape(5U, 4U, 3U, 4U, 5U, 6U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), TensorShape(2U), true }, + { TensorShape(5U, 4U, 3U, 4U, 5U, 1U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), TensorShape(2U), false }, // no batch broadcasting + { TensorShape(5U, 4U, 3U, 4U, 9U, 6U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), TensorShape(2U), false }, // mismatch in batch dimension + { TensorShape(5U, 1U), TensorShape(3U, 5U), TensorShape(1U), false }, // Unsupported bias broadcasting. + { TensorShape(5U, 1U), TensorShape(3U, 5U), TensorShape(3U, 3U), false }, // 2D bias is unsupported. + { TensorShape(5U, 1U), TensorShape(3U, 5U), TensorShape(6U), false }, // bias first dimension != dst first dimension }; for(auto &tuple : shape_configurations) { - const bool expected = std::get<2>(tuple); + const bool expected = std::get<3>(tuple); for(bool adj_lhs : { @@ -262,6 +268,7 @@ TEST_CASE(ValidateInputShapes, framework::DatasetMode::ALL) { TensorShape lhs_shape = std::get<0>(tuple); TensorShape rhs_shape = std::get<1>(tuple); + TensorShape bia_shape = std::get<2>(tuple); if(adj_lhs) { @@ -275,11 +282,12 @@ TEST_CASE(ValidateInputShapes, framework::DatasetMode::ALL) const TensorInfo lhs_info = TensorInfo(lhs_shape, 1, DataType::F32); const TensorInfo rhs_info = TensorInfo(rhs_shape, 1, DataType::F32); + const TensorInfo bia_info = TensorInfo(bia_shape, 1, DataType::F32); TensorInfo output_info; MatMulKernelInfo matmul_kernel_info{ adj_lhs, adj_rhs, 1, 1, 1, false /* export_rhs_to_cl_image */ }; - Status status = ClMatMulNativeKernel::validate(&lhs_info, &rhs_info, &output_info, matmul_kernel_info); + Status status = ClMatMulNativeKernel::validate(&lhs_info, &rhs_info, &bia_info, &output_info, matmul_kernel_info); ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS); } } @@ -322,7 +330,7 @@ TEST_CASE(ValidateDataTypes, framework::DatasetMode::ALL) const TensorInfo rhs_info(shape, 1, std::get<1>(tuple)); TensorInfo output_info(shape, 1, std::get<2>(tuple)); - Status status = ClMatMulNativeKernel::validate(&lhs_info, &rhs_info, &output_info, matmul_kernel_info); + Status status = ClMatMulNativeKernel::validate(&lhs_info, &rhs_info, nullptr, &output_info, matmul_kernel_info); ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS); } } @@ -356,6 +364,19 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLMatMulKernelFixture, framework::Datase // Validate output validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32); } 
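+// RunWithBias drives the same precommit shapes through MatMulKernelWithBiasValidation, where the reference
+// output is the plain matmul result plus a 1-D bias of length N broadcast over every output row. Conceptually
+// (a sketch only - the loop bounds and names are illustrative, not the fixture's actual helper):
+//
+//   for(int m = 0; m < M; ++m)          // each output row of an M x N result
+//   {
+//       for(int n = 0; n < N; ++n)
+//       {
+//           dst[m * N + n] += bias[n];  // same bias element for every row, mirroring perform_bias_addition() in the kernels
+//       }
+//   }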
+FIXTURE_DATA_TEST_CASE(RunWithBias, CLMatMulKernelBiasFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDataset(), + framework::dataset::make("TransposeA", { false, true })), + framework::dataset::make("TransposeB", { false, true })), + m0_values_precommit), + n0_values_precommit), + k0_values_precommit), + framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::F32))) + +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32); +} FIXTURE_DATA_TEST_CASE(RunLargeNoTranspose, CLMatMulKernelFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(), framework::dataset::make("TransposeA", { false })), framework::dataset::make("TransposeB", { false })), diff --git a/tests/validation/CL/MatMulLowpNativeKernel.cpp b/tests/validation/CL/MatMulLowpNativeKernel.cpp index fd7a4cb156..90eee4fb82 100644 --- a/tests/validation/CL/MatMulLowpNativeKernel.cpp +++ b/tests/validation/CL/MatMulLowpNativeKernel.cpp @@ -49,6 +49,9 @@ constexpr AbsoluteTolerance<float> tolerance_quant(1); /**< Tolerance value for template <typename T> using CLMatMulLowpNativeKernelFixture = MatMulKernelValidationFixture<T, ClMatMulLowpNativeKernel>; +template <typename T> +using CLMatMulLowpKernelWithBiasFixture = MatMulKernelWithBiasValidation<T, ClMatMulLowpNativeKernel>; + /** M0 values to test --precommit*/ const auto m0_values_precommit = framework::dataset::make("M0", { 1, 3 }); @@ -103,7 +106,7 @@ TEST_CASE(SupportedKernelConfigurations, framework::DatasetMode::ALL) for(auto &pair : supported_block_sizes) { TensorInfo output_info; - Status status = ClMatMulLowpNativeKernel::validate(&lhs_info, &rhs_info, &output_info, pair.first); + Status status = ClMatMulLowpNativeKernel::validate(&lhs_info, &rhs_info, nullptr, &output_info, pair.first); ARM_COMPUTE_EXPECT(bool(status) == pair.second, framework::LogLevel::ERRORS); } @@ -112,22 +115,24 @@ TEST_CASE(ValidateInputShapes, framework::DatasetMode::ALL) { // Configurations are assumed to be Nt/Nt, but will be transposed inside the test to test other configurations - using ShapeConfigurationTuple = std::tuple<TensorShape, TensorShape, bool>; + using ShapeConfigurationTuple = std::tuple<TensorShape, TensorShape, TensorShape, bool>; const std::vector<ShapeConfigurationTuple> shape_configurations = { - { TensorShape(5U, 1U), TensorShape(3U, 5U), true }, - { TensorShape(10U, 12U), TensorShape(3U, 10U), true }, - { TensorShape(8U, 4U), TensorShape(2U, 8U), true }, - { TensorShape(8U, 4U), TensorShape(2U, 5U), false }, // Mismatch in the K dimension - { TensorShape(5U, 0U), TensorShape(2U, 5U), false }, // Invalid dimension - { TensorShape(5U, 4U, 3U, 4U, 5U, 6U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), true }, - { TensorShape(5U, 4U, 3U, 4U, 5U, 1U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), false }, // no batch broadcasting - { TensorShape(5U, 4U, 3U, 4U, 9U, 6U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), false }, // mismatch in batch dimension + { TensorShape(5U, 1U), TensorShape(3U, 5U), TensorShape(3U), true }, + { TensorShape(10U, 12U), TensorShape(3U, 10U), TensorShape(3U), true }, + { TensorShape(8U, 4U), TensorShape(2U, 8U), TensorShape(2U), true }, + { TensorShape(8U, 4U), TensorShape(2U, 5U), TensorShape(2U), false }, // Mismatch in the K dimension + { TensorShape(5U, 0U), TensorShape(2U, 5U), TensorShape(2U), false }, // Invalid dimension + { TensorShape(5U, 4U, 3U, 4U, 5U, 6U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), TensorShape(2U), true }, + { TensorShape(5U, 4U, 3U, 4U,
@@ -112,22 +115,24 @@ TEST_CASE(ValidateInputShapes, framework::DatasetMode::ALL)
 {
     // Configurations are assumed to be Nt/Nt, but will be transposed inside the test to test other configurations
-    using ShapeConfigurationTuple = std::tuple<TensorShape, TensorShape, bool>;
+    using ShapeConfigurationTuple = std::tuple<TensorShape, TensorShape, TensorShape, bool>;
     const std::vector<ShapeConfigurationTuple> shape_configurations =
     {
-        { TensorShape(5U, 1U), TensorShape(3U, 5U), true },
-        { TensorShape(10U, 12U), TensorShape(3U, 10U), true },
-        { TensorShape(8U, 4U), TensorShape(2U, 8U), true },
-        { TensorShape(8U, 4U), TensorShape(2U, 5U), false }, // Mismatch in the K dimension
-        { TensorShape(5U, 0U), TensorShape(2U, 5U), false }, // Invalid dimension
-        { TensorShape(5U, 4U, 3U, 4U, 5U, 6U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), true },
-        { TensorShape(5U, 4U, 3U, 4U, 5U, 1U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), false }, // no batch broadcasting
-        { TensorShape(5U, 4U, 3U, 4U, 9U, 6U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), false }, // mismatch in batch dimension
+        { TensorShape(5U, 1U), TensorShape(3U, 5U), TensorShape(3U), true },
+        { TensorShape(10U, 12U), TensorShape(3U, 10U), TensorShape(3U), true },
+        { TensorShape(8U, 4U), TensorShape(2U, 8U), TensorShape(2U), true },
+        { TensorShape(8U, 4U), TensorShape(2U, 5U), TensorShape(2U), false }, // Mismatch in the K dimension
+        { TensorShape(5U, 0U), TensorShape(2U, 5U), TensorShape(2U), false }, // Invalid dimension
+        { TensorShape(5U, 4U, 3U, 4U, 5U, 6U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), TensorShape(2U), true },
+        { TensorShape(5U, 4U, 3U, 4U, 5U, 1U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), TensorShape(2U), false }, // no batch broadcasting
+        { TensorShape(5U, 4U, 3U, 4U, 9U, 6U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), TensorShape(2U), false }, // mismatch in batch dimension
+        { TensorShape(5U, 1U), TensorShape(3U, 5U), TensorShape(1U), false }, // invalid broadcast of bias
+        { TensorShape(5U, 1U), TensorShape(3U, 5U), TensorShape(3U, 3U), false }, // 2d bias is invalid
     };
 
     for(auto &tuple : shape_configurations)
     {
-        const bool expected = std::get<2>(tuple);
+        const bool expected = std::get<3>(tuple);
 
         for(bool adj_lhs :
             {
@@ -141,6 +146,7 @@ TEST_CASE(ValidateInputShapes, framework::DatasetMode::ALL)
         {
             TensorShape lhs_shape = std::get<0>(tuple);
             TensorShape rhs_shape = std::get<1>(tuple);
+            TensorShape bia_shape = std::get<2>(tuple);
 
             if(adj_lhs)
             {
@@ -154,11 +160,12 @@ TEST_CASE(ValidateInputShapes, framework::DatasetMode::ALL)
 
             const TensorInfo lhs_info = TensorInfo(lhs_shape, 1, DataType::QASYMM8_SIGNED);
             const TensorInfo rhs_info = TensorInfo(rhs_shape, 1, DataType::QASYMM8_SIGNED);
+            const TensorInfo bia_info = TensorInfo(bia_shape, 1, DataType::S32);
             TensorInfo       output_info;
 
             MatMulKernelInfo matmul_kernel_info{ adj_lhs, adj_rhs, 1, 1, 1, false /* export_rhs_to_cl_image */ };
 
-            Status status = ClMatMulLowpNativeKernel::validate(&lhs_info, &rhs_info, &output_info, matmul_kernel_info);
+            Status status = ClMatMulLowpNativeKernel::validate(&lhs_info, &rhs_info, &bia_info, &output_info, matmul_kernel_info);
             ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
         }
     }
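Note: the two new failing rows above pin down the bias shape contract: the bias must be a 1-D tensor whose single dimension equals the output's first dimension (N); it is then broadcast across M and across all batch dimensions. A sketch of that predicate, as a hypothetical helper rather than the library's actual implementation:

    // Returns true when a bias shape is broadcastable onto a matmul output.
    bool is_valid_matmul_bias(const TensorShape &bias, const TensorShape &dst)
    {
        return bias.num_dimensions() == 1 && bias[0] == dst[0];
    }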
@@ -167,41 +174,44 @@
 
 TEST_CASE(ValidateDataTypes, framework::DatasetMode::ALL)
 {
-    using DataTypeConfigurationTuple = std::tuple<DataType, DataType, DataType, bool>;
+    using DataTypeConfigurationTuple = std::tuple<DataType, DataType, DataType, DataType, bool>;
     const std::vector<DataTypeConfigurationTuple> data_type_configurations =
     {
-        { DataType::F32, DataType::F32, DataType::F32, false },                               // no floating point types
-        { DataType::F16, DataType::F16, DataType::F16, false },                               // no floating point types
-        { DataType::F64, DataType::F64, DataType::F64, false },                               // no double precision
-        { DataType::QASYMM8, DataType::QASYMM8, DataType::QASYMM8, true },
-        { DataType::QASYMM8_SIGNED, DataType::QASYMM8_SIGNED, DataType::QASYMM8_SIGNED, true },
-        { DataType::QSYMM8_PER_CHANNEL, DataType::QSYMM8_PER_CHANNEL, DataType::QSYMM8_PER_CHANNEL, false }, // only qasymm8/qasymm8_signed is supported
-        { DataType::QASYMM16, DataType::QASYMM16, DataType::QASYMM16, false },                // only qasymm8/qasymm8_signed is supported
-        { DataType::QSYMM16, DataType::QSYMM16, DataType::QSYMM16, false },                   // only qasymm8/qasymm8_signed is supported
-        { DataType::QSYMM8, DataType::QSYMM8, DataType::QSYMM8, false },                      // only qasymm8/qasymm8_signed is supported
-        { DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QASYMM8, false },            // no mixed data types
-        { DataType::S64, DataType::S64, DataType::S64, false },                               // no integral types
-        { DataType::S32, DataType::S32, DataType::S32, false },                               // no integral types
-        { DataType::S16, DataType::S16, DataType::S16, false },                               // no integral types
-        { DataType::S8, DataType::S8, DataType::S8, false },                                  // no integral types
-        { DataType::U64, DataType::U64, DataType::U64, false },                               // no integral types
-        { DataType::U32, DataType::U32, DataType::U32, false },                               // no integral types
-        { DataType::U16, DataType::U16, DataType::U16, false },                               // no integral types
-        { DataType::U8, DataType::U8, DataType::U8, false },                                  // no integral types
+        { DataType::F32, DataType::F32, DataType::F32, DataType::F32, false },                // no floating point types
+        { DataType::F16, DataType::F16, DataType::F16, DataType::F16, false },                // no floating point types
+        { DataType::F64, DataType::F64, DataType::F64, DataType::F64, false },                // no double precision
+        { DataType::QASYMM8, DataType::QASYMM8, DataType::S32, DataType::QASYMM8, true },
+        { DataType::QASYMM8_SIGNED, DataType::QASYMM8_SIGNED, DataType::S32, DataType::QASYMM8_SIGNED, true },
+        { DataType::QSYMM8_PER_CHANNEL, DataType::QSYMM8_PER_CHANNEL, DataType::S32, DataType::QSYMM8_PER_CHANNEL, false }, // only qasymm8/qasymm8_signed is supported
+        { DataType::QASYMM16, DataType::QASYMM16, DataType::S32, DataType::QASYMM16, false }, // only qasymm8/qasymm8_signed is supported
+        { DataType::QSYMM16, DataType::QSYMM16, DataType::S32, DataType::QSYMM16, false },    // only qasymm8/qasymm8_signed is supported
+        { DataType::QSYMM8, DataType::QSYMM8, DataType::S32, DataType::QSYMM8, false },       // only qasymm8/qasymm8_signed is supported
+        { DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S32, DataType::QASYMM8, false }, // no mixed data types
+        { DataType::S64, DataType::S64, DataType::S64, DataType::S64, false },                // no integral types
+        { DataType::S32, DataType::S32, DataType::S32, DataType::S32, false },                // no integral types
+        { DataType::S16, DataType::S16, DataType::S16, DataType::S16, false },                // no integral types
+        { DataType::S8, DataType::S8, DataType::S8, DataType::S8, false },                    // no integral types
+        { DataType::U64, DataType::U64, DataType::U64, DataType::U64, false },                // no integral types
+        { DataType::U32, DataType::U32, DataType::U32, DataType::U32, false },                // no integral types
+        { DataType::U16, DataType::U16, DataType::U16, DataType::U16, false },                // no integral types
+        { DataType::U8, DataType::U8, DataType::U8, DataType::U8, false },                    // no integral types
+        { DataType::QASYMM8, DataType::QASYMM8, DataType::F32, DataType::QASYMM8, false }     // Only S32 bias is supported
     };
 
     // It's enough to test a single shape and block size configuration while checking data types
-    const TensorShape shape = TensorShape(10U, 10U);
+    const TensorShape      shape     = TensorShape(10U, 10U);
+    const TensorShape      bia_shape = TensorShape(10U);
     const MatMulKernelInfo matmul_kernel_info{ false, false, 1, 1, 1, false };
     for(auto &tuple : data_type_configurations)
     {
-        const bool expected = std::get<3>(tuple);
+        const bool expected = std::get<4>(tuple);
 
         const TensorInfo lhs_info(shape, 1, std::get<0>(tuple));
         const TensorInfo rhs_info(shape, 1, std::get<1>(tuple));
-        TensorInfo       output_info(shape, 1, std::get<2>(tuple));
+        const TensorInfo bia_info(bia_shape, 1, std::get<2>(tuple));
+        TensorInfo       output_info(shape, 1, std::get<3>(tuple));
 
-        Status status = ClMatMulLowpNativeKernel::validate(&lhs_info, &rhs_info, &output_info, matmul_kernel_info);
+        Status status = ClMatMulLowpNativeKernel::validate(&lhs_info, &rhs_info, &bia_info, &output_info, matmul_kernel_info);
         ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
     }
 }
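Note: the last row above encodes the rule that the quantized kernel only accepts an S32 bias. The reason is that 8-bit products are accumulated in int32 and the bias is added in that domain before requantization. A self-contained sketch of that arithmetic; the scale value is a made-up example, not taken from the tests:

    #include <cstdint>

    int8_t dot4_with_s32_bias()
    {
        const int8_t  lhs[4] = { 1, -2, 3, 4 };
        const int8_t  rhs[4] = { 5, 6, -7, 8 };
        const int32_t bias   = 100; // must be S32, like the kernel's bias input
        int32_t       acc    = 0;
        for(int k = 0; k < 4; ++k)
        {
            acc += int32_t(lhs[k]) * int32_t(rhs[k]); // int32 accumulation
        }
        acc += bias;                                      // bias added in the S32 domain
        const float requant_scale = 0.05f;                // hypothetical combined scale
        return static_cast<int8_t>(acc * requant_scale);  // offset and clamping omitted
    }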
framework::dataset::make("ExportRhsToCLImage", { false })), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_quant); +} FIXTURE_DATA_TEST_CASE(RunLargeNoTranspose, CLMatMulLowpNativeKernelFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulDataset(), framework::dataset::make("TransposeA", { false })), diff --git a/tests/validation/CL/MatMulNativeMMULKernel.cpp b/tests/validation/CL/MatMulNativeMMULKernel.cpp index b63af75169..70c80985db 100644 --- a/tests/validation/CL/MatMulNativeMMULKernel.cpp +++ b/tests/validation/CL/MatMulNativeMMULKernel.cpp @@ -70,6 +70,9 @@ const auto k0_value = framework::dataset::make("K0", { 1 }); template using CLMatMulNativeMMULKernelFixture = MatMulKernelValidationFixture; +template +using CLMatMulKernelBiasFixture = MatMulKernelWithBiasValidation; + TEST_SUITE(CL) TEST_SUITE(MatMulNativeMMULKernel) TEST_SUITE(Validate) @@ -117,7 +120,7 @@ TEST_CASE(SupportedBlockSizes, framework::DatasetMode::ALL) { MatMulKernelInfo(true, true, 3, 7, 1), false }, // N0 not in {1, 2, 3, 4, 8, 16} { MatMulKernelInfo(true, true, 6, 3, 1), false }, // M0 not in {1, 2, 3, 4, 8, 16} { MatMulKernelInfo(true, true, 5, 3, 1), false }, // M0 not in {1, 2, 3, 4, 8, 16} - { MatMulKernelInfo(true, true, 4, 8, 2), false }, // K0 is not 1 + { MatMulKernelInfo(true, true, 4, 8, 2), false }, // K0 is not 1 { MatMulKernelInfo(true, true, 4, 8, 1), true }, { MatMulKernelInfo(true, true, 3, 3, 1), true }, { MatMulKernelInfo(true, true, 16, 4, 1), true }, @@ -132,7 +135,7 @@ TEST_CASE(SupportedBlockSizes, framework::DatasetMode::ALL) for(auto &pair : supported_block_sizes) { TensorInfo output_info; - Status status = ClMatMulNativeMMULKernel::validate(&lhs_info, &rhs_info, &output_info, pair.first); + Status status = ClMatMulNativeMMULKernel::validate(&lhs_info, &rhs_info, nullptr, &output_info, pair.first); ARM_COMPUTE_EXPECT(bool(status) == pair.second, framework::LogLevel::ERRORS); } } @@ -148,28 +151,30 @@ TEST_CASE(ValidateInputShapes, framework::DatasetMode::ALL) if(arm_matrix_multiply_supported(CLKernelLibrary::get().get_device())) { // Configurations are assumed to be Nt/Nt, but will be transposed inside the test to test other configurations - using ShapeConfigurationTuple = std::tuple; + using ShapeConfigurationTuple = std::tuple; // lhs, rhs, bias, result const std::vector shape_configurations = { - { TensorShape(4U, 1U), TensorShape(3U, 4U), true }, - { TensorShape(12U, 12U), TensorShape(3U, 12U), true }, - { TensorShape(8U, 4U), TensorShape(2U, 8U), true }, - { TensorShape(8U, 4U), TensorShape(2U, 4U), false }, // Mismatch in the K dimension - { TensorShape(5U, 0U), TensorShape(2U, 5U), false }, // Invalid dimension - { TensorShape(5U, 7U), TensorShape(2U, 5U), false }, // K not a multiple of 4 (MMUL_K0) - { TensorShape(8U, 4U, 3U, 4U, 5U, 6U), TensorShape(2U, 8U, 3U, 4U, 5U, 6U), true }, - { TensorShape(5U, 4U, 3U, 4U, 5U, 1U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), false }, // No batch broadcasting - { TensorShape(5U, 4U, 3U, 4U, 9U, 6U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), false }, // Mismatch in batch dimension + { TensorShape(4U, 1U), TensorShape(3U, 4U), TensorShape(3U), true }, + { TensorShape(12U, 12U), TensorShape(3U, 12U), TensorShape(3U), true }, + { TensorShape(8U, 4U), TensorShape(2U, 8U), TensorShape(2U), true }, + { TensorShape(8U, 4U), TensorShape(2U, 4U), TensorShape(2U), false }, // Mismatch in the K 
@@ -148,28 +151,30 @@ TEST_CASE(ValidateInputShapes, framework::DatasetMode::ALL)
     if(arm_matrix_multiply_supported(CLKernelLibrary::get().get_device()))
     {
         // Configurations are assumed to be Nt/Nt, but will be transposed inside the test to test other configurations
-        using ShapeConfigurationTuple = std::tuple<TensorShape, TensorShape, bool>;
+        using ShapeConfigurationTuple = std::tuple<TensorShape, TensorShape, TensorShape, bool>; // lhs, rhs, bias, result
        const std::vector<ShapeConfigurationTuple> shape_configurations =
         {
-            { TensorShape(4U, 1U), TensorShape(3U, 4U), true },
-            { TensorShape(12U, 12U), TensorShape(3U, 12U), true },
-            { TensorShape(8U, 4U), TensorShape(2U, 8U), true },
-            { TensorShape(8U, 4U), TensorShape(2U, 4U), false }, // Mismatch in the K dimension
-            { TensorShape(5U, 0U), TensorShape(2U, 5U), false }, // Invalid dimension
-            { TensorShape(5U, 7U), TensorShape(2U, 5U), false }, // K not a multiple of 4 (MMUL_K0)
-            { TensorShape(8U, 4U, 3U, 4U, 5U, 6U), TensorShape(2U, 8U, 3U, 4U, 5U, 6U), true },
-            { TensorShape(5U, 4U, 3U, 4U, 5U, 1U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), false }, // No batch broadcasting
-            { TensorShape(5U, 4U, 3U, 4U, 9U, 6U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), false }, // Mismatch in batch dimension
+            { TensorShape(4U, 1U), TensorShape(3U, 4U), TensorShape(3U), true },
+            { TensorShape(12U, 12U), TensorShape(3U, 12U), TensorShape(3U), true },
+            { TensorShape(8U, 4U), TensorShape(2U, 8U), TensorShape(2U), true },
+            { TensorShape(8U, 4U), TensorShape(2U, 4U), TensorShape(2U), false }, // Mismatch in the K dimension
+            { TensorShape(5U, 0U), TensorShape(2U, 5U), TensorShape(2U), false }, // Invalid dimension
+            { TensorShape(5U, 7U), TensorShape(2U, 5U), TensorShape(2U), false }, // K not a multiple of 4 (MMUL_K0)
+            { TensorShape(8U, 4U, 3U, 4U, 5U, 6U), TensorShape(2U, 8U, 3U, 4U, 5U, 6U), TensorShape(2U), true },
+            { TensorShape(5U, 4U, 3U, 4U, 5U, 1U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), TensorShape(2U), false }, // No batch broadcasting
+            { TensorShape(5U, 4U, 3U, 4U, 9U, 6U), TensorShape(2U, 5U, 3U, 4U, 5U, 6U), TensorShape(2U), false }, // Mismatch in batch dimension
+            { TensorShape(4U, 1U), TensorShape(3U, 4U), TensorShape(1U), false }, // Bias first dimension != dst first dimension
+            { TensorShape(4U, 1U), TensorShape(3U, 4U), TensorShape(5U, 6U), false }, // Bias is 2d which is invalid
         };
 
         for(auto &tuple : shape_configurations)
         {
-            const bool expected = std::get<2>(tuple);
+            const bool expected = std::get<3>(tuple);
 
-        for(bool adj_lhs :
-        {
-            false, true
-        })
+            for(bool adj_lhs :
+                {
+                    false, true
+                })
             {
                 for(bool adj_rhs :
                 {
@@ -178,6 +183,7 @@ TEST_CASE(ValidateInputShapes, framework::DatasetMode::ALL)
             {
                 TensorShape lhs_shape = std::get<0>(tuple);
                 TensorShape rhs_shape = std::get<1>(tuple);
+                TensorShape bia_shape = std::get<2>(tuple);
 
                 if(adj_lhs)
                 {
@@ -191,11 +197,12 @@ TEST_CASE(ValidateInputShapes, framework::DatasetMode::ALL)
 
                 const TensorInfo lhs_info = TensorInfo(lhs_shape, 1, DataType::F32);
                 const TensorInfo rhs_info = TensorInfo(rhs_shape, 1, DataType::F32);
+                const TensorInfo bia_info = TensorInfo(bia_shape, 1, DataType::F32);
                 TensorInfo       output_info;
 
                 MatMulKernelInfo matmul_kernel_info{ adj_lhs, adj_rhs, 1, 1, 1, false /* export_rhs_to_cl_image */ };
 
-                Status status = ClMatMulNativeMMULKernel::validate(&lhs_info, &rhs_info, &output_info, matmul_kernel_info);
+                Status status = ClMatMulNativeMMULKernel::validate(&lhs_info, &rhs_info, &bia_info, &output_info, matmul_kernel_info);
                 ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
             }
         }
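Note: besides the shared bias rules, the MMUL variant keeps its own constraint that K must be a non-zero multiple of MMUL_K0 (4), as the "K not a multiple of 4" row exercises. A sketch of that check as a hypothetical helper:

    bool is_valid_mmul_k(unsigned int k)
    {
        constexpr unsigned int mmul_k0 = 4; // MMUL block size along K
        return (k != 0) && (k % mmul_k0 == 0);
    }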
@@ -213,40 +220,44 @@ TEST_CASE(ValidateDataTypes, framework::DatasetMode::ALL)
     if(arm_matrix_multiply_supported(CLKernelLibrary::get().get_device()))
     {
         // Configurations are assumed to be Nt/Nt, but will be transposed inside the test to test other configurations
-        using DataTypeConfigurationTuple = std::tuple<DataType, DataType, DataType, bool>;
+        using DataTypeConfigurationTuple = std::tuple<DataType, DataType, DataType, DataType, bool>;
         const std::vector<DataTypeConfigurationTuple> data_type_configurations =
         {
-            { DataType::F32, DataType::F32, DataType::F32, true },
-            { DataType::F16, DataType::F16, DataType::F16, true },
-            { DataType::F16, DataType::F32, DataType::F32, false },                               // no mixed precision
-            { DataType::F64, DataType::F64, DataType::F64, false },                               // no double precision
-            { DataType::QASYMM8, DataType::QASYMM8, DataType::QASYMM8, false },                   // no quantized types
-            { DataType::QASYMM8_SIGNED, DataType::QASYMM8_SIGNED, DataType::QASYMM8_SIGNED, false }, // no quantized types
-            { DataType::QSYMM8_PER_CHANNEL, DataType::QSYMM8_PER_CHANNEL, DataType::QSYMM8_PER_CHANNEL, false }, // no quantized types
-            { DataType::QASYMM16, DataType::QASYMM16, DataType::QASYMM16, false },                // no quantized types
-            { DataType::QSYMM16, DataType::QSYMM16, DataType::QSYMM16, false },                   // no quantized types
-            { DataType::QSYMM8, DataType::QSYMM8, DataType::QSYMM8, false },                      // no quantized types
-            { DataType::S64, DataType::S64, DataType::S64, false },                               // no integral types
-            { DataType::S32, DataType::S32, DataType::S32, false },                               // no integral types
-            { DataType::S16, DataType::S16, DataType::S16, false },                               // no integral types
-            { DataType::S8, DataType::S8, DataType::S8, false },                                  // no integral types
-            { DataType::U64, DataType::U64, DataType::U64, false },                               // no integral types
-            { DataType::U32, DataType::U32, DataType::U32, false },                               // no integral types
-            { DataType::U16, DataType::U16, DataType::U16, false },                               // no integral types
-            { DataType::U8, DataType::U8, DataType::U8, false },                                  // no integral types
+            { DataType::F32, DataType::F32, DataType::F32, DataType::F32, true },
+            { DataType::F16, DataType::F16, DataType::F16, DataType::F16, true },
+            { DataType::F32, DataType::F32, DataType::F32, DataType::F32, true },
+            { DataType::F32, DataType::F32, DataType::F16, DataType::F32, false },                // incorrect bias type
+            { DataType::F16, DataType::F32, DataType::F32, DataType::F32, false },                // no mixed precision
+            { DataType::F64, DataType::F64, DataType::F64, DataType::F64, false },                // no double precision
+            { DataType::QASYMM8, DataType::QASYMM8, DataType::S32, DataType::QASYMM8, false },    // no quantized types
+            { DataType::QASYMM8_SIGNED, DataType::QASYMM8_SIGNED, DataType::S32, DataType::QASYMM8_SIGNED, false }, // no quantized types
+            { DataType::QSYMM8_PER_CHANNEL, DataType::QSYMM8_PER_CHANNEL, DataType::S32, DataType::QSYMM8_PER_CHANNEL, false }, // no quantized types
+            { DataType::QASYMM16, DataType::QASYMM16, DataType::S32, DataType::QASYMM16, false }, // no quantized types
+            { DataType::QSYMM16, DataType::QSYMM16, DataType::S32, DataType::QSYMM16, false },    // no quantized types
+            { DataType::QSYMM8, DataType::QSYMM8, DataType::S32, DataType::QSYMM8, false },       // no quantized types
+            { DataType::S64, DataType::S64, DataType::S64, DataType::S64, false },                // no integral types
+            { DataType::S32, DataType::S32, DataType::S32, DataType::S32, false },                // no integral types
+            { DataType::S16, DataType::S16, DataType::S16, DataType::S16, false },                // no integral types
+            { DataType::S8, DataType::S8, DataType::S8, DataType::S8, false },                    // no integral types
+            { DataType::U64, DataType::U64, DataType::U64, DataType::U64, false },                // no integral types
+            { DataType::U32, DataType::U32, DataType::U32, DataType::U32, false },                // no integral types
+            { DataType::U16, DataType::U16, DataType::U16, DataType::U16, false },                // no integral types
+            { DataType::U8, DataType::U8, DataType::U8, DataType::U8, false },                    // no integral types
         };
 
-        const TensorShape shape = TensorShape(8U, 8U);
+        const TensorShape      shape     = TensorShape(8U, 8U);
+        const TensorShape      bia_shape = TensorShape(8U);
         const MatMulKernelInfo matmul_kernel_info{ false, false, 1, 1, 1, false };
         for(auto &tuple : data_type_configurations)
         {
-            const bool expected = std::get<3>(tuple);
+            const bool expected = std::get<4>(tuple);
 
             const TensorInfo lhs_info(shape, 1, std::get<0>(tuple));
             const TensorInfo rhs_info(shape, 1, std::get<1>(tuple));
-            TensorInfo       output_info(shape, 1, std::get<2>(tuple));
+            const TensorInfo bia_info(bia_shape, 1, std::get<2>(tuple));
+            TensorInfo       output_info(shape, 1, std::get<3>(tuple));
 
-            Status status = ClMatMulNativeMMULKernel::validate(&lhs_info, &rhs_info, &output_info, matmul_kernel_info);
+            Status status = ClMatMulNativeMMULKernel::validate(&lhs_info, &rhs_info, &bia_info, &output_info, matmul_kernel_info);
             ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
         }
     }
@@ -292,7 +303,23 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLMatMulNativeMMULKernelFixture<float>, framewo
         validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32);
     }
 }
-FIXTURE_DATA_TEST_CASE(RunLargeNoTranspose, CLMatMulNativeMMULKernelFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulMMULDataset(),
+FIXTURE_DATA_TEST_CASE(RunWithBias, CLMatMulKernelBiasFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::SmallMatMulMMULDataset(),
+                       framework::dataset::make("TransposeA", { false, true })),
+                       framework::dataset::make("TransposeB", { false, true })),
+                       m0_values_precommit),
+                       n0_values_precommit),
+                       k0_value),
+                       framework::dataset::make("ExportRhsToCLImage", { false })),
+                       framework::dataset::make("DataType", DataType::F32)))
+{
+    // Validate output
+    if(_device_supports_mmul)
+    {
+        validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32);
+    }
+}
+FIXTURE_DATA_TEST_CASE(RunLargeNoTranspose, CLMatMulNativeMMULKernelFixture<float>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulMMULDataset(),
                        framework::dataset::make("TransposeA", { false })),
                        framework::dataset::make("TransposeB", { false })),
                        m0_values_nightly_lhs_nt),
@@ -308,7 +335,8 @@ FIXTURE_DATA_TEST_CASE(RunLargeNoTranspose, CLMatMulNativeMMULKernelFixture<floa
-FIXTURE_DATA_TEST_CASE(RunLargeRhsTranspose, CLMatMulNativeMMULKernelFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulMMULDataset(),
+FIXTURE_DATA_TEST_CASE(RunLargeRhsTranspose, CLMatMulNativeMMULKernelFixture<float>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulMMULDataset(),
                        framework::dataset::make("TransposeA", { false })),
                        framework::dataset::make("TransposeB", { true })),
                        m0_values_nightly_lhs_nt),
@@ -323,14 +351,15 @@ FIXTURE_DATA_TEST_CASE(RunLargeRhsTranspose, CLMatMulNativeMMULKernelFixture<flo
-FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposed, CLMatMulNativeMMULKernelFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulMMULDataset(),
-                       framework::dataset::make("TransposeA", { true })),
-                       framework::dataset::make("TransposeB", { false })),
-                       m0_values_nightly_lhs_t),
-                       n0_values_nightly_rhs_nt),
-                       k0_value),
-                       framework::dataset::make("ExportRhsToCLImage", { false })),
-                       framework::dataset::make("DataType", DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposed, CLMatMulNativeMMULKernelFixture<float>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulMMULDataset(),
+                       framework::dataset::make("TransposeA", { true })),
+                       framework::dataset::make("TransposeB", { false })),
+                       m0_values_nightly_lhs_t),
+                       n0_values_nightly_rhs_nt),
+                       k0_value),
+                       framework::dataset::make("ExportRhsToCLImage", { false })),
+                       framework::dataset::make("DataType", DataType::F32)))
 {
     // Validate output
@@ -395,7 +424,8 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLMatMulNativeMMULKernelFixture<half>, framewor
         validate(CLAccessor(_target), _reference, tolerance_f16, 0.f, abs_tolerance_f16);
     }
 }
-FIXTURE_DATA_TEST_CASE(RunLargeNoTranspose, CLMatMulNativeMMULKernelFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulMMULDataset(),
+FIXTURE_DATA_TEST_CASE(RunLargeNoTranspose, CLMatMulNativeMMULKernelFixture<half>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulMMULDataset(),
                        framework::dataset::make("TransposeA", { false })),
                        framework::dataset::make("TransposeB", { false })),
                        m0_values_nightly_lhs_nt),
@@ -410,7 +440,8 @@ FIXTURE_DATA_TEST_CASE(RunLargeNoTranspose, CLMatMulNativeMMULKernelFixture<half
-FIXTURE_DATA_TEST_CASE(RunLargeRhsTranspose, CLMatMulNativeMMULKernelFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulMMULDataset(),
+FIXTURE_DATA_TEST_CASE(RunLargeRhsTranspose, CLMatMulNativeMMULKernelFixture<half>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulMMULDataset(),
                        framework::dataset::make("TransposeA", { false })),
                        framework::dataset::make("TransposeB", { true })),
                        m0_values_nightly_lhs_nt),
@@ -425,14 +456,15 @@ FIXTURE_DATA_TEST_CASE(RunLargeRhsTranspose, CLMatMulNativeMMULKernelFixture<hal
-FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposed, CLMatMulNativeMMULKernelFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulMMULDataset(),
-                       framework::dataset::make("TransposeA", { true })),
-                       framework::dataset::make("TransposeB", { false })),
-                       m0_values_nightly_lhs_t),
-                       n0_values_nightly_rhs_nt),
-                       k0_value),
-                       framework::dataset::make("ExportRhsToCLImage", { false })),
-                       framework::dataset::make("DataType", DataType::F16)))
+FIXTURE_DATA_TEST_CASE(RunLargeLhsTransposed, CLMatMulNativeMMULKernelFixture<half>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(combine(combine(combine(combine(combine(datasets::LargeMatMulMMULDataset(),
+                       framework::dataset::make("TransposeA", { true })),
+                       framework::dataset::make("TransposeB", { false })),
+                       m0_values_nightly_lhs_t),
+                       n0_values_nightly_rhs_nt),
+                       k0_value),
+                       framework::dataset::make("ExportRhsToCLImage", { false })),
+                       framework::dataset::make("DataType", DataType::F16)))
 {
     // Validate output
diff --git a/tests/validation/fixtures/MatMulKernelFixture.h b/tests/validation/fixtures/MatMulKernelFixture.h
index 59bcfe5b2d..88fdf8b291 100644
--- a/tests/validation/fixtures/MatMulKernelFixture.h
+++ b/tests/validation/fixtures/MatMulKernelFixture.h
@@ -36,7 +36,7 @@
 #include "tests/validation/reference/GEMMLowp.h"
 #include "tests/validation/reference/Permute.h"
 #include "tests/validation/reference/ReshapeLayer.h"
-
+#include <cmath>
 #include <random>
 
 namespace arm_compute
@@ -48,12 +48,16 @@ namespace validation
 using namespace arm_compute::opencl::kernels;
 
 template <typename T, typename KernelType, bool use_mmul = false>
-class MatMulKernelValidationFixture : public framework::Fixture
+class MatMulKernelGenericValidationFixture : public framework::Fixture
 {
 public:
     template <typename...>
-    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool pretranspose_a, bool pretranspose_b, int M0, int N0, int K0, bool export_rhs_to_cl_image, DataType data_type)
+    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool pretranspose_a, bool pretranspose_b, int M0, int N0, int K0, bool export_rhs_to_cl_image, DataType data_type,
+               bool enable_bias)
     {
+        // Flag to create a bias
+        _enable_bias = enable_bias;
+
         // For brevity, the input shapes are assumed to be not-transposed for both Lhs and Rhs matrices.
         QuantizationInfo lhs_q_info;
         QuantizationInfo rhs_q_info;
@@ -138,6 +142,16 @@
         }
     }
 
+    template <typename U>
+    void fill_bias_s32(U &&tensor, int i, const UniformQuantizationInfo &q_info)
+    {
+        // For quantized cases, fill the S32 bias as follows to avoid saturation in the test cases.
+        // The code below limits the bias values to within the expected range of the output quantization.
+        const unsigned int bound = std::abs(q_info.scale * 256); // 256 is the size of the 8-bit datatype
+        std::uniform_int_distribution<int32_t> distribution(-(bound / 10), (bound / 10));
+        library->fill(tensor, distribution, i);
+    }
+
     template <typename U, typename D>
     void fill_constant(U &&tensor, D value)
     {
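Note: a worked example of the bound computation in fill_bias_s32, assuming a hypothetical output scale of 0.5f: bound = |0.5f * 256| = 128, so values are drawn from [-(128 / 10), 128 / 10] = [-12, 12] (integer division). Keeping the bias roughly an order of magnitude below the representable output range is what prevents the requantized result from saturating:

    #include <cmath>
    #include <cstdint>

    // Recomputes the fixture's fill bound for a given output scale.
    int32_t bias_fill_bound(float output_scale)
    {
        const unsigned int bound = std::abs(output_scale * 256); // as in fill_bias_s32
        return static_cast<int32_t>(bound / 10);                 // e.g. 0.5f -> 12
    }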
@@ -156,12 +170,15 @@
         matmul_info.k0                     = K0;
         matmul_info.export_rhs_to_cl_image = export_rhs_to_cl_image;
 
+        bool is_quantized = is_data_type_quantized(data_type);
+
         // Create tensors
-        CLTensor a   = create_tensor<CLTensor>(shape_a, data_type, 1, lhs_q_info);
-        CLTensor b   = create_tensor<CLTensor>(shape_b, data_type, 1, rhs_q_info);
-        CLTensor dst = create_tensor<CLTensor>(output_shape, data_type, 1, dst_q_info);
+        CLTensor a    = create_tensor<CLTensor>(shape_a, data_type, 1, lhs_q_info);
+        CLTensor b    = create_tensor<CLTensor>(shape_b, data_type, 1, rhs_q_info);
+        CLTensor bias = create_tensor<CLTensor>(output_shape[0], (is_quantized) ? DataType::S32 : data_type, 1, dst_q_info);
+        CLTensor dst  = create_tensor<CLTensor>(output_shape, data_type, 1, dst_q_info);
 
-        matMul.configure(a.info(), b.info(), dst.info(), matmul_info);
+        matMul.configure(a.info(), b.info(), (_enable_bias) ? bias.info() : nullptr, dst.info(), matmul_info);
         ARM_COMPUTE_ASSERT(a.info()->is_resizable());
         ARM_COMPUTE_ASSERT(b.info()->is_resizable());
         ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
@@ -184,6 +201,22 @@
                                   { ACL_SRC_1, &b },
                                   { ACL_DST, &dst }
                                 });
+
+        if(_enable_bias)
+        {
+            // Allocate, fill and add bias to TensorPack obj
+            bias.allocator()->allocate();
+            if(is_quantized)
+            {
+                fill_bias_s32(CLAccessor(bias), 2, dst_q_info.uniform());
+            }
+            else
+            {
+                fill(CLAccessor(bias), 2);
+            }
+            tensors_pack.add_tensor(ACL_SRC_2, &bias);
+        }
+
         matMul.run(tensors_pack);
 
         return dst;
@@ -252,9 +285,21 @@
     template <typename U = T>
     typename std::enable_if < std::is_same<U, float>::value || std::is_same<U, half>::value, SimpleTensor<T >>::type gemm_reference(SimpleTensor<T> &a, SimpleTensor<T> &b, SimpleTensor<T> &c)
     {
+        // Fill bias, then copy the first dimension into the subsequent dimensions to mimic the broadcast
+        // of the bias tensor from shape [dst.dimension(0)] to [dst.tensor_shape()] in the target kernel
+        if(_enable_bias)
+        {
+            fill(c, 2);
+            const int n          = c.shape().x();
+            const int other_dims = c.shape().collapsed_from(1)[1];
+            for(int i = 1; i < other_dims; ++i) // For all data, copy the first n elements into the remaining batches
+            {
+                memcpy(c.data() + i * n, c.data(), n * sizeof(T));
+            }
+        }
         // Setting beta to 0 will effectively disable C for the
         // computation of the reference: alpha * A * B + 0 * C
-        return reference::gemm(a, b, c, 1.0f, 0.f);
+        return reference::gemm(a, b, c, 1.0f, (_enable_bias) ? 1.0f : 0.f);
     }
 
     template <typename U = T>
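Note: the reference path above reuses the beta input of reference::gemm: the 1-D bias is first replicated row by row into the full output shape, then beta = 1 folds it into alpha * A * B + beta * C. A stand-alone illustration of that replication, on plain buffers with arbitrary contents:

    #include <cstring>

    // Broadcast the first n elements of data across the remaining rows,
    // mimicking a [N] bias expanded onto an N x M output.
    void broadcast_first_row(float *data, int n, int rows)
    {
        for(int i = 1; i < rows; ++i)
        {
            std::memcpy(data + i * n, data, n * sizeof(float));
        }
    }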
@@ -276,19 +321,59 @@ protected:
         constexpr int32_t gemmlowp_max_bound = std::numeric_limits<int32_t>::max();
 
         SimpleTensor<int32_t> bias{ c.shape(), DataType::S32 };
-        fill_constant(bias, static_cast<int32_t>(0));
+        if(_enable_bias)
+        {
+            // Identical to the float implementation: fill, then copy the values of the bias' first dimension
+            fill_bias_s32(bias, 2, cq);
+            const int          n          = bias.shape().x();
+            const int          other_dims = bias.shape().collapsed_from(1)[1];
+            const unsigned int dt_size    = sizeof(int32_t);
+            for(int i = 1; i < other_dims; ++i)
+            {
+                memcpy(bias.data() + i * n, bias.data(), n * dt_size);
+            }
+        }
+        else
+        {
+            fill_constant(bias, static_cast<int32_t>(0)); // effectively disable bias
+        }
 
         const SimpleTensor<T> final_result = reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, T>(result, bias,
                                                                                                               gemmlowp_multipliers, gemmlowp_shifts, gemmlowp_offset,
                                                                                                               gemmlowp_min_bound, gemmlowp_max_bound);
+
         return final_result;
     }
 
     CLTensor        _target{};
     SimpleTensor<T> _reference{};
+    bool            _enable_bias{ false };
     bool            _device_supports_export_to_cl_image{ true };
     bool            _device_supports_mmul{ true };
 };
 
+template <typename T, typename KernelType, bool use_mmul = false>
+class MatMulKernelValidationFixture : public MatMulKernelGenericValidationFixture<T, KernelType, use_mmul>
+{
+public:
+    template <typename...>
+    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool pretranspose_a, bool pretranspose_b, int M0, int N0, int K0, bool export_rhs_to_cl_image, DataType data_type)
+    {
+        MatMulKernelGenericValidationFixture<T, KernelType, use_mmul>::setup(shape_a, shape_b, output_shape, pretranspose_a, pretranspose_b, M0, N0, K0, export_rhs_to_cl_image, data_type,
+                                                                             false /* enable bias */);
+    }
+};
+
+template <typename T, typename KernelType, bool use_mmul = false>
+class MatMulKernelWithBiasValidation : public MatMulKernelGenericValidationFixture<T, KernelType, use_mmul>
+{
+public:
+    template <typename...>
+    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool pretranspose_a, bool pretranspose_b, int M0, int N0, int K0, bool export_rhs_to_cl_image, DataType data_type)
+    {
+        MatMulKernelGenericValidationFixture<T, KernelType, use_mmul>::setup(shape_a, shape_b, output_shape, pretranspose_a, pretranspose_b, M0, N0, K0, export_rhs_to_cl_image, data_type,
+                                                                             true /* enable bias */);
+    }
+};
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
-- 
cgit v1.2.1
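Note: with the generic fixture split this way, wiring bias coverage onto another matmul kernel is a one-line alias; the kernel type and alias name below are hypothetical placeholders:

    template <typename T>
    using CLMyMatMulKernelBiasFixture = MatMulKernelWithBiasValidation<T, ClMyMatMulKernel>;

A FIXTURE_DATA_TEST_CASE instantiated with such an alias runs setup() with enable_bias = true, so the target receives the bias through ACL_SRC_2 and the reference folds in the broadcast bias via the beta = 1 path shown earlier.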