From 19835e591cb0b66a0f5000ae1505bf299e50337d Mon Sep 17 00:00:00 2001 From: Gian Marco Date: Tue, 30 Jan 2018 13:35:54 +0000 Subject: COMPMID-882 - Optimizing GEMMLowp on OpenCL reshaping matrices This new optimization allows to achieve 36.3 % of MAC utilisation on Mate 9 @ 1GHz. The performance have been reported here https://confluence.arm.com/display/MLENG/GEMMLowp+performance%3A+ACL+18.02 Change-Id: I71b6a217068763dfdc11bbf3574ee0eb94f93679 Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/118531 Reviewed-by: Anthony Barbier Tested-by: Jenkins --- .../CL/kernels/CLGEMMLowpMatrixMultiplyKernel.h | 10 +- src/core/CL/CLKernelLibrary.cpp | 3 +- src/core/CL/cl_kernels/gemm.cl | 54 ++- src/core/CL/cl_kernels/gemmlowp.cl | 409 ++++++++++++++++++--- .../CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp | 88 ++++- .../kernels/CLGEMMLowpOffsetContributionKernel.cpp | 12 +- src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp | 1 + src/runtime/CL/functions/CLGEMM.cpp | 10 +- .../CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp | 82 +++-- 9 files changed, 548 insertions(+), 121 deletions(-) diff --git a/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.h b/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.h index 3ad3ced003..b96e978b66 100644 --- a/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.h +++ b/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -59,18 +59,20 @@ public: * @param[in] input1 Input tensor containing the transposed1xW Matrix B. Data type supported: same as @p input0 * @param[out] output Output tensor to store the result of matrix multiplication. Data type supported: S32 * @param[in] is_interleaved_transposed (Optional) True if input0 and input1 have been reshaped respectively using @ref CLGEMMInterleave4x4Kernel and @ref CLGEMMTranspose1xWKernel + * @param[in] reshape_info (Optional) GEMM reshape info. If is_interleaved_transposed = true, this object must contain the information to understand how the matrix A and matrix B have been reshaped */ - void configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, bool is_interleaved_transposed = true); + void configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, bool is_interleaved_transposed = true, const GEMMReshapeInfo &reshape_info = GEMMReshapeInfo()); /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpMatrixMultiplyKernel * * @param[in] input0 Input tensor info containing the interleaved Matrix A. Data type supported: QASYMM8 * @param[in] input1 Input tensor info containing the transposed Matrix B. Data type supported: same as @p input0 * @param[in] output Output tensor info to store the result of matrix multiplication. Data type supported: S32 - * @param[in] is_interleaved_transposed (Optional) True if input0 and input1 have been reshaped respectively using @ref CLGEMMInterleave4x4Kernel and @ref CLGEMMTranspose1xWKernel + * @param[in] is_interleaved_transposed True if input0 and input1 have been reshaped respectively using @ref CLGEMMInterleave4x4Kernel and @ref CLGEMMTranspose1xWKernel + * @param[in] reshape_info GEMM reshape info. 
If is_interleaved_transposed = true, this object must contain the information to understand how the matrix A and matrix B have been reshaped * * @return a status */ - static Status validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, bool is_interleaved_transposed = true); + static Status validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info); // Inherited methods overridden: void run(const Window &window, cl::CommandQueue &queue) override; diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp index 0847612d21..5452b8a1be 100644 --- a/src/core/CL/CLKernelLibrary.cpp +++ b/src/core/CL/CLKernelLibrary.cpp @@ -237,7 +237,8 @@ const std::map CLKernelLibrary::_kernel_program_map = { "gemmlowp_matrix_b_reduction", "gemmlowp.cl" }, { "gemmlowp_mm_bifrost", "gemmlowp.cl" }, { "gemmlowp_mm_midgard", "gemmlowp.cl" }, - { "gemmlowp_mm_interleaved_transposed", "gemmlowp.cl" }, + { "gemmlowp_mm_interleaved_transposed_bifrost", "gemmlowp.cl" }, + { "gemmlowp_mm_interleaved_transposed_midgard", "gemmlowp.cl" }, { "gemmlowp_offset_contribution", "gemmlowp.cl" }, { "gemmlowp_output_stage_quantize_down", "gemmlowp.cl" }, { "gemmlowp_output_stage_quantize_down_fixedpoint", "gemmlowp.cl" }, diff --git a/src/core/CL/cl_kernels/gemm.cl b/src/core/CL/cl_kernels/gemm.cl index bad09f3c42..58a550f77d 100644 --- a/src/core/CL/cl_kernels/gemm.cl +++ b/src/core/CL/cl_kernels/gemm.cl @@ -29,19 +29,20 @@ #if defined(TRANSPOSE_W) && defined(MULT_TRANSPOSE1XW_WIDTH) -#if TRANSPOSE_W == 4 -#define DATA_TYPE uint -#elif TRANSPOSE_W == 8 -#define DATA_TYPE ushort -#elif TRANSPOSE_W == 16 +#if ELEMENT_SIZE == 1 #define DATA_TYPE uchar -#else // TRANSPOSE_W == 16 -#error "Transpose width not supported" -#endif // TRANSPOSE_W +#elif ELEMENT_SIZE == 2 +#define DATA_TYPE ushort +#elif ELEMENT_SIZE == 4 +#define DATA_TYPE uint +#else // ELEMENT_SIZE == 1 +#error "Element size not supported" +#endif // ELEMENT_SIZE /** This OpenCL kernel computes the "vector" 1xW transposition of input matrix * - * @attention The multiplication factor (mult_transpose1xW_width) must be passed at compile time using -DMULT_TRANSPOSE1XW_WIDTH (i.e. -DMULT_TRANSPOSE1XW_WIDTH=2) + * @note The transposition width must be passed at compile time using -DTRANSPOSE_W (i.e. -DTRANSPOSE_W) + * @note The multiplication factor for the transposition width (mult_transpose1xW_width) must be passed at compile time using -DMULT_TRANSPOSE1XW_WIDTH (i.e. -DMULT_TRANSPOSE1XW_WIDTH=2) * * @param[in] src_ptr Pointer to the source matrix. Supported data types: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32 * @param[in] src_stride_x Stride of the source matrix in X dimension (in bytes) @@ -80,6 +81,9 @@ __kernel void gemm_transpose1xW(IMAGE_DECLARATION(src), #if defined(MULT_INTERLEAVE4X4_HEIGHT) && defined(DATA_TYPE) /** This OpenCL kernel reshapes the input matrix transposing each 4x4 block and interleaving the values + * + * @note The data type must be passed at compile time using -DDATA_TYPE (i.e. -DDATA_TYPE=float) + * @note The multiplication factor for the height of the 4x4 interleaved block must be passed at compile time using -DMULT_INTERLEAVE4X4_HEIGHT (i.e. -DMULT_INTERLEAVE4X4_HEIGHT=2) * * @param[in] src_ptr Pointer to the source matrix. 
Supported data types: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32 * @param[in] src_stride_x Stride of the source matrix in X dimension (in bytes) @@ -137,7 +141,9 @@ __kernel void gemm_interleave4x4(IMAGE_DECLARATION(src), /** This OpenCL kernel is optimised for Midgard. It computes the matrix multiplication between matrix A (src0) and matrix B (src1) * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_32bit and @ref gemm_transpose1x4 before running the matrix multiplication * - * @attention The number of columns of matrix B and the optional alpha's value need to be passed at compile time using -DCOLS_B and -DALPHA + * @note The number of columns of matrix B and the optional alpha's value need to be passed at compile time using -DCOLS_B and -DALPHA + * @note The multiplication factor for the transposition width (mult_transpose1xW_width) must be passed at compile time using -DMULT_TRANSPOSE1XW_WIDTH (i.e. -DMULT_TRANSPOSE1XW_WIDTH=2) + * @note The multiplication factor for the height of the 4x4 interleaved block must be passed at compile time using -DMULT_INTERLEAVE4X4_HEIGHT (i.e. -DMULT_INTERLEAVE4X4_HEIGHT=2) * * @param[in] src0_ptr Pointer to the source matrix. Supported data types: F32 * @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes) @@ -240,7 +246,9 @@ __kernel void gemm_mm_interleaved_transposed_f32_midgard(IMAGE_DECLARATION(src0) /** This OpenCL kernel is optimized for Bifrost. It computes the matrix multiplication between matrix A (src0) and matrix B (src1) * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_32bit and @ref gemm_transpose1x4 before running the matrix multiplication * - * @attention The number of matrix B columns and the optional alpha's value need to be passed at compile time using -DCOLS_B and -DALPHA + * @note The number of columns of matrix B and the optional alpha's value need to be passed at compile time using -DCOLS_B and -DALPHA + * @note The multiplication factor for the transposition width (mult_transpose1xW_width) must be passed at compile time using -DMULT_TRANSPOSE1XW_WIDTH (i.e. -DMULT_TRANSPOSE1XW_WIDTH=2) + * @note The multiplication factor for the height of the 4x4 interleaved block must be passed at compile time using -DMULT_INTERLEAVE4X4_HEIGHT (i.e. -DMULT_INTERLEAVE4X4_HEIGHT=2) * * @param[in] src0_ptr Pointer to the source matrix. Supported data types: F32 * @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes) @@ -461,7 +469,9 @@ __kernel void gemm_mm_interleaved_transposed_f32_bifrost(IMAGE_DECLARATION(src0) /** This OpenCL kernel computes the matrix multiplication between matrix A (src0) and matrix B (src1) * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_16bit and @ref gemm_transpose1x8 before running the matrix multiplication * - * @attention The number of matrix B columns and the optional alpha's value need to be passed at compile time using -DCOLS_B and -DALPHA + * @note The number of columns of matrix B and the optional alpha's value need to be passed at compile time using -DCOLS_B and -DALPHA + * @note The multiplication factor for the transposition width (mult_transpose1xW_width) must be passed at compile time using -DMULT_TRANSPOSE1XW_WIDTH (i.e. -DMULT_TRANSPOSE1XW_WIDTH=2) + * @note The multiplication factor for the height of the 4x4 interleaved block must be passed at compile time using -DMULT_INTERLEAVE4X4_HEIGHT (i.e. 
-DMULT_INTERLEAVE4X4_HEIGHT=2) * * @param[in] src0_ptr Pointer to the source matrix. Supported data types: F16 * @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes) @@ -566,7 +576,9 @@ __kernel void gemm_mm_interleaved_transposed_f16(IMAGE_DECLARATION(src0), /** This OpenCL kernel computes the matrix multiplication between matrix A (src0) and matrix B (src1) in 8 bit fixed point precision * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_8bit and @ref gemm_transpose1x16 before running the matrix multiplication * - * @attention The number of matrix B columns, the optional alpha's value and fixed point position need to be passed at compile time using -DCOLS_B -DALPHA and -DFIXED_POINT_POSITION + * @note The number of columns of matrix B and the optional alpha's value need to be passed at compile time using -DCOLS_B and -DALPHA + * @note The multiplication factor for the transposition width (mult_transpose1xW_width) must be passed at compile time using -DMULT_TRANSPOSE1XW_WIDTH (i.e. -DMULT_TRANSPOSE1XW_WIDTH=2) + * @note The multiplication factor for the height of the 4x4 interleaved block must be passed at compile time using -DMULT_INTERLEAVE4X4_HEIGHT (i.e. -DMULT_INTERLEAVE4X4_HEIGHT=2) * * @note: ALPHA must be passed in 8 bit fixed point format * @@ -665,7 +677,9 @@ __kernel void gemm_mm_interleaved_transposed_qs8(IMAGE_DECLARATION(src0), /** This OpenCL kernel computes the matrix multiplication between matrix A (src0) and matrix B (src1) in 16 bit fixed point precision * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_16bit and @ref gemm_transpose1x8 before running the matrix multiplication * - * @attention The number of matrix B columns, the optional alpha's value and fixed point position need to be passed at compile time using -DCOLS_B -DALPHA and -DFIXED_POINT_POSITION + * @note The number of columns of matrix B and the optional alpha's value need to be passed at compile time using -DCOLS_B and -DALPHA + * @note The multiplication factor for the transposition width (mult_transpose1xW_width) must be passed at compile time using -DMULT_TRANSPOSE1XW_WIDTH (i.e. -DMULT_TRANSPOSE1XW_WIDTH=2) + * @note The multiplication factor for the height of the 4x4 interleaved block must be passed at compile time using -DMULT_INTERLEAVE4X4_HEIGHT (i.e. -DMULT_INTERLEAVE4X4_HEIGHT=2) * * @note: ALPHA must be passed in 16 bit fixed point format * @@ -1643,7 +1657,7 @@ __kernel void gemm_mm_qs16(IMAGE_DECLARATION(src0), #if defined(BETA) /** This OpenCL kernel performs the in-place matrix addition between 2 matrices taking into account that the second matrix might be weighted by a scalar value beta: * - * @attention The beta's value need to be passed at compile time using -DBETA + * @note The beta's value need to be passed at compile time using -DBETA * * @param[in] src_ptr Pointer to the source matrix. Supported data types: F32 * @param[in] src_stride_x Stride of the source matrix in X dimension (in bytes) @@ -1680,7 +1694,7 @@ __kernel void gemm_ma_f32(IMAGE_DECLARATION(src), /** This OpenCL kernel performs the in-place matrix addition between 2 matrices taking into account that the second matrix might be weighted by a scalar value beta: * - * @attention The beta's value need to be passed at compile time using -DBETA + * @note The beta's value need to be passed at compile time using -DBETA * * @param[in] src_ptr Pointer to the source matrix. 
Supported data types: F16 * @param[in] src_stride_x Stride of the source matrix in X dimension (in bytes) @@ -1718,7 +1732,7 @@ __kernel void gemm_ma_f16(IMAGE_DECLARATION(src), #if defined(FIXED_POINT_POSITION) /** This OpenCL kernel performs the in-place matrix addition between 2 matrices in 8 bit fixed point taking into account that the second matrix might be weighted by a scalar value beta: * - * @attention The beta's value and the fixed point position need to be passed at compile time using -DBETA and -DFIXED_POINT_POSITION + * @note The beta's value and the fixed point position need to be passed at compile time using -DBETA and -DFIXED_POINT_POSITION * * @note: BETA must be passed in 8 bit fixed point format * @@ -1757,7 +1771,7 @@ __kernel void gemm_ma_qs8(IMAGE_DECLARATION(src), /** This OpenCL kernel performs the in-place matrix addition between 2 matrices in 16 bit fixed point taking into account that the second matrix might be weighted by a scalar value beta: * - * @attention The beta's value and the fixed point position need to be passed at compile time using -DBETA and -DFIXED_POINT_POSITION + * @note The beta's value and the fixed point position need to be passed at compile time using -DBETA and -DFIXED_POINT_POSITION * * @note: BETA must be passed in 16 bit fixed point format * @@ -1799,9 +1813,9 @@ __kernel void gemm_ma_qs16(IMAGE_DECLARATION(src), #if defined(WIDTH_VECTOR_A) /** This OpenCL kernel computes the vector by matrix multiplication between each row of A (src0) and matrix B (src1) used for locally connected layer * - * @attention The width of A need to be passed at compile time using -DWIDTH_VECTOR_A + * @note The width of A need to be passed at compile time using -DWIDTH_VECTOR_A * - * @attention The input A and matrix B must not be reshaped + * @note The input A and matrix B must not be reshaped * * @param[in] src0_ptr Pointer to the source matrix. Supported data types: F32 * @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes) diff --git a/src/core/CL/cl_kernels/gemmlowp.cl b/src/core/CL/cl_kernels/gemmlowp.cl index d724600cdd..5e144d73af 100644 --- a/src/core/CL/cl_kernels/gemmlowp.cl +++ b/src/core/CL/cl_kernels/gemmlowp.cl @@ -24,11 +24,13 @@ #include "helpers.h" #include "helpers_asymm.h" -#if defined(COLS_B) +#if defined(COLS_B) && defined(MULT_INTERLEAVE4X4_HEIGHT) && defined(TRANSPOSE1XW_WIDTH_STEP) /** This OpenCL kernel computes the matrix multiplication between matrix A (src0) and matrix B (src1) - * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_8bit and @ref gemm_transpose1x16 before running the matrix multiplication + * Matrix A and matrix B must be reshaped respectively with @ref CLGEMMInterleave4x4Kernel and @ref CLGEMMTranspose1xWKernel before running the matrix multiplication * - * @attention The number of matrix B columns needs to be passed at compile time using -DCOLS_B + * @note The number of matrix B columns needs to be passed at compile time using -DCOLS_B: e.g. -DCOLS_B=1024 + * @note The transposition width step (mult_transpose1xW_width * 4) must be passed at compile time using -DTRANSPOSE1XW_WIDTH_STEP (i.e. -DTRANSPOSE1XW_WIDTH_STEP=2) + * @note The multiplication factor for the height of the 4x4 interleaved block must be passed at compile time using -DMULT_INTERLEAVE4X4_HEIGHT (i.e. -DMULT_INTERLEAVE4X4_HEIGHT=2) * * @param[in] src0_ptr Pointer to the source matrix. 
Supported data type: QASYMM8 * @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes) @@ -49,69 +51,370 @@ * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes) * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix */ -__kernel void gemmlowp_mm_interleaved_transposed(IMAGE_DECLARATION(src0), - IMAGE_DECLARATION(src1), - IMAGE_DECLARATION(dst)) +__kernel void gemmlowp_mm_interleaved_transposed_midgard(IMAGE_DECLARATION(src0), + IMAGE_DECLARATION(src1), + IMAGE_DECLARATION(dst)) { - // src_addr.s0 = address of matrix A - // src_addr.s1 = address of matrix B - // Compute address for matrix A and B - int2 src_addr = (int2)(get_global_id(1), get_global_id(0)) * (int2)((src0_stride_y), - (src1_stride_y)); + int x = get_global_id(0) / TRANSPOSE1XW_WIDTH_STEP; + int y = get_global_id(1) / MULT_INTERLEAVE4X4_HEIGHT; + + // Offset + const int offset_row_a = (get_global_id(1) % MULT_INTERLEAVE4X4_HEIGHT) * 4; + const int offset_row_b = (get_global_id(0) % TRANSPOSE1XW_WIDTH_STEP) * 4; - // Add offset_first_element_in_bytes - src_addr = src_addr + ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes)); + // src_addr_a = address of matrix A + // src_addr_b = address of matrix B + __global uchar *src_addr_a = (__global uchar *)(src0_ptr + y * src0_stride_y + src0_offset_first_element_in_bytes); + __global uchar *src_addr_b = (__global uchar *)(src1_ptr + x * src1_stride_y + src1_offset_first_element_in_bytes); // Compute end row address for matrix B - int end_row_mtx_b = src_addr.s1 + COLS_B; + __global uchar *src_end_addr_b = src_addr_b + COLS_B; + + src_addr_a += offset_row_a; + src_addr_b += offset_row_b; // Reset accumulators - int16 c00 = 0; - int16 c10 = 0; - int16 c20 = 0; - int16 c30 = 0; + int4 c00 = 0; + int4 c10 = 0; + int4 c20 = 0; + int4 c30 = 0; - for(; src_addr.s1 <= (end_row_mtx_b - 32); src_addr += (int2)(8, 32)) + for(; src_addr_b <= (src_end_addr_b - (int)(8 * TRANSPOSE1XW_WIDTH_STEP)); src_addr_a += 8 * MULT_INTERLEAVE4X4_HEIGHT, src_addr_b += 8 * TRANSPOSE1XW_WIDTH_STEP) { // Load values from matrix A (interleaved) and matrix B (transposed) - int8 a0 = convert_int8(vload8(0, ((__global uchar *)src0_ptr) + src_addr.s0)); - int16 b0 = convert_int16(vload16(0, ((__global uchar *)src1_ptr) + src_addr.s1)); + int4 a0 = convert_int4(vload4(0, src_addr_a)); + int4 b0 = convert_int4(vload4(0, src_addr_b)); - c00 += (int16)a0.s0 * b0; - c10 += (int16)a0.s1 * b0; - c20 += (int16)a0.s2 * b0; - c30 += (int16)a0.s3 * b0; + c00 += (int4)a0.s0 * b0; + c10 += (int4)a0.s1 * b0; + c20 += (int4)a0.s2 * b0; + c30 += (int4)a0.s3 * b0; + + a0 = convert_int4(vload4(0, src_addr_a + 4 * MULT_INTERLEAVE4X4_HEIGHT)); + b0 = convert_int4(vload4(0, src_addr_b + 4 * TRANSPOSE1XW_WIDTH_STEP)); + + c00 += (int4)a0.s0 * b0; + c10 += (int4)a0.s1 * b0; + c20 += (int4)a0.s2 * b0; + c30 += (int4)a0.s3 * b0; + } - int16 b1 = convert_int16(vload16(0, ((__global uchar *)src1_ptr) + src_addr.s1 + 16)); + for(; src_addr_b < src_end_addr_b; src_addr_a += (4 * MULT_INTERLEAVE4X4_HEIGHT), src_addr_b += (4 * TRANSPOSE1XW_WIDTH_STEP)) + { + // Load values from matrix A (interleaved) and matrix B (transposed) + int4 a0 = convert_int4(vload4(0, src_addr_a)); + int4 b0 = convert_int4(vload4(0, src_addr_b)); - c00 += (int16)a0.s4 * b1; - c10 += (int16)a0.s5 * b1; - c20 += (int16)a0.s6 * b1; - c30 += (int16)a0.s7 * b1; + c00 += (int4)a0.s0 * b0; + c10 += (int4)a0.s1 * b0; + c20 += 
(int4)a0.s2 * b0; + c30 += (int4)a0.s3 * b0; } - for(; src_addr.s1 < end_row_mtx_b; src_addr += (int2)(4, 16)) + // Compute destination address + Image dst = CONVERT_TO_IMAGE_STRUCT(dst); + + // Store 4x4 block + vstore4(c00, 0, (__global int *)(offset(&dst, 0, 0))); + vstore4(c10, 0, (__global int *)(offset(&dst, 0, 1))); + vstore4(c20, 0, (__global int *)(offset(&dst, 0, 2))); + vstore4(c30, 0, (__global int *)(offset(&dst, 0, 3))); +} + +/** This OpenCL kernel is optimized for Bifrost and computes the matrix multiplication between matrix A (src0) and matrix B (src1) + * Matrix A and matrix B must be reshaped respectively with @ref CLGEMMInterleave4x4Kernel and @ref CLGEMMTranspose1xWKernel before running the matrix multiplication + * + * @attention The number of matrix B columns needs to be passed at compile time using -DCOLS_B + * @note The transposition width step (mult_transpose1xW_width * 4) must be passed at compile time using -DTRANSPOSE1XW_WIDTH_STEP (i.e. -DTRANSPOSE1XW_WIDTH_STEP=2) + * @note The multiplication factor for the height of the 4x4 interleaved block must be passed at compile time using -DMULT_INTERLEAVE4X4_HEIGHT (i.e. -DMULT_INTERLEAVE4X4_HEIGHT=2) + * + * @param[in] src0_ptr Pointer to the source matrix. Supported data type: QASYMM8 + * @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes) + * @param[in] src0_step_x src_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes) + * @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix + * @param[in] src1_ptr Pointer to the source matrix. 
Supported data type: same as @p src0_ptr + * @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes) + * @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes) + * @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix + * @param[out] dst_ptr Pointer to the destination matrix Supported data type: S32 + * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes) + * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes) + * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix + */ +__kernel void gemmlowp_mm_interleaved_transposed_bifrost(IMAGE_DECLARATION(src0), + IMAGE_DECLARATION(src1), + IMAGE_DECLARATION(dst)) +{ + int x = get_global_id(0) / TRANSPOSE1XW_WIDTH_STEP; + int y = get_global_id(1) / MULT_INTERLEAVE4X4_HEIGHT; + + // Offset + const int offset_row_a = (get_global_id(1) % MULT_INTERLEAVE4X4_HEIGHT) * 4; + const int offset_row_b = (get_global_id(0) % TRANSPOSE1XW_WIDTH_STEP) * 4; + + // src_addr_a = address of matrix A + // src_addr_b = address of matrix B + __global uchar *src_addr_a = (__global uchar *)(src0_ptr + y * src0_stride_y + src0_offset_first_element_in_bytes); + __global uchar *src_addr_b = (__global uchar *)(src1_ptr + x * src1_stride_y + src1_offset_first_element_in_bytes); + + // Compute end row address for matrix B + __global uchar *src_end_addr_b = src_addr_b + COLS_B; + + src_addr_a += offset_row_a; + src_addr_b += offset_row_b; + + // Reset accumulators + uint c00 = 0; + uint c01 = 0; + uint c02 = 0; + uint c03 = 0; + uint c10 = 0; + uint c11 = 0; + uint c12 = 0; + uint c13 = 0; + uint c20 = 0; + uint c21 = 0; + uint c22 = 0; + uint c23 = 0; + uint c30 = 0; + uint c31 = 0; + uint c32 = 0; + uint c33 = 0; + +#if MULT_INTERLEAVE4X4_HEIGHT == 1 + for(; src_addr_b <= (src_end_addr_b - (int)(32 * TRANSPOSE1XW_WIDTH_STEP)); src_addr_a += (32 * MULT_INTERLEAVE4X4_HEIGHT), src_addr_b += (32 * TRANSPOSE1XW_WIDTH_STEP)) { // Load values from matrix A (interleaved) and matrix B (transposed) - int4 a0 = convert_int4(vload4(0, ((__global uchar *)src0_ptr) + src_addr.s0)); - int16 b0 = convert_int16(vload16(0, ((__global uchar *)src1_ptr) + src_addr.s1)); + uchar16 a0 = vload16(0, src_addr_a); + uchar4 b0 = vload4(0, src_addr_b); + + c00 += (ushort)a0.s0 * b0.s0; + c01 += (ushort)a0.s0 * b0.s1; + c02 += (ushort)a0.s0 * b0.s2; + c03 += (ushort)a0.s0 * b0.s3; + + c10 += (ushort)a0.s1 * b0.s0; + c11 += (ushort)a0.s1 * b0.s1; + c12 += (ushort)a0.s1 * b0.s2; + c13 += (ushort)a0.s1 * b0.s3; + + c20 += (ushort)a0.s2 * b0.s0; + c21 += (ushort)a0.s2 * b0.s1; + c22 += (ushort)a0.s2 * b0.s2; + c23 += (ushort)a0.s2 * b0.s3; + + c30 += (ushort)a0.s3 * b0.s0; + c31 += (ushort)a0.s3 * b0.s1; + c32 += (ushort)a0.s3 * b0.s2; + c33 += (ushort)a0.s3 * b0.s3; + + // Load values from matrix B (transposed) + b0 = vload4(0, src_addr_b + 4 * TRANSPOSE1XW_WIDTH_STEP); + + c00 += (ushort)a0.s4 * b0.s0; + c01 += (ushort)a0.s4 * b0.s1; + c02 += (ushort)a0.s4 * b0.s2; + c03 += (ushort)a0.s4 * b0.s3; + + c10 
+= (ushort)a0.s5 * b0.s0; + c11 += (ushort)a0.s5 * b0.s1; + c12 += (ushort)a0.s5 * b0.s2; + c13 += (ushort)a0.s5 * b0.s3; + + c20 += (ushort)a0.s6 * b0.s0; + c21 += (ushort)a0.s6 * b0.s1; + c22 += (ushort)a0.s6 * b0.s2; + c23 += (ushort)a0.s6 * b0.s3; + + c30 += (ushort)a0.s7 * b0.s0; + c31 += (ushort)a0.s7 * b0.s1; + c32 += (ushort)a0.s7 * b0.s2; + c33 += (ushort)a0.s7 * b0.s3; + + // Load values from matrix B (transposed) + b0 = vload4(0, src_addr_b + 8 * TRANSPOSE1XW_WIDTH_STEP); + + c00 += (ushort)a0.s8 * b0.s0; + c01 += (ushort)a0.s8 * b0.s1; + c02 += (ushort)a0.s8 * b0.s2; + c03 += (ushort)a0.s8 * b0.s3; + + c10 += (ushort)a0.s9 * b0.s0; + c11 += (ushort)a0.s9 * b0.s1; + c12 += (ushort)a0.s9 * b0.s2; + c13 += (ushort)a0.s9 * b0.s3; + + c20 += (ushort)a0.sA * b0.s0; + c21 += (ushort)a0.sA * b0.s1; + c22 += (ushort)a0.sA * b0.s2; + c23 += (ushort)a0.sA * b0.s3; + + c30 += (ushort)a0.sB * b0.s0; + c31 += (ushort)a0.sB * b0.s1; + c32 += (ushort)a0.sB * b0.s2; + c33 += (ushort)a0.sB * b0.s3; + + // Load values from matrix B (transposed) + b0 = vload4(0, src_addr_b + 12 * TRANSPOSE1XW_WIDTH_STEP); + + c00 += (ushort)a0.sC * b0.s0; + c01 += (ushort)a0.sC * b0.s1; + c02 += (ushort)a0.sC * b0.s2; + c03 += (ushort)a0.sC * b0.s3; + + c10 += (ushort)a0.sD * b0.s0; + c11 += (ushort)a0.sD * b0.s1; + c12 += (ushort)a0.sD * b0.s2; + c13 += (ushort)a0.sD * b0.s3; + + c20 += (ushort)a0.sE * b0.s0; + c21 += (ushort)a0.sE * b0.s1; + c22 += (ushort)a0.sE * b0.s2; + c23 += (ushort)a0.sE * b0.s3; + + c30 += (ushort)a0.sF * b0.s0; + c31 += (ushort)a0.sF * b0.s1; + c32 += (ushort)a0.sF * b0.s2; + c33 += (ushort)a0.sF * b0.s3; + + // Load values from matrix A (interleaved) and matrix B (transposed) + a0 = vload16(0, src_addr_a + 16); + b0 = vload4(0, src_addr_b + 16 * TRANSPOSE1XW_WIDTH_STEP); + + c00 += (ushort)a0.s0 * b0.s0; + c01 += (ushort)a0.s0 * b0.s1; + c02 += (ushort)a0.s0 * b0.s2; + c03 += (ushort)a0.s0 * b0.s3; + + c10 += (ushort)a0.s1 * b0.s0; + c11 += (ushort)a0.s1 * b0.s1; + c12 += (ushort)a0.s1 * b0.s2; + c13 += (ushort)a0.s1 * b0.s3; + + c20 += (ushort)a0.s2 * b0.s0; + c21 += (ushort)a0.s2 * b0.s1; + c22 += (ushort)a0.s2 * b0.s2; + c23 += (ushort)a0.s2 * b0.s3; + + c30 += (ushort)a0.s3 * b0.s0; + c31 += (ushort)a0.s3 * b0.s1; + c32 += (ushort)a0.s3 * b0.s2; + c33 += (ushort)a0.s3 * b0.s3; + + // Load values from matrix B (transposed) + b0 = vload4(0, src_addr_b + 20 * TRANSPOSE1XW_WIDTH_STEP); + + c00 += (ushort)a0.s4 * b0.s0; + c01 += (ushort)a0.s4 * b0.s1; + c02 += (ushort)a0.s4 * b0.s2; + c03 += (ushort)a0.s4 * b0.s3; + + c10 += (ushort)a0.s5 * b0.s0; + c11 += (ushort)a0.s5 * b0.s1; + c12 += (ushort)a0.s5 * b0.s2; + c13 += (ushort)a0.s5 * b0.s3; + + c20 += (ushort)a0.s6 * b0.s0; + c21 += (ushort)a0.s6 * b0.s1; + c22 += (ushort)a0.s6 * b0.s2; + c23 += (ushort)a0.s6 * b0.s3; + + c30 += (ushort)a0.s7 * b0.s0; + c31 += (ushort)a0.s7 * b0.s1; + c32 += (ushort)a0.s7 * b0.s2; + c33 += (ushort)a0.s7 * b0.s3; + + // Load values from matrix B (transposed) + b0 = vload4(0, src_addr_b + 24 * TRANSPOSE1XW_WIDTH_STEP); + + c00 += (ushort)a0.s8 * b0.s0; + c01 += (ushort)a0.s8 * b0.s1; + c02 += (ushort)a0.s8 * b0.s2; + c03 += (ushort)a0.s8 * b0.s3; + + c10 += (ushort)a0.s9 * b0.s0; + c11 += (ushort)a0.s9 * b0.s1; + c12 += (ushort)a0.s9 * b0.s2; + c13 += (ushort)a0.s9 * b0.s3; + + c20 += (ushort)a0.sA * b0.s0; + c21 += (ushort)a0.sA * b0.s1; + c22 += (ushort)a0.sA * b0.s2; + c23 += (ushort)a0.sA * b0.s3; + + c30 += (ushort)a0.sB * b0.s0; + c31 += (ushort)a0.sB * b0.s1; + c32 += (ushort)a0.sB * b0.s2; + c33 
+= (ushort)a0.sB * b0.s3; + + // Load values from matrix B (transposed) + b0 = vload4(0, src_addr_b + 28 * TRANSPOSE1XW_WIDTH_STEP); + + c00 += (ushort)a0.sC * b0.s0; + c01 += (ushort)a0.sC * b0.s1; + c02 += (ushort)a0.sC * b0.s2; + c03 += (ushort)a0.sC * b0.s3; + + c10 += (ushort)a0.sD * b0.s0; + c11 += (ushort)a0.sD * b0.s1; + c12 += (ushort)a0.sD * b0.s2; + c13 += (ushort)a0.sD * b0.s3; + + c20 += (ushort)a0.sE * b0.s0; + c21 += (ushort)a0.sE * b0.s1; + c22 += (ushort)a0.sE * b0.s2; + c23 += (ushort)a0.sE * b0.s3; + + c30 += (ushort)a0.sF * b0.s0; + c31 += (ushort)a0.sF * b0.s1; + c32 += (ushort)a0.sF * b0.s2; + c33 += (ushort)a0.sF * b0.s3; + } +#endif // MULT_INTERLEAVE4X4_HEIGHT == 1 - c00 += (int16)a0.s0 * b0; - c10 += (int16)a0.s1 * b0; - c20 += (int16)a0.s2 * b0; - c30 += (int16)a0.s3 * b0; + for(; src_addr_b < src_end_addr_b; src_addr_a += (4 * MULT_INTERLEAVE4X4_HEIGHT), src_addr_b += (4 * TRANSPOSE1XW_WIDTH_STEP)) + { + // Load values from matrix A (interleaved) and matrix B (transposed) + uchar4 a0 = vload4(0, src_addr_a); + uchar4 b0 = vload4(0, src_addr_b); + + c00 += (ushort)a0.s0 * b0.s0; + c01 += (ushort)a0.s0 * b0.s1; + c02 += (ushort)a0.s0 * b0.s2; + c03 += (ushort)a0.s0 * b0.s3; + + c10 += (ushort)a0.s1 * b0.s0; + c11 += (ushort)a0.s1 * b0.s1; + c12 += (ushort)a0.s1 * b0.s2; + c13 += (ushort)a0.s1 * b0.s3; + + c20 += (ushort)a0.s2 * b0.s0; + c21 += (ushort)a0.s2 * b0.s1; + c22 += (ushort)a0.s2 * b0.s2; + c23 += (ushort)a0.s2 * b0.s3; + + c30 += (ushort)a0.s3 * b0.s0; + c31 += (ushort)a0.s3 * b0.s1; + c32 += (ushort)a0.s3 * b0.s2; + c33 += (ushort)a0.s3 * b0.s3; } // Compute destination address Image dst = CONVERT_TO_IMAGE_STRUCT(dst); - // Store 4x16 block - vstore16(c00, 0, (__global int *)(offset(&dst, 0, 0))); - vstore16(c10, 0, (__global int *)(offset(&dst, 0, 1))); - vstore16(c20, 0, (__global int *)(offset(&dst, 0, 2))); - vstore16(c30, 0, (__global int *)(offset(&dst, 0, 3))); + // Store 4x4 block + vstore4((int4)(c00, c01, c02, c03), 0, (__global int *)(offset(&dst, 0, 0))); + vstore4((int4)(c10, c11, c12, c13), 0, (__global int *)(offset(&dst, 0, 1))); + vstore4((int4)(c20, c21, c22, c23), 0, (__global int *)(offset(&dst, 0, 2))); + vstore4((int4)(c30, c31, c32, c33), 0, (__global int *)(offset(&dst, 0, 3))); } -#endif // defined(COLS_B) +#endif // defined(COLS_B) && defined(MULT_INTERLEAVE4X4_HEIGHT) && defined(TRANSPOSE1XW_WIDTH_STEP) #if defined(NUM_ELEMS_PROCESSED_PER_THREAD_X) && defined(NUM_ELEMS_PROCESSED_PER_THREAD_Y) && defined(COLS_A) #define VECTOR_UCHAR VEC_DATA_TYPE(uchar, NUM_ELEMS_PROCESSED_PER_THREAD_X) @@ -788,39 +1091,39 @@ __kernel void gemmlowp_offset_contribution(TENSOR3D_DECLARATION(mm_result) { Tensor3D mm_result = CONVERT_TO_TENSOR3D_STRUCT(mm_result); - int16 a_offset_s32 = (int16)0; - int16 b_offset_s32 = (int16)0; + int4 a_offset_s32 = (int4)0; + int4 b_offset_s32 = (int4)0; #if defined(A_OFFSET) Image sum_col = CONVERT_TO_IMAGE_STRUCT(sum_col); // Compute the offset contribution due to A_OFFSET #if defined(SUM_COL_HAS_BATCHES) - a_offset_s32 = vload16(0, (__global int *)(sum_col.ptr + get_global_id(2) * sum_col_stride_y)); + a_offset_s32 = vload4(0, (__global int *)(sum_col.ptr + get_global_id(2) * sum_col_stride_y)); #else // defined(MATRIX_B_HAS_BATCHES) - a_offset_s32 = vload16(0, (__global int *)(sum_col.ptr)); + a_offset_s32 = vload4(0, (__global int *)(sum_col.ptr)); #endif // defined(MATRIX_B_HAS_BATCHES) - a_offset_s32 *= (int16)A_OFFSET; + a_offset_s32 *= (int4)A_OFFSET; #endif // defined(A_OFFSET) #if defined(B_OFFSET) 
Image sum_row = CONVERT_TO_IMAGE_STRUCT(sum_row); // Compute the offset contribution due to B_OFFSET - b_offset_s32 = (int16) * (((__global int *)(sum_row.ptr + get_global_id(2) * sum_row_stride_y)) + get_global_id(1)); - b_offset_s32 *= (int16)B_OFFSET; + b_offset_s32 = (int4) * (((__global int *)(sum_row.ptr + get_global_id(2) * sum_row_stride_y)) + get_global_id(1)); + b_offset_s32 *= (int4)B_OFFSET; #endif // defined(B_OFFSET) - const int16 offset_term_s32 = (int16)K_OFFSET + a_offset_s32 + b_offset_s32; + const int4 offset_term_s32 = (int4)K_OFFSET + a_offset_s32 + b_offset_s32; - int16 in_s32 = vload16(0, (__global int *)mm_result.ptr); + int4 in_s32 = vload4(0, (__global int *)mm_result.ptr); // Add the offset terms to GEMM's result in_s32 += offset_term_s32; // Store the result with the offset contribution - vstore16(in_s32, 0, (__global int *)mm_result.ptr); + vstore4(in_s32, 0, (__global int *)mm_result.ptr); } #endif // defined(K_OFFSET) diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp index 2f96724210..ae498ec8a7 100644 --- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp +++ b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp @@ -24,6 +24,7 @@ #include "arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.h" #include "arm_compute/core/AccessWindowStatic.h" +#include "arm_compute/core/AccessWindowTranspose.h" #include "arm_compute/core/CL/CLHelpers.h" #include "arm_compute/core/CL/CLKernelLibrary.h" #include "arm_compute/core/CL/ICLTensor.h" @@ -34,6 +35,7 @@ #include "arm_compute/core/Utils.h" #include "arm_compute/core/Validate.h" #include "arm_compute/core/Window.h" +#include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "support/ToolchainSupport.h" #include @@ -41,6 +43,7 @@ #include using namespace arm_compute; +using namespace arm_compute::misc::shape_calculator; namespace arm_compute { @@ -51,14 +54,53 @@ namespace { using ElementsProcessed = Steps; -Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, bool is_interleaved_transposed) +Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info) { ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::QASYMM8); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::QASYMM8); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1); + if(!is_interleaved_transposed) { ARM_COMPUTE_RETURN_ERROR_ON(input0->dimension(0) != input1->dimension(1)); + + if(output->total_size() != 0) + { + ARM_COMPUTE_RETURN_ERROR_ON(input1->dimension(0) != output->dimension(0)); + ARM_COMPUTE_RETURN_ERROR_ON(input0->dimension(1) != output->dimension(1)); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32); + } + } + else + { + const int m = reshape_info.m(); + const int n = reshape_info.n(); + const int k = reshape_info.k(); + const int mult_transpose1xW_width = reshape_info.mult_transpose1xW_width(); + const int mult_interleave4x4_height = reshape_info.mult_interleave4x4_height(); + + TensorShape tensor_shape0{ input0->tensor_shape() }; + tensor_shape0.set(0, k); + tensor_shape0.set(1, m); + + TensorShape tensor_shape1{ input1->tensor_shape() }; + tensor_shape1.set(0, n); + tensor_shape1.set(1, k); + + const 
TensorInfo tensor_info0 = input0->clone()->set_tensor_shape(tensor_shape0); + const TensorInfo tensor_info1 = input1->clone()->set_tensor_shape(tensor_shape1); + + const TensorInfo tensor_info_reshaped0 = input0->clone()->set_tensor_shape(compute_interleaved_shape(tensor_info0, mult_interleave4x4_height)); + const TensorInfo tensor_info_reshaped1 = input1->clone()->set_tensor_shape(compute_transpose1xW_with_element_size_shape(tensor_info1, mult_transpose1xW_width)); + + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input0, &tensor_info_reshaped0); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, &tensor_info_reshaped1); + + if(output->total_size() != 0) + { + ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(0) != static_cast(n)); + ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(1) != static_cast(m)); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32); + } } return Status{}; @@ -76,16 +118,14 @@ std::pair validate_and_configure_window(ITensorInfo *input0, ITe // Check if the output tensor is a vector. If so,the kernel runs the vector-matrix multiplication if(is_interleaved_transposed) { - // Configure window - num_elems_processed_per_iteration_x = 16; - num_elems_processed_per_iteration_y = 4; - constexpr unsigned int num_elems_read_per_iteration_input0 = 4; - constexpr unsigned int num_elems_read_per_iteration_input1 = 16; + // Configure kernel window + num_elems_processed_per_iteration_x = 4; + num_elems_processed_per_iteration_y = 4; win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y)); - AccessWindowRectangle input0_access(input0, 0, 0, num_elems_read_per_iteration_input0, 1); - AccessWindowRectangle input1_access(input1, 0, 0, num_elems_read_per_iteration_input1, 1); + AccessWindowRectangle input0_access(input0, 0, 0, num_elems_processed_per_iteration_y, 1, 1.f, 0.25f); + AccessWindowTranspose input1_access(input1, 0, 0, num_elems_processed_per_iteration_x, 1, 0.f, 0.25f); AccessWindowRectangle output_access(output, 0, 0, num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y); window_changed = update_window_and_padding(win, input0_access, input1_access, output_access); @@ -122,10 +162,18 @@ CLGEMMLowpMatrixMultiplyKernel::CLGEMMLowpMatrixMultiplyKernel() { } -void CLGEMMLowpMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, bool is_interleaved_transposed) +void CLGEMMLowpMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info) { ARM_COMPUTE_ERROR_ON_NULLPTR(input0, input1, output); - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), output->info(), is_interleaved_transposed)); + + // Output tensor auto inizialitation if not yet initialized + TensorShape tensor_shape{ input0->info()->tensor_shape() }; + tensor_shape.set(0, is_interleaved_transposed ? reshape_info.n() : input1->info()->dimension(0)); + tensor_shape.set(1, is_interleaved_transposed ? 
reshape_info.m() : input0->info()->dimension(1)); + + auto_init_if_empty(*output->info(), tensor_shape, 1, DataType::S32, 1, QuantizationInfo()); + + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), output->info(), is_interleaved_transposed, reshape_info)); _input0 = input0; _input1 = input1; @@ -146,8 +194,18 @@ void CLGEMMLowpMatrixMultiplyKernel::configure(const ICLTensor *input0, const IC std::string kernel_name(" "); if(is_interleaved_transposed) { + const int mult_transpose1xW_width = reshape_info.mult_transpose1xW_width(); + const int mult_interleave4x4_height = reshape_info.mult_interleave4x4_height(); + + // Note: The computation tile has the x dimension equal to 4 which is less than the transpose_width (16) + // In order to access correctly the elements from the transposed matrix B, we need to pass + // the correct step which is calculated as (16 * mult_transpose1xW_width) / 4) + build_opts.add_option("-DCOLS_B=" + support::cpp11::to_string(input1->info()->dimension(0))); - kernel_name = "gemmlowp_mm_interleaved_transposed"; + build_opts.add_option("-DTRANSPOSE1XW_WIDTH_STEP=" + support::cpp11::to_string(4 * mult_transpose1xW_width)); + build_opts.add_option("-DMULT_INTERLEAVE4X4_HEIGHT=" + support::cpp11::to_string(mult_interleave4x4_height)); + + kernel_name = "gemmlowp_mm_interleaved_transposed_" + string_from_target(arch_target); } else { @@ -171,10 +229,10 @@ void CLGEMMLowpMatrixMultiplyKernel::configure(const ICLTensor *input0, const IC _config_id += (is_interleaved_transposed ? support::cpp11::to_string(input1->info()->dimension(0)) : support::cpp11::to_string(input1->info()->dimension(1))); } -Status CLGEMMLowpMatrixMultiplyKernel::validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, bool is_interleaved_transposed) +Status CLGEMMLowpMatrixMultiplyKernel::validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info) { ElementsProcessed num_elements_processed{}; - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input0, input1, output, is_interleaved_transposed)); + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input0, input1, output, is_interleaved_transposed, reshape_info)); ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input0->clone().get(), input1->clone().get(), output->clone().get(), diff --git a/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp b/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp index d05939fcf5..221a1566b9 100644 --- a/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp +++ b/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited. 
* * SPDX-License-Identifier: MIT * @@ -91,7 +91,7 @@ Status validate_arguments(const ITensorInfo *mm_result, const ITensorInfo *vecto std::pair validate_and_configure_window(ITensorInfo *mm_result, ITensorInfo *vector_sum_col, ITensorInfo *vector_sum_row, int32_t a_offset, int32_t b_offset) { - constexpr unsigned int num_elems_processed_per_iteration = 16; + constexpr unsigned int num_elems_processed_per_iteration = 4; bool window_changed = false; // Configure kernel window @@ -160,6 +160,14 @@ void CLGEMMLowpOffsetContributionKernel::configure(ICLTensor *mm_result, const I a_offset, b_offset); // NOLINT ARM_COMPUTE_ERROR_THROW_ON(win_config.first); ICLKernel::configure(win_config.second); + + // Set config_id for enabling LWS tuning + _config_id = "gemmlowp_offset_contribution_"; + _config_id += support::cpp11::to_string(mm_result->info()->dimension(0)); + _config_id += "_"; + _config_id += support::cpp11::to_string(mm_result->info()->dimension(1)); + _config_id += "_"; + _config_id += support::cpp11::to_string(mm_result->info()->dimension(2)); } Status CLGEMMLowpOffsetContributionKernel::validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, diff --git a/src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp b/src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp index 63aed6df32..24d218760e 100644 --- a/src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp +++ b/src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp @@ -113,6 +113,7 @@ void CLGEMMTranspose1xWKernel::configure(const ICLTensor *input, ICLTensor *outp // Create build options CLBuildOptions build_opts; + build_opts.add_option("-DELEMENT_SIZE=" + support::cpp11::to_string(input->info()->element_size())); build_opts.add_option("-DTRANSPOSE_W=" + support::cpp11::to_string(num_elems_processed_per_iteration)); build_opts.add_option("-DMULT_TRANSPOSE1XW_WIDTH=" + support::cpp11::to_string(mult_transpose1xW_width)); diff --git a/src/runtime/CL/functions/CLGEMM.cpp b/src/runtime/CL/functions/CLGEMM.cpp index a09849ab93..f02eb169b7 100644 --- a/src/runtime/CL/functions/CLGEMM.cpp +++ b/src/runtime/CL/functions/CLGEMM.cpp @@ -50,7 +50,7 @@ inline bool is_interleaved_transposed(int m, int n, int k, DataType data_type, b if(k > 256 && m > 4 && data_type == DataType::F32 && reshape_b_only_on_first_run) { const float scale = k < 1024 ? 
2.0f : 2.5f; - flag = scale * n > 1.66f * n + 38.4f; + flag = (scale * n) > ((1.66f * n) + 38.4f); } else { @@ -122,6 +122,10 @@ void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor * matrix_a = &_tmp_a; matrix_b = &_tmp_b; + // Manage intermediate buffers + _memory_group.manage(&_tmp_a); + _memory_group.manage(&_tmp_b); + // _tmp_a and _tmp_b will be auto configured in _interleave_kernel and in _transpose_kernel // Configure interleave kernel @@ -129,10 +133,6 @@ void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor * // Configure transpose kernel _transpose_kernel.configure(b, &_tmp_b, mult_transpose1xW_width); - - // Manage intermediate buffers - _memory_group.manage(&_tmp_a); - _memory_group.manage(&_tmp_b); } _mm_kernel.configure(matrix_a, matrix_b, output, alpha, _is_interleaved_transposed, GEMMReshapeInfo(m, n, k, mult_transpose1xW_width, mult_interleave4x4_height)); diff --git a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp index 5f886a02c6..c688299d4f 100644 --- a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp +++ b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp @@ -35,6 +35,29 @@ using namespace arm_compute; using namespace arm_compute::misc::shape_calculator; +namespace +{ +inline bool is_interleaved_transposed(int m, int n, int k, bool reshape_b_only_on_first_run, GPUTarget gpu_target) +{ + bool flag = true; + + if(gpu_target == GPUTarget::BIFROST) + { + // COMPMID-852 + if(k > 256 && m > 4 && reshape_b_only_on_first_run) + { + flag = ((0.72f + n * 0.10766f) < (n * 0.1284f)); + } + else + { + flag = false; + } + } + + return flag; +} +} // namespace + CLGEMMLowpMatrixMultiplyCore::CLGEMMLowpMatrixMultiplyCore(std::shared_ptr memory_manager) : _memory_group(std::move(memory_manager)), _mm_kernel(), _mtx_a_reshape_kernel(), _mtx_b_reshape_kernel(), _mtx_a_reduction_kernel(), _mtx_b_reduction_kernel(), _offset_contribution_kernel(), _vector_sum_col(), _vector_sum_row(), _tmp_a(), _tmp_b(), _a_offset(0), _b_offset(0), _is_interleaved_transposed(true), _is_first_run(true), _reshape_b_only_on_first_run(false) @@ -51,36 +74,45 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor _a_offset = a->info()->quantization_info().offset; _b_offset = b->info()->quantization_info().offset; - // If the input tensor has less than 16 rows, we run a special version of GEMMLowp without reshaping the input tensors - _is_interleaved_transposed = (a->info()->dimension(1)) > 16 && (CLScheduler::get().target() != GPUTarget::BIFROST); + // Get the GPU target + const GPUTarget gpu_target = CLScheduler::get().target(); - // Set the target for the matrix multiply kernel - _mm_kernel.set_target(CLScheduler::get().target()); + // Set the target for the kernels + _mtx_a_reshape_kernel.set_target(gpu_target); + _mm_kernel.set_target(gpu_target); const ICLTensor *matrix_a = a; const ICLTensor *matrix_b = b; + // Arguments used by GEMMReshapeInfo + // If we pass the matrix A and matrix B reshaped to CLGEMMMatrixMultiplyKernel, we need to pass m, n, k, mult_transpose1xW_width and mult_interleave4x4_height to CLGEMMReshapeInfo + // in order to know how the matrices have been reshaped + const int m = a->info()->dimension(1); + const int n = b->info()->dimension(0); + const int k = a->info()->dimension(0); + constexpr int mult_transpose1xW_width = 1; + constexpr int mult_interleave4x4_height = 1; + + // Check if we need to reshape the matrix A 
and matrix B + _is_interleaved_transposed = is_interleaved_transposed(m, n, k, _reshape_b_only_on_first_run, gpu_target); + if(_is_interleaved_transposed) { matrix_a = &_tmp_a; matrix_b = &_tmp_b; - TensorInfo info_a(compute_interleaved_shape(*a->info()), 1, a->info()->data_type()); - TensorInfo info_b(compute_transpose1xW_shape(*b->info()), 1, b->info()->data_type()); - _tmp_a.allocator()->init(info_a); - _tmp_b.allocator()->init(info_b); _memory_group.manage(&_tmp_a); _memory_group.manage(&_tmp_b); // Configure interleave kernel - _mtx_a_reshape_kernel.configure(a, &_tmp_a); + _mtx_a_reshape_kernel.configure(a, &_tmp_a, mult_interleave4x4_height); // Configure transpose kernel - _mtx_b_reshape_kernel.configure(b, &_tmp_b); + _mtx_b_reshape_kernel.configure(b, &_tmp_b, mult_transpose1xW_width); } // Configure matrix multiply kernel - _mm_kernel.configure(matrix_a, matrix_b, output, _is_interleaved_transposed); + _mm_kernel.configure(matrix_a, matrix_b, output, _is_interleaved_transposed, GEMMReshapeInfo(m, n, k, mult_transpose1xW_width, mult_interleave4x4_height)); // Initialize matrix B reduction kernel only if _a_offset is not equal to 0 if(_a_offset != 0) @@ -139,22 +171,30 @@ Status CLGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_a_reshaped(), "Matrix A already reshaped is not supported"); ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_b_reshaped(), "Matrix B already reshaped is not supported"); - int32_t a_offset = a->quantization_info().offset; - int32_t b_offset = b->quantization_info().offset; - bool is_interleaved_transposed = (a->dimension(1)) > 16 && (CLScheduler::get().target() != GPUTarget::BIFROST); + int32_t a_offset = a->quantization_info().offset; + int32_t b_offset = b->quantization_info().offset; + + const int m = a->dimension(1); + const int n = b->dimension(0); + const int k = a->dimension(0); + constexpr int mult_transpose1xW_width = 1; + constexpr int mult_interleave4x4_height = 1; + const GEMMReshapeInfo reshape_info(m, n, k, mult_transpose1xW_width, mult_interleave4x4_height); + + bool reshape_matrices = is_interleaved_transposed(m, n, k, gemm_info.reshape_b_only_on_first_run(), CLScheduler::get().target()); - if(is_interleaved_transposed) + if(reshape_matrices) { - TensorInfo info_a(compute_interleaved_shape(*a), 1, a->data_type()); - TensorInfo info_b(compute_transpose1xW_shape(*b), 1, b->data_type()); + TensorInfo info_a(compute_interleaved_shape(*a, mult_interleave4x4_height), 1, a->data_type()); + TensorInfo info_b(compute_transpose1xW_with_element_size_shape(*b, mult_transpose1xW_width), 1, b->data_type()); - ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMInterleave4x4Kernel::validate(a, &info_a, 1)); - ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMTranspose1xWKernel::validate(b, &info_b, 1)); - ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyKernel::validate(&info_a, &info_b, output)); + ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMInterleave4x4Kernel::validate(a, &info_a, mult_interleave4x4_height)); + ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMTranspose1xWKernel::validate(b, &info_b, mult_transpose1xW_width)); + ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyKernel::validate(&info_a, &info_b, output, reshape_matrices, reshape_info)); } else { - ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyKernel::validate(a, b, output)); + ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyKernel::validate(a, b, output, reshape_matrices, reshape_info)); } TensorInfo info_vector_sum_col, info_vector_sum_row; -- cgit v1.2.1
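For reference, the data layout the new gemmlowp_mm_interleaved_transposed_midgard/bifrost kernels assume can be reproduced on the host with a short C++ sketch. This is illustrative only, not ACL code: it assumes mult_interleave4x4_height = 1 and mult_transpose1xW_width = 1 (the values CLGEMMLowpMatrixMultiplyCore passes), element size 1 (so the transpose block width is 16), and dimensions that divide evenly; the helper names are invented. It builds the interleaved A and transposed B buffers, then computes one 4x4 S32 output tile with the same addressing scheme the kernels use (4 bytes of A and 4 bytes of B per k step, stepping 4 and 16 bytes respectively), and checks it against the naive definition. The real kernels unroll this loop, consuming 8 (Midgard) or 32 (Bifrost) k steps per iteration.

#include <cstdint>
#include <cstdio>
#include <vector>

using std::uint8_t;
using std::int32_t;

// Interleave 4x4 (mult_interleave4x4_height = 1): output row i holds, for each k,
// A[4*i+0][k], A[4*i+1][k], A[4*i+2][k], A[4*i+3][k]. M must be a multiple of 4 here.
static std::vector<uint8_t> interleave4x4(const std::vector<uint8_t> &a, int M, int K)
{
    std::vector<uint8_t> out(M * K);
    for(int i = 0; i < M / 4; ++i)
        for(int k = 0; k < K; ++k)
            for(int r = 0; r < 4; ++r)
                out[i * (4 * K) + k * 4 + r] = a[(4 * i + r) * K + k];
    return out;
}

// Transpose 1x16 (element size 1, mult_transpose1xW_width = 1): output row j holds,
// for each k, B[k][16*j .. 16*j+15]. N must be a multiple of 16 here.
static std::vector<uint8_t> transpose1x16(const std::vector<uint8_t> &b, int K, int N)
{
    std::vector<uint8_t> out(K * N);
    for(int j = 0; j < N / 16; ++j)
        for(int k = 0; k < K; ++k)
            for(int c = 0; c < 16; ++c)
                out[j * (16 * K) + k * 16 + c] = b[k * N + 16 * j + c];
    return out;
}

int main()
{
    const int M = 4, N = 16, K = 8;
    std::vector<uint8_t> A(M * K), B(K * N);
    for(int i = 0; i < M * K; ++i) A[i] = static_cast<uint8_t>(i + 1);
    for(int i = 0; i < K * N; ++i) B[i] = static_cast<uint8_t>(3 * i + 7);

    const std::vector<uint8_t> Ar = interleave4x4(A, M, K);
    const std::vector<uint8_t> Br = transpose1x16(B, K, N);

    // One work-item computes a 4x4 S32 tile. With TRANSPOSE1XW_WIDTH_STEP = 4,
    // gid0 = 1 selects output columns 4..7 (offset_row_b = 4) and gid1 = 0 selects rows 0..3.
    const int gid0 = 1, gid1 = 0;
    const uint8_t *pa = Ar.data() + gid1 * (4 * K);        // row gid1 of interleaved A
    const uint8_t *pb = Br.data() + (gid0 / 4) * (16 * K)  // row gid0/4 of transposed B
                        + (gid0 % 4) * 4;                  // 4-column offset inside the 1x16 block
    int32_t c[4][4] = {};
    for(int k = 0; k < K; ++k, pa += 4, pb += 16)
        for(int r = 0; r < 4; ++r)
            for(int col = 0; col < 4; ++col)
                c[r][col] += static_cast<int32_t>(pa[r]) * static_cast<int32_t>(pb[col]);

    // Cross-check against the naive definition C = A * B for the same 4x4 block.
    for(int r = 0; r < 4; ++r)
        for(int col = 0; col < 4; ++col)
        {
            int32_t ref = 0;
            for(int k = 0; k < K; ++k)
                ref += static_cast<int32_t>(A[(4 * gid1 + r) * K + k]) * static_cast<int32_t>(B[k * N + 4 * gid0 + col]);
            std::printf("c[%d][%d]=%d ref=%d\n", r, col, static_cast<int>(c[r][col]), static_cast<int>(ref));
        }
    return 0;
}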
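On the selection heuristic introduced in CLGEMMLowpMatrixMultiplyCore: reading the is_interleaved_transposed() helper in the diff, non-Bifrost targets always take the reshaped path, while on Bifrost it is taken only when k > 256, m > 4 and matrix B is reshaped only on the first run, and the cost model (0.72 + 0.10766 * n) < (0.1284 * n) holds. That inequality reduces to n > 0.72 / 0.02074, i.e. roughly n >= 35, so the reshape overhead is paid only when matrix B is wide enough to amortise it; otherwise the non-reshaped gemmlowp_mm_midgard/bifrost kernels are used.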