From 8a383694445dfebb84732b19d5b3299961e8ffe3 Mon Sep 17 00:00:00 2001
From: Gian Marco Iodice
Date: Mon, 3 Jul 2017 17:41:47 +0100
Subject: COMPMID-434 - Port CLGEMM to support 16 bit fixed point

Change-Id: I30aef3c7ecd1ee740c2a7f2ce65a63c7dcd66e49
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/79630
Reviewed-by: Anthony Barbier
Tested-by: Kaizen
---
 .../core/CL/kernels/CLGEMMInterleave4x4Kernel.h    |   2 +-
 .../core/CL/kernels/CLGEMMMatrixAdditionKernel.h   |   2 +-
 .../core/CL/kernels/CLGEMMMatrixMultiplyKernel.h   |   2 +-
 .../core/CL/kernels/CLGEMMTranspose1xWKernel.h     |   2 +-
 arm_compute/runtime/CL/functions/CLGEMM.h          |   2 +-
 src/core/CL/CLKernelLibrary.cpp                    |   3 +
 src/core/CL/cl_kernels/fixed_point.h               |  15 ++
 src/core/CL/cl_kernels/gemm.cl                     | 215 ++++++++++++++++++++-
 src/core/CL/kernels/CLGEMMInterleave4x4Kernel.cpp  |   3 +-
 src/core/CL/kernels/CLGEMMMatrixAdditionKernel.cpp |  17 +-
 src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp |  16 +-
 src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp   |   3 +-
 src/runtime/CL/functions/CLGEMM.cpp                |   2 +-
 tests/validation/CL/GEMM.cpp                       |  12 +-
 14 files changed, 270 insertions(+), 26 deletions(-)

diff --git a/arm_compute/core/CL/kernels/CLGEMMInterleave4x4Kernel.h b/arm_compute/core/CL/kernels/CLGEMMInterleave4x4Kernel.h
index 9466b16a91..203e0fc6c4 100644
--- a/arm_compute/core/CL/kernels/CLGEMMInterleave4x4Kernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMInterleave4x4Kernel.h
@@ -64,7 +64,7 @@ public:
     CLGEMMInterleave4x4Kernel &operator=(CLGEMMInterleave4x4Kernel &&) = default;
     /** Initialise the kernel's input and output.
      *
-     * @param[in]  input  Input tensor. Data types supported: U8/S8/QS8/U16/S16/F16/U32/S32/F32
+     * @param[in]  input  Input tensor. Data types supported: U8/S8/QS8/U16/S16/QS16/F16/U32/S32/F32
      * @param[out] output Output tensor. Data type supported: same as @p input
      */
     void configure(const ICLTensor *input, ICLTensor *output);
diff --git a/arm_compute/core/CL/kernels/CLGEMMMatrixAdditionKernel.h b/arm_compute/core/CL/kernels/CLGEMMMatrixAdditionKernel.h
index b3a85a1706..ada67d1eaf 100644
--- a/arm_compute/core/CL/kernels/CLGEMMMatrixAdditionKernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMMatrixAdditionKernel.h
@@ -52,7 +52,7 @@ public:
      *
      * @note The input and output tensors must have the same dimensions
      *
-     * @param[in]      input  Input tensor (Matrix C). Data types supported: QS8/F16/F32
+     * @param[in]      input  Input tensor (Matrix C). Data types supported: QS8/QS16/F16/F32
      * @param[in, out] output Output tensor. If this kernel is used to finalize the GEMM result (alpha * AB + beta * C), output must contain the result obtained by @ref CLGEMMMatrixMultiplyKernel. Data type supported: same as @p input
      * @param[in]      beta   Weight of matrix C
      */
diff --git a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h
index 7625358b8b..dec63e0679 100644
--- a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h
@@ -53,7 +53,7 @@ public:
     CLGEMMMatrixMultiplyKernel &operator=(CLGEMMMatrixMultiplyKernel &&) = default;
     /** Initialise the kernel's input, output and alpha
      *
-     * @param[in]  input0 Input tensor containing the interleaved Matrix A or the vector A. Data types supported: QS8/F16/F32
+     * @param[in]  input0 Input tensor containing the interleaved Matrix A or the vector A. Data types supported: QS8/QS16/F16/F32
      * @param[in]  input1 Input tensor containing the transposed Matrix B if the first input tensor A is not a vector.
      *                    If the output tensor is a vector, input1 must contain the matrix B not reshaped. Data type supported: same as @p input0
      * @param[out] output Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0
diff --git a/arm_compute/core/CL/kernels/CLGEMMTranspose1xWKernel.h b/arm_compute/core/CL/kernels/CLGEMMTranspose1xWKernel.h
index 9657a2af45..0e467aac13 100644
--- a/arm_compute/core/CL/kernels/CLGEMMTranspose1xWKernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMTranspose1xWKernel.h
@@ -70,7 +70,7 @@ class CLGEMMTranspose1xWKernel : public ICLSimple2DKernel
 public:
     /** Initialise the kernel's input and output.
      *
-     * @param[in]  input  Input tensor. Data types supported: U8/S8/QS8/U16/S16/F16/U32/S32/F32
+     * @param[in]  input  Input tensor. Data types supported: U8/S8/QS8/U16/S16/QS16/F16/U32/S32/F32
     * @param[out] output Output tensor. Data type supported: same as @p input
      */
     void configure(const ICLTensor *input, ICLTensor *output);
diff --git a/arm_compute/runtime/CL/functions/CLGEMM.h b/arm_compute/runtime/CL/functions/CLGEMM.h
index 080f497b7b..9207efd68f 100644
--- a/arm_compute/runtime/CL/functions/CLGEMM.h
+++ b/arm_compute/runtime/CL/functions/CLGEMM.h
@@ -57,7 +57,7 @@ public:
      *
      * @note Whilst the first input tensor can be a vector, the second input tensor must be at least a matrix
      *
-     * @param[in]  a      First input tensor (Matrix or Vector A). Data types supported: QS8/F16/F32
+     * @param[in]  a      First input tensor (Matrix or Vector A). Data types supported: QS8/QS16/F16/F32
      * @param[in]  b      Second input tensor (Matrix B). Data type supported: same as @p a.
      * @param[in]  c      Third input tensor (Matrix C). It can be a nullptr if just the multiplication between @p a and @p b is needed. Data type supported: same as @p a.
      * @param[out] output Output tensor. Data type supported: same as @p a
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index dd3531e858..72230435d8 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -158,14 +158,17 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
     { "gemm_ma_f16", "gemm.cl" },
     { "gemm_ma_f32", "gemm.cl" },
     { "gemm_ma_qs8", "gemm.cl" },
+    { "gemm_ma_qs16", "gemm.cl" },
     { "gemm_mm_u8", "gemm.cl" },
     { "gemm_mm_f16", "gemm.cl" },
     { "gemm_mm_f32_midgard", "gemm.cl" },
     { "gemm_mm_f32_bifrost", "gemm.cl" },
     { "gemm_mm_qs8", "gemm.cl" },
+    { "gemm_mm_qs16", "gemm.cl" },
     { "gemm_vm_f16", "gemm.cl" },
     { "gemm_vm_f32", "gemm.cl" },
     { "gemm_vm_qs8", "gemm.cl" },
+    { "gemm_vm_qs16", "gemm.cl" },
     { "gemm_lc_vm_f32", "gemm.cl" },
     { "gemm_transpose1x16", "gemm.cl" },
     { "gemm_transpose1x8", "gemm.cl" },
diff --git a/src/core/CL/cl_kernels/fixed_point.h b/src/core/CL/cl_kernels/fixed_point.h
index 32e49c2fad..dcdf840444 100644
--- a/src/core/CL/cl_kernels/fixed_point.h
+++ b/src/core/CL/cl_kernels/fixed_point.h
@@ -35,16 +35,21 @@ TYPE_ALIAS(char, qs8)
 TYPE_ALIAS(short, qs16)
+TYPE_ALIAS(int, qs32)
 
 #define qs8_MIN ((char)CHAR_MIN)
 #define qs8_MAX ((char)CHAR_MAX)
 #define qs16_MIN ((short)SHRT_MIN)
 #define qs16_MAX ((short)SHRT_MAX)
+#define qs32_MIN ((int)INT_MIN)
+#define qs32_MAX ((int)INT_MAX)
 #define qu8_MIN ((uchar)0)
 #define qu8_MAX ((uchar)UCHAR_MAX)
 #define qu16_MIN ((ushort)0)
 #define qu16_MAX ((ushort)USHRT_MAX)
+#define qu32_MIN ((uint)0)
+#define qu32_MAX ((uint)UINT_MAX)
 
 #define qs8_TYPE char
 #define qs8x1_TYPE char
@@ -60,6 +65,13 @@ TYPE_ALIAS(short, qs16)
 #define qs16x8_TYPE short8
 #define qs16x16_TYPE short16
 
+#define qs32_TYPE int
+#define qs32x1_TYPE int
+#define qs32x2_TYPE int2
+#define qs32x4_TYPE int4
+#define qs32x8_TYPE int8
+#define qs32x16_TYPE int16
+
 /* All internal constants are represented in the maximum supported fixed point format (QS16),
  * thus we define an additional shift parameter required to convert the constant
  * from the maximum supported format to the require one.
@@ -166,6 +178,7 @@ SUBQ_SAT_IMPL(qs8x16)
 }
 
 MULQ_SAT_IMPL(qs8x16, qs16x16)
+MULQ_SAT_IMPL(qs16x8, qs32x8)
 
 #define MUL_SAT_OP_EXPAND_STR(a, b, type, size, position) mul_sat_##type##x##size((a), (b), (position))
 #define MUL_SAT_OP_EXPAND(a, b, type, size, position) MUL_SAT_OP_EXPAND_STR(a, b, type, size, position)
@@ -186,6 +199,7 @@ MULQ_SAT_IMPL(qs8x16, qs16x16)
 
 MLAQ_SAT_IMPL(qs8x8, qs16x8)
 MLAQ_SAT_IMPL(qs8x16, qs16x16)
+MLAQ_SAT_IMPL(qs16x8, qs32x8)
 
 #define MLA_SAT_OP_EXPAND_STR(a, b, c, type, size, position) mla_sat_##type##x##size((a), (b), (c), (position))
 #define MLA_SAT_OP_EXPAND(a, b, c, type, size, position) MLA_SAT_OP_EXPAND_STR(a, b, c, type, size, position)
@@ -205,6 +219,7 @@ MLAQ_SAT_IMPL(qs8x16, qs16x16)
 }
 
 MLALQ_SAT_IMPL(qs8x8, qs16x8)
+MLALQ_SAT_IMPL(qs16x8, qs32x8)
 
 #define MLAL_SAT_OP_EXPAND_STR(a, b, c, type, size, position) mlal_sat_##type##x##size((a), (b), (c), (position))
 #define MLAL_SAT_OP_EXPAND(a, b, c, type, size, position) MLAL_SAT_OP_EXPAND_STR(a, b, c, type, size, position)
diff --git a/src/core/CL/cl_kernels/gemm.cl b/src/core/CL/cl_kernels/gemm.cl
index d25621db64..7ac421b7b6 100644
--- a/src/core/CL/cl_kernels/gemm.cl
+++ b/src/core/CL/cl_kernels/gemm.cl
@@ -888,7 +888,93 @@ __kernel void gemm_mm_qs8(IMAGE_DECLARATION(src0),
     vstore16(c20_qs8, 0, (__global char *)(offset(&dst, 0, 2)));
     vstore16(c30_qs8, 0, (__global char *)(offset(&dst, 0, 3)));
 }
-#endif /* FIXED_POINT_POSITION */
+
+/** This OpenCL kernel computes the matrix multiplication between matrix A (src0) and matrix B (src1) in 16 bit fixed point precision
+ *  Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_16bit and @ref gemm_transpose1x8 before running the matrix multiplication
+ *
+ * @attention The width of matrix B, the alpha's value and fixed point position need to be passed at compile time using -DWIDTH_MATRIX_B -DALPHA and -DFIXED_POINT_POSITION
+ *
+ * @note: ALPHA must be passed in 16 bit fixed point format
+ *
+ * @param[in]  src0_ptr                           Pointer to the source matrix. Supported data types: QS16
+ * @param[in]  src0_stride_x                      Stride of the source matrix in X dimension (in bytes)
+ * @param[in]  src0_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  src0_stride_y                      Stride of the source matrix in Y dimension (in bytes)
+ * @param[in]  src0_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  src0_offset_first_element_in_bytes The offset of the first element in the source matrix
+ * @param[in]  src1_ptr                           Pointer to the source matrix. Supported data types: same as @p src0_ptr
+ * @param[in]  src1_stride_x                      Stride of the source matrix in X dimension (in bytes)
+ * @param[in]  src1_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  src1_stride_y                      Stride of the source matrix in Y dimension (in bytes)
+ * @param[in]  src1_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  src1_offset_first_element_in_bytes The offset of the first element in the source matrix
+ * @param[out] dst_ptr                            Pointer to the destination matrix Supported data types: same as @p src0_ptr
+ * @param[in]  dst_stride_x                       Stride of the destination matrix in X dimension (in bytes)
+ * @param[in]  dst_step_x                         dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  dst_stride_y                       Stride of the destination matrix in Y dimension (in bytes)
+ * @param[in]  dst_step_y                         dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  dst_offset_first_element_in_bytes  The offset of the first element in the destination matrix
+ */
+__kernel void gemm_mm_qs16(IMAGE_DECLARATION(src0),
+                           IMAGE_DECLARATION(src1),
+                           IMAGE_DECLARATION(dst))
+{
+    /* src_addr.s0 = address of matrix A */
+    /* src_addr.s1 = address of matrix B */
+
+    /* Compute address for matrix A and B */
+    int2 src_addr = (int2)(get_global_id(1), get_global_id(0)) * (int2)((src0_stride_y),
+                                                                        (src1_stride_y));
+
+    /* Add offset_first_element_in_bytes */
+    src_addr = src_addr + ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes));
+
+    /* Divide by 2 in order to get the src_addr in unit of short */
+    src_addr = src_addr >> 1;
+
+    /* Compute end row address for matrix B */
+    int end_row_mtx_b = src_addr.s1 + WIDTH_MATRIX_B;
+
+    /* Reset accumulators */
+    int8 c00 = 0.0f;
+    int8 c10 = 0.0f;
+    int8 c20 = 0.0f;
+    int8 c30 = 0.0f;
+
+    /* This for loop performs 1 accumulation for each iteration */
+    for(; src_addr.s1 <= (end_row_mtx_b - 8); src_addr += (int2)(4, 8))
+    {
+        /* Load values from matrix A (interleaved) and matrix B (transposed) */
+        short4 a0 = vload4(0, ((__global short *)src0_ptr) + src_addr.s0);
+        short8 b0 = vload8(0, ((__global short *)src1_ptr) + src_addr.s1);
+
+        c00 = mlal_sat_qs16x8(c00, (short8)a0.s0, b0, FIXED_POINT_POSITION);
+        c10 = mlal_sat_qs16x8(c10, (short8)a0.s1, b0, FIXED_POINT_POSITION);
+        c20 = mlal_sat_qs16x8(c20, (short8)a0.s2, b0, FIXED_POINT_POSITION);
+        c30 = mlal_sat_qs16x8(c30, (short8)a0.s3, b0, FIXED_POINT_POSITION);
+    }
+
+    /* Compute destination address */
+    Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
+
+    /* Multiply by the weight of matrix product */
+    short8 c00_qs16 = convert_short8_sat(c00);
+    short8 c10_qs16 = convert_short8_sat(c10);
+    short8 c20_qs16 = convert_short8_sat(c20);
+    short8 c30_qs16 = convert_short8_sat(c30);
+
+    c00_qs16 = mul_sat_qs16x8(c00_qs16, (short8)ALPHA, FIXED_POINT_POSITION);
+    c10_qs16 = mul_sat_qs16x8(c10_qs16, (short8)ALPHA, FIXED_POINT_POSITION);
+    c20_qs16 = mul_sat_qs16x8(c20_qs16, (short8)ALPHA, FIXED_POINT_POSITION);
+    c30_qs16 = mul_sat_qs16x8(c30_qs16, (short8)ALPHA, FIXED_POINT_POSITION);
+
+    /* Store 8x4 block */
+    vstore8(c00_qs16, 0, (__global short *)(offset(&dst, 0, 0)));
+    vstore8(c10_qs16, 0, (__global short *)(offset(&dst, 0, 1)));
+    vstore8(c20_qs16, 0, (__global short *)(offset(&dst, 0, 2)));
+    vstore8(c30_qs16, 0, (__global short *)(offset(&dst, 0, 3)));
+}
+#endif // defined(FIXED_POINT_POSITION)
 
 #ifdef WIDTH_VECTOR_A
 /** This OpenCL kernel computes the vector by matrix multiplication between the vector A (src0) and matrix B (src1)
@@ -1111,9 +1197,87 @@ __kernel void gemm_vm_qs8(IMAGE_DECLARATION(src0),
     /* Store 16 values */
     vstore16(acc_qs8, 0, (__global char *)(offset(&dst, 0, 0)));
 }
-#endif /* FIXED_POINT_POSITION */
-#endif /* WIDTH_VECTOR_A */
-#endif /* WIDTH_MATRIX_B && ALPHA */
+
+/** This OpenCL kernel computes the vector by matrix multiplication between the vector A (src0) and matrix B (src1) in 16 bit fixed point
+ *
+ * @attention The width of vector A, the width of matrix B, the alpha's value and the fixed point position need to be passed at compile time using -DWIDTH_VECTOR_A -DWIDTH_MATRIX_B, -DALPHA and -DFIXED_POINT_POSITION
+ *
+ * @attention The input vector A and matrix B must not be reshaped
+ *
+ * @note: ALPHA must be passed in 16 bit fixed point format
+ *
+ * @param[in]  src0_ptr                           Pointer to the source matrix. Supported data types: QS16
+ * @param[in]  src0_stride_x                      Stride of the source matrix in X dimension (in bytes)
+ * @param[in]  src0_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  src0_stride_y                      Stride of the source matrix in Y dimension (in bytes)
+ * @param[in]  src0_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  src0_offset_first_element_in_bytes The offset of the first element in the source matrix
+ * @param[in]  src1_ptr                           Pointer to the source matrix. Supported data types: same as @p src0_ptr
+ * @param[in]  src1_stride_x                      Stride of the source matrix in X dimension (in bytes)
+ * @param[in]  src1_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  src1_stride_y                      Stride of the source matrix in Y dimension (in bytes)
+ * @param[in]  src1_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  src1_offset_first_element_in_bytes The offset of the first element in the source matrix
+ * @param[out] dst_ptr                            Pointer to the destination matrix Supported data types: same as @p src0_ptr
+ * @param[in]  dst_stride_x                       Stride of the destination matrix in X dimension (in bytes)
+ * @param[in]  dst_step_x                         dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  dst_stride_y                       Stride of the destination matrix in Y dimension (in bytes)
+ * @param[in]  dst_step_y                         dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  dst_offset_first_element_in_bytes  The offset of the first element in the destination matrix
+ */
+__kernel void gemm_vm_qs16(IMAGE_DECLARATION(src0),
+                           IMAGE_DECLARATION(src1),
+                           IMAGE_DECLARATION(dst))
+{
+    int idx = get_global_id(0) * 8;
+
+    /* Compute the address for the vector A and matrix B */
+    int2 src_addr = ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes));
+    src_addr.s1 += idx * sizeof(short);
+
+    int end_row_vec_a = src_addr.s0 + (WIDTH_VECTOR_A * sizeof(short));
+
+    /* Reset accumulator */
+    int8 acc0 = 0;
+
+    /* This for loop performs 4 accumulations per iteration */
+    for(; src_addr.s0 <= (end_row_vec_a - 4 * sizeof(short)); src_addr += (int2)(4 * sizeof(short), 4 * src1_stride_y))
+    {
+        short4 a0 = vload4(0, (__global short *)(src0_ptr + src_addr.s0));
+        short8 b0 = vload8(0, (__global short *)(src1_ptr + src_addr.s1 + 0 * src1_stride_y));
+        short8 b1 = vload8(0, (__global short *)(src1_ptr + src_addr.s1 + 1 * src1_stride_y));
+        short8 b2 = vload8(0, (__global short *)(src1_ptr + src_addr.s1 + 2 * src1_stride_y));
+        short8 b3 = vload8(0, (__global short *)(src1_ptr + src_addr.s1 + 3 * src1_stride_y));
+
+        acc0 = mlal_sat_qs16x8(acc0, (short8)a0.s0, b0, FIXED_POINT_POSITION);
+        acc0 = mlal_sat_qs16x8(acc0, (short8)a0.s1, b1, FIXED_POINT_POSITION);
+        acc0 = mlal_sat_qs16x8(acc0, (short8)a0.s2, b2, FIXED_POINT_POSITION);
+        acc0 = mlal_sat_qs16x8(acc0, (short8)a0.s3, b3, FIXED_POINT_POSITION);
+    }
+
+    /* Left-over accumulations */
+    for(; src_addr.s0 < end_row_vec_a; src_addr += (int2)(sizeof(short), src1_stride_y))
+    {
+        short  a0 = *((__global short *)(src0_ptr + src_addr.s0));
+        short8 b0 = vload8(0, (__global short *)(src1_ptr + src_addr.s1));
+
+        acc0 = mlal_sat_qs16x8(acc0, (short8)a0, b0, FIXED_POINT_POSITION);
+    }
+
+    /* Compute destination address */
+    Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
+
+    /* Multiply by the weight of matrix product */
+    short8 acc_qs16 = convert_short8_sat(acc0);
+
+    acc_qs16 = mul_sat_qs16x8(acc_qs16, (short8)ALPHA, FIXED_POINT_POSITION);
+
+    /* Store 8 values */
+    vstore8(acc_qs16, 0, (__global short *)(offset(&dst, 0, 0)));
+}
+#endif /* defined(FIXED_POINT_POSITION) */
+#endif /* defined(WIDTH_VECTOR_A) */
+#endif /* defined(WIDTH_MATRIX_B) && defined(ALPHA) */
 
 #ifdef BETA
 /** This OpenCL kernel performs the in-place matrix addition between 2 matrices taking into account that the second matrix might be weighted by a scalar value beta:
@@ -1229,8 +1393,47 @@ __kernel void gemm_ma_qs8(IMAGE_DECLARATION(src),
     /* Store final result in axb matrix */
     vstore16(out, 0, (__global char *)dst.ptr);
 }
-#endif /* FIXED_POINT_POSITION */
-#endif /* BETA */
+
+/** This OpenCL kernel performs the in-place matrix addition between 2 matrices in 16 bit fixed point taking into account that the second matrix might be weighted by a scalar value beta:
+ *
+ * @attention The beta's value and the fixed point position need to be passed at compile time using -DBETA and -DFIXED_POINT_POSITION
+ *
+ * @note: BETA must be passed in 16 bit fixed point format
+ *
+ * @param[in]  src_ptr                           Pointer to the source matrix. Supported data types: QS16
+ * @param[in]  src_stride_x                      Stride of the source matrix in X dimension (in bytes)
+ * @param[in]  src_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  src_stride_y                      Stride of the source matrix in Y dimension (in bytes)
+ * @param[in]  src_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  src_offset_first_element_in_bytes The offset of the first element in the source matrix
+ * @param[out] dst_ptr                           Pointer to the destination matrix Supported data types: same as @p src_ptr
+ * @param[in]  dst_stride_x                      Stride of the destination matrix in X dimension (in bytes)
+ * @param[in]  dst_step_x                        dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  dst_stride_y                      Stride of the destination matrix in Y dimension (in bytes)
+ * @param[in]  dst_step_y                        dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
+ */
+__kernel void gemm_ma_qs16(IMAGE_DECLARATION(src),
+                           IMAGE_DECLARATION(dst))
+{
+    /* Compute source and destination addresses */
+    Image src = CONVERT_TO_IMAGE_STRUCT(src);
+    Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
+
+    /* Load values from A x B */
+    short8 alpha_ab = vload8(0, (__global short *)dst.ptr);
+
+    /* Load values from Matrix C */
+    short8 c = vload8(0, (__global short *)src.ptr);
+
+    /* Computes alpha * axb + beta * c */
+    short8 out = mla_sat_qs16x8(alpha_ab, (short8)BETA, c, FIXED_POINT_POSITION);
+
+    /* Store final result in axb matrix */
+    vstore8(out, 0, (__global short *)dst.ptr);
+}
+#endif /* defined(FIXED_POINT_POSITION) */
+#endif /* defined(BETA) */
 
 #ifdef WIDTH_VECTOR_A
 /** This OpenCL kernel computes the vector by matrix multiplication between each row of A (src0) and matrix B (src1) used for locally connected layer
diff --git a/src/core/CL/kernels/CLGEMMInterleave4x4Kernel.cpp b/src/core/CL/kernels/CLGEMMInterleave4x4Kernel.cpp
index 3850c4d2cd..5b6e0ec6af 100644
--- a/src/core/CL/kernels/CLGEMMInterleave4x4Kernel.cpp
+++ b/src/core/CL/kernels/CLGEMMInterleave4x4Kernel.cpp
@@ -43,7 +43,8 @@ CLGEMMInterleave4x4Kernel::CLGEMMInterleave4x4Kernel()
 
 void CLGEMMInterleave4x4Kernel::configure(const ICLTensor *input, ICLTensor *output)
 {
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S8, DataType::QS8, DataType::U16, DataType::S16, DataType::U32, DataType::S32, DataType::F16, DataType::F32);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S8, DataType::QS8, DataType::U16, DataType::S16, DataType::QS16, DataType::U32, DataType::S32, DataType::F16,
+                                                  DataType::F32);
     ARM_COMPUTE_ERROR_ON_NULLPTR(output);
 
     TensorShape output_shape = input->info()->tensor_shape();
diff --git a/src/core/CL/kernels/CLGEMMMatrixAdditionKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixAdditionKernel.cpp
index 5883dd698b..d1cdd7dc61 100644
--- a/src/core/CL/kernels/CLGEMMMatrixAdditionKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixAdditionKernel.cpp
@@ -43,7 +43,7 @@ CLGEMMMatrixAdditionKernel::CLGEMMMatrixAdditionKernel()
 
 void CLGEMMMatrixAdditionKernel::configure(const ICLTensor *input, ICLTensor *output, float beta)
 {
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::F16, DataType::F32);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
     ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
     ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) != output->info()->dimension(0));
     ARM_COMPUTE_ERROR_ON(input->info()->dimension(1) != output->info()->dimension(1));
@@ -53,8 +53,19 @@ void CLGEMMMatrixAdditionKernel::configure(const ICLTensor *input, ICLTensor *ou
     const unsigned int num_elems_processed_per_iteration = max_cl_vector_width / data_size_from_type(input->info()->data_type());
 
     std::ostringstream ma_arguments;
-    ma_arguments << "-DBETA=" << (input->info()->data_type() == DataType::QS8 ? scvt_qs8_f32(beta, input->info()->fixed_point_position()) : beta) << " ";
-    ma_arguments << "-DFIXED_POINT_POSITION=" << input->info()->fixed_point_position();
+    if(is_data_type_fixed_point(input->info()->data_type()))
+    {
+        ma_arguments << "-DBETA=" << (input->info()->data_type() == DataType::QS8 ?
+                                      scvt_qs8_f32(beta, input->info()->fixed_point_position()) :
+                                      scvt_qs16_f32(beta, input->info()->fixed_point_position()))
+                     << " ";
+        ma_arguments << "-DFIXED_POINT_POSITION=" << input->info()->fixed_point_position();
+    }
+    else
+    {
+        ma_arguments << "-DBETA=" << beta;
+    }
+
     std::set<std::string> build_opts;
     build_opts.emplace(ma_arguments.str());
diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
index 7c5b3d7866..2d6b83a97d 100644
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
@@ -50,7 +50,7 @@ CLGEMMMatrixMultiplyKernel::CLGEMMMatrixMultiplyKernel()
 
 void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, float alpha)
 {
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::QS8, DataType::F16, DataType::F32);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
     ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1, output);
     ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input0, input1, output);
 
@@ -74,8 +74,18 @@ void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTen
     std::ostringstream mm_arguments;
     mm_arguments << "-DWIDTH_MATRIX_B=" << input1->info()->dimension(0) << " ";
-    mm_arguments << "-DALPHA=" << (input0->info()->data_type() == DataType::QS8 ? scvt_qs8_f32(alpha, input0->info()->fixed_point_position()) : alpha) << " ";
-    mm_arguments << "-DFIXED_POINT_POSITION=" << input0->info()->fixed_point_position() << " ";
+    if(is_data_type_fixed_point(input0->info()->data_type()))
+    {
+        mm_arguments << "-DALPHA=" << (input0->info()->data_type() == DataType::QS8 ?
+                                       scvt_qs8_f32(alpha, input0->info()->fixed_point_position()) :
+                                       scvt_qs16_f32(alpha, input0->info()->fixed_point_position()))
+                     << " ";
+        mm_arguments << "-DFIXED_POINT_POSITION=" << input0->info()->fixed_point_position() << " ";
+    }
+    else
+    {
+        mm_arguments << "-DALPHA=" << alpha << " ";
+    }
     std::set<std::string> build_opts;
 
     // Check if the output tensor is a vector. If so,the kernel runs the vector-matrix multiplication
diff --git a/src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp b/src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp
index ecef7e1774..73c8429055 100644
--- a/src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp
@@ -40,7 +40,8 @@ using namespace arm_compute;
 
 void CLGEMMTranspose1xWKernel::configure(const ICLTensor *input, ICLTensor *output)
 {
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S8, DataType::QS8, DataType::U16, DataType::S16, DataType::U32, DataType::S32, DataType::F16, DataType::F32);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S8, DataType::QS8, DataType::U16, DataType::S16, DataType::QS16, DataType::U32, DataType::S32, DataType::F16,
+                                                  DataType::F32);
     ARM_COMPUTE_ERROR_ON_NULLPTR(output);
 
     TensorShape output_shape{ input->info()->tensor_shape() };
diff --git a/src/runtime/CL/functions/CLGEMM.cpp b/src/runtime/CL/functions/CLGEMM.cpp
index 6d22825694..935e856333 100644
--- a/src/runtime/CL/functions/CLGEMM.cpp
+++ b/src/runtime/CL/functions/CLGEMM.cpp
@@ -45,7 +45,7 @@ CLGEMM::CLGEMM()
 
 void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *c, ICLTensor *output, float alpha, float beta)
 {
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QS8, DataType::F16, DataType::F32);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
     ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, b, output);
 
     if(c != nullptr)
diff --git a/tests/validation/CL/GEMM.cpp b/tests/validation/CL/GEMM.cpp
index f79d84f271..a9b35a8f62 100644
--- a/tests/validation/CL/GEMM.cpp
+++ b/tests/validation/CL/GEMM.cpp
@@ -50,7 +50,7 @@ using namespace arm_compute::test::validation;
 namespace
 {
 const float tolerance_f32 = 1e-03f; /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
-const float tolerance_qs8 = 1.0f;   /**< Tolerance value for comparing reference's output against implementation's output for DataType::QS8 */
+const float tolerance_q   = 1.0f;   /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
 
 CLTensor compute_gemm(const TensorShape &src_shape1, const TensorShape &src_shape2, const TensorShape &src_shape3, const TensorShape &out_shape, float alpha, float beta,
                       DataType dt, int fixed_point_position = 0)
@@ -104,7 +104,7 @@ BOOST_AUTO_TEST_SUITE(GEMM)
 
 BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit") * boost::unit_test::label("nightly"))
 BOOST_DATA_TEST_CASE(Configuration,
-                     SmallGEMMDataset() * boost::unit_test::data::make({ DataType::F32, DataType::QS8 }),
+                     SmallGEMMDataset() * boost::unit_test::data::make({ DataType::F32, DataType::QS8, DataType::QS16 }),
                      gemm_set, dt)
 {
     // Set fixed point position data type allowed
@@ -169,7 +169,7 @@ BOOST_AUTO_TEST_SUITE_END()
 
 BOOST_AUTO_TEST_SUITE(Quantized)
 BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(SmallGEMM, SmallGEMMDataset() * boost::unit_test::data::make(DataType::QS8) * boost::unit_test::data::xrange(4, 7),
+BOOST_DATA_TEST_CASE(SmallGEMM, SmallGEMMDataset() * boost::unit_test::data::make({ DataType::QS8, DataType::QS16 }) * boost::unit_test::data::xrange(4, 7),
                      gemm_set, dt, fixed_point_position)
 {
     // Compute reference
@@ -179,11 +179,11 @@ BOOST_DATA_TEST_CASE(SmallGEMM, SmallGEMMDataset() * boost::unit_test::data::mak
     CLTensor dst = compute_gemm(gemm_set.shape_a, gemm_set.shape_b, gemm_set.shape_c, gemm_set.shape_d, gemm_set.alpha, gemm_set.beta, dt, fixed_point_position);
 
     // Validate output
-    validate(CLAccessor(dst), ref_dst, tolerance_qs8);
+    validate(CLAccessor(dst), ref_dst, tolerance_q);
 }
 
 BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
-BOOST_DATA_TEST_CASE(LargeGEMM, LargeGEMMDataset() * boost::unit_test::data::make(DataType::QS8) * boost::unit_test::data::xrange(4, 7),
+BOOST_DATA_TEST_CASE(LargeGEMM, LargeGEMMDataset() * boost::unit_test::data::make({ DataType::QS8, DataType::QS16 }) * boost::unit_test::data::xrange(4, 7),
                      gemm_set, dt, fixed_point_position)
 {
     // Compute reference
@@ -193,7 +193,7 @@ BOOST_DATA_TEST_CASE(LargeGEMM, LargeGEMMDataset() * boost::unit_test::data::mak
     CLTensor dst = compute_gemm(gemm_set.shape_a, gemm_set.shape_b, gemm_set.shape_c, gemm_set.shape_d, gemm_set.alpha, gemm_set.beta, dt, fixed_point_position);
 
     // Validate output
-    validate(CLAccessor(dst), ref_dst, tolerance_qs8);
+    validate(CLAccessor(dst), ref_dst, tolerance_q);
 }
 
 BOOST_AUTO_TEST_SUITE_END()
--
cgit v1.2.1
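A note on the QS16 arithmetic the new kernels rely on: a QSn value stores a real number v as round(v * 2^FIXED_POINT_POSITION) in a signed n-bit integer, products are accumulated in the widened QS32 (int8) registers, and the result is only narrowed back to 16 bits with saturation after the ALPHA (or BETA) scaling. The scalar C sketch below mirrors that flow for a single element; the helper names sat_qs16, mul_sat_qs16 and mlal_qs32, and the round-to-nearest detail, are illustrative assumptions standing in for the library's OpenCL vector macros (mul_sat_qs16x8, mlal_sat_qs16x8), not its actual API.

    #include <stdint.h>
    #include <stdio.h>

    /* Saturate a 32-bit intermediate back to the QS16 range. */
    static int16_t sat_qs16(int32_t v)
    {
        if(v > INT16_MAX) return INT16_MAX;
        if(v < INT16_MIN) return INT16_MIN;
        return (int16_t)v;
    }

    /* a * b in QS16 with round-to-nearest and a saturated result
     * (scalar analogue of mul_sat_qs16x8). */
    static int16_t mul_sat_qs16(int16_t a, int16_t b, int fpp)
    {
        int32_t round = 1 << (fpp - 1);
        return sat_qs16(((int32_t)a * (int32_t)b + round) >> fpp);
    }

    /* acc + a * b with a widened 32-bit accumulator
     * (scalar analogue of mlal_sat_qs16x8). */
    static int32_t mlal_qs32(int32_t acc, int16_t a, int16_t b, int fpp)
    {
        int32_t round = 1 << (fpp - 1);
        return acc + (((int32_t)a * (int32_t)b + round) >> fpp);
    }

    int main(void)
    {
        const int fpp   = 5;                             /* FIXED_POINT_POSITION, as exercised by xrange(4, 7) */
        int16_t   a     = (int16_t)(1.5f * (1 << fpp));  /* 1.5  in QS16, raw value 48 */
        int16_t   b     = (int16_t)(2.25f * (1 << fpp)); /* 2.25 in QS16, raw value 72 */
        int16_t   alpha = (int16_t)(1.0f * (1 << fpp));  /* ALPHA = 1.0 */

        int32_t acc = 0;
        acc = mlal_qs32(acc, a, b, fpp);                 /* one GEMM accumulation step */

        int16_t out = mul_sat_qs16(sat_qs16(acc), alpha, fpp);
        printf("%.4f\n", out / (float)(1 << fpp));       /* prints 3.3750 = 1.5 * 2.25 */
        return 0;
    }

Keeping the accumulator at 32 bits is what lets gemm_mm_qs16 and gemm_vm_qs16 add many products before any clamping can occur; only the final conversion and the ALPHA/BETA scaling saturate to the QS16 range.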