From 3a3066bf2b35deee0f29403d1a1b15590e4860c5 Mon Sep 17 00:00:00 2001 From: Gian Marco Iodice Date: Fri, 23 Jun 2017 13:38:14 +0100 Subject: COMPMID-411 - Port CLGEMM to support 8 bit fixed point Change-Id: I6c8bd69ae9715e4d83d128b2162fc15aa5561afb Reviewed-on: http://mpd-gerrit.cambridge.arm.com/78804 Tested-by: Kaizen Reviewed-by: Georgios Pinitas Reviewed-by: Moritz Pflanzer --- src/core/CL/CLKernelLibrary.cpp | 3 + src/core/CL/cl_kernels/fixed_point.h | 67 ++++- src/core/CL/cl_kernels/gemm.cl | 290 ++++++++++++++++++--- src/core/CL/kernels/CLGEMMMatrixAdditionKernel.cpp | 9 +- src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp | 15 +- src/runtime/CL/functions/CLGEMM.cpp | 30 +-- src/runtime/NEON/functions/NEGEMM.cpp | 5 +- 7 files changed, 336 insertions(+), 83 deletions(-) (limited to 'src') diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp index 081edac8e1..6cf5ce2564 100644 --- a/src/core/CL/CLKernelLibrary.cpp +++ b/src/core/CL/CLKernelLibrary.cpp @@ -157,12 +157,15 @@ const std::map CLKernelLibrary::_kernel_program_map = { "gemm_interleave4x4_32bit", "gemm.cl" }, { "gemm_ma_f16", "gemm.cl" }, { "gemm_ma_f32", "gemm.cl" }, + { "gemm_ma_qs8", "gemm.cl" }, { "gemm_mm_u8", "gemm.cl" }, { "gemm_mm_f16", "gemm.cl" }, { "gemm_mm_f32_midgard", "gemm.cl" }, { "gemm_mm_f32_bifrost", "gemm.cl" }, + { "gemm_mm_qs8", "gemm.cl" }, { "gemm_vm_f16", "gemm.cl" }, { "gemm_vm_f32", "gemm.cl" }, + { "gemm_vm_qs8", "gemm.cl" }, { "gemm_lc_vm_f32", "gemm.cl" }, { "gemm_transpose1x16", "gemm.cl" }, { "gemm_transpose1x8", "gemm.cl" }, diff --git a/src/core/CL/cl_kernels/fixed_point.h b/src/core/CL/cl_kernels/fixed_point.h index 2c100c2e28..c0855db056 100644 --- a/src/core/CL/cl_kernels/fixed_point.h +++ b/src/core/CL/cl_kernels/fixed_point.h @@ -82,9 +82,9 @@ TYPE_ALIAS(short, qs16) /* Computes max of fixed point types. * - * @param[in] type is the actual data type. + * @param[in] type the actual data type. * - * @return The result of the fixed point vector maximum. + * @return The result of the fixed point maximum. */ #define MAXQ_IMPL(type) \ inline type max_##type(type VopA, type VopB) \ @@ -103,9 +103,9 @@ MAXQ_IMPL(qs8x16) /* Computes saturated addition of fixed point types. * - * @param[in] type is the actual data type. + * @param[in] type the actual data type. * - * @return The result of the fixed point vector addition. The result is saturated in case of overflow + * @return The result of the fixed point addition. The result is saturated in case of overflow */ #define ADDQ_SAT_IMPL(type) \ inline type add_sat_##type(type VopA, type VopB) \ @@ -124,9 +124,9 @@ ADDQ_SAT_IMPL(qs8x16) /* Computes saturated subtraction of fixed point types. * - * @param[in] type is the actual data type. + * @param[in] type the actual data type. * - * @return The result of the fixed point vector subtraction. The result is saturated in case of overflow + * @return The result of the fixed point subtraction. The result is saturated in case of overflow */ #define SUBQ_SAT_IMPL(type) \ inline type sub_sat_##type(type VopA, type VopB) \ @@ -143,12 +143,12 @@ SUBQ_SAT_IMPL(qs8x16) #define SUB_SAT_OP_EXPAND_STR(a, b, type, size) sub_sat_##type##x##size((a), (b)) #define SUB_SAT_OP_EXPAND(a, b, type, size) SUB_SAT_OP_EXPAND_STR(a, b, type, size) -/* Saturate multiply of two fixed point vectors +/* Saturate multiply of two fixed point numbers * - * @param[in] type is the actual data type. - * @param[in] itype is the intermediate data type. + * @param[in] type the actual data type. 
+ * @param[in] itype the intermediate data type. * - * @return The result of the fixed point vector subtraction. The result is saturated in case of overflow + * @return The result of the fixed point multiplication. The result is saturated in case of overflow */ #define MULQ_SAT_IMPL(type, itype) \ inline type mul_sat_##type(type VopA, type VopB, int fixed_point_position) \ @@ -163,11 +163,50 @@ MULQ_SAT_IMPL(qs8x16, qs16x16) #define MUL_SAT_OP_EXPAND_STR(a, b, type, size, position) mul_sat_##type##x##size((a), (b), (position)) #define MUL_SAT_OP_EXPAND(a, b, type, size, position) MUL_SAT_OP_EXPAND_STR(a, b, type, size, position) -/** Saturate division of two fixed point vectors +/* Saturate multiply-accumulate + * + * @param[in] type the actual data type. + * @param[in] itype the intermediate data type. + * + * @return The result of the fixed point multiply-accumulate. The result is saturated in case of overflow + */ +#define MLAQ_SAT_IMPL(type, itype) \ + type mla_sat_##type(type VopA, type VopB, type VopC, int fixed_point_position) \ + { \ + itype res = mad_sat(CONVERT(VopB, itype), CONVERT(VopC, itype), (itype)(1 << (fixed_point_position - 1))); \ + return add_sat(VopA, CONVERT_SAT(res >> (itype)fixed_point_position, type)); \ + } + +MLAQ_SAT_IMPL(qs8x8, qs16x8) +MLAQ_SAT_IMPL(qs8x16, qs16x16) + +#define MLA_SAT_OP_EXPAND_STR(a, b, c, type, size, position) mla_sat_##type##x##size((a), (b), (c), (position)) +#define MLA_SAT_OP_EXPAND(a, b, c, type, size, position) MLA_SAT_OP_EXPAND_STR(a, b, c, type, size, position) + +/* Saturate multiply-accumulate long + * + * @param[in] type the actual data type. + * @param[in] itype the intermediate data type. + * + * @return The result of the fixed point multiply-accumulate long. The result is saturated in case of overflow + */ +#define MLALQ_SAT_IMPL(type, itype) \ + itype mlal_sat_##type(itype VopA, type VopB, type VopC, int fixed_point_position) \ + { \ + itype res = mad_sat(CONVERT(VopB, itype), CONVERT(VopC, itype), (itype)(1 << (fixed_point_position - 1))); \ + return add_sat(VopA, res >> (itype)fixed_point_position); \ + } + +MLALQ_SAT_IMPL(qs8x8, qs16x8) + +#define MLAL_SAT_OP_EXPAND_STR(a, b, c, type, size, position) mlal_sat_##type##x##size((a), (b), (c), (position)) +#define MLAL_SAT_OP_EXPAND(a, b, c, type, size, position) MLAL_SAT_OP_EXPAND_STR(a, b, c, type, size, position) + +/** Saturate division of two fixed point numbers * - * @param[in] stype is the actual scalar data type. - * @param[in] type is the actual data type. - * @param[in] itype is the intermediate data type. + * @param[in] stype the actual scalar data type. + * @param[in] type the actual data type. + * @param[in] itype the intermediate data type. * * @return The result of the fixed point division. The result is saturated in case of overflow */ diff --git a/src/core/CL/cl_kernels/gemm.cl b/src/core/CL/cl_kernels/gemm.cl index 9bec8d5d92..796b343bda 100644 --- a/src/core/CL/cl_kernels/gemm.cl +++ b/src/core/CL/cl_kernels/gemm.cl @@ -21,17 +21,18 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ +#include "fixed_point.h" #include "helpers.h" /** This OpenCL kernel computes the "vector" 1x4 transposition of input matrix * - * @param[in] src_ptr Pointer to the source matrix. Supported data types: F32 + * @param[in] src_ptr Pointer to the source matrix. 
Supported data types: U32/S32/F32 * @param[in] src_stride_x Stride of the source matrix in X dimension (in bytes) * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] src_stride_y Stride of the source matrix in Y dimension (in bytes) * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source matrix - * @param[out] dst_ptr Pointer to the destination matrix Supported data types: F32 + * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src_ptr * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes) * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes) @@ -57,13 +58,13 @@ __kernel void gemm_transpose1x4(IMAGE_DECLARATION(src), /** This OpenCL kernel computes the "vector" 1x8 transposition of input matrix * - * @param[in] src_ptr Pointer to the source matrix. Supported data types: F16 + * @param[in] src_ptr Pointer to the source matrix. Supported data types: U16/S16/QS16/F16 * @param[in] src_stride_x Stride of the source matrix in X dimension (in bytes) * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] src_stride_y Stride of the source matrix in Y dimension (in bytes) * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source matrix - * @param[out] dst_ptr Pointer to the destination matrix Supported data types: F16 + * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src_ptr * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes) * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes) @@ -89,13 +90,13 @@ __kernel void gemm_transpose1x8(IMAGE_DECLARATION(src), /** This OpenCL kernel computes the "vector" 1x16 transposition of input matrix * - * @param[in] src_ptr Pointer to the source matrix. Supported data types: U8 + * @param[in] src_ptr Pointer to the source matrix. 
Supported data types: U8/S8/QS8 * @param[in] src_stride_x Stride of the source matrix in X dimension (in bytes) * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] src_stride_y Stride of the source matrix in Y dimension (in bytes) * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source matrix - * @param[out] dst_ptr Pointer to the destination matrix Supported data types: U8 + * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src_ptr * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes) * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes) @@ -127,7 +128,7 @@ __kernel void gemm_transpose1x16(IMAGE_DECLARATION(src), * @param[in] src_stride_y Stride of the source matrix in Y dimension (in bytes) * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source matrix - * @param[out] dst_ptr Pointer to the destination matrix Supported data types: U32/S32/F32 + * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src_ptr * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes) * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes) @@ -162,13 +163,13 @@ __kernel void gemm_interleave4x4_32bit(IMAGE_DECLARATION(src), /** This OpenCL kernel reshapes the input matrix transposing each 4x4 block and interleaving the values * - * @param[in] src_ptr Pointer to the source matrix. Supported data types: U16/S16/F16 + * @param[in] src_ptr Pointer to the source matrix. Supported data types: U16/S16/QS16/F16 * @param[in] src_stride_x Stride of the source matrix in X dimension (in bytes) * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] src_stride_y Stride of the source matrix in Y dimension (in bytes) * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source matrix - * @param[out] dst_ptr Pointer to the destination matrix Supported data types: U16/S16/F16 + * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src_ptr * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes) * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes) @@ -203,13 +204,13 @@ __kernel void gemm_interleave4x4_16bit(IMAGE_DECLARATION(src), /** This OpenCL kernel reshapes the input matrix transposing each 4x4 block and interleaving the values * - * @param[in] src_ptr Pointer to the source matrix. Supported data types: U8/S8 + * @param[in] src_ptr Pointer to the source matrix. 
Supported data types: U8/S8/QS8 * @param[in] src_stride_x Stride of the source matrix in X dimension (in bytes) * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] src_stride_y Stride of the source matrix in Y dimension (in bytes) * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source matrix - * @param[out] dst_ptr Pointer to the destination matrix Supported data types: U8/S8 + * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src_ptr * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes) * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes) @@ -250,13 +251,13 @@ __kernel void gemm_interleave4x4_8bit(IMAGE_DECLARATION(src), * * @note The data type must be passed at compile time -DDATA_TYPE=type. e.g. -DDATA_TYPE=short * - * @param[in, out] accum_ptr Pointer to the accumulate tensor. Supported data type: F32 + * @param[in, out] accum_ptr Pointer to the accumulate tensor. Supported data type: U8/S8/QS8/U16/S16/F16/U32/S32/F32 * @param[in] accum_stride_x Stride of the accmulate tensor in X dimension (in bytes) * @param[in] accum_step_x accum_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] accum_stride_y Stride of the accumlulate tensor in Y dimension (in bytes) * @param[in] accum_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) * @param[in] accum_offset_first_element_in_bytes The offset of the first element in the accumulate tensor - * @param[in] biases_ptr Pointer to the biases vector. Same as input. + * @param[in] biases_ptr Pointer to the biases vector. Same as @p accum_ptr * @param[in] biases_stride_x Stride of the destination tensor in X dimension (in bytes) * @param[in] biases_step_x dst_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] biases_offset_first_element_in_bytes The offset of the first element in the destination tensor @@ -282,7 +283,7 @@ __kernel void gemm_accumulate_biases( #if(defined WIDTH_MATRIX_B) /** This OpenCL kernel computes the matrix multiplication between matrix A (src0) and matrix B (src1) - * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_u8 and @ref gemm_transpose1x16_u8 before running the matrix multiplication + * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_8bit and @ref gemm_transpose1x16 before running the matrix multiplication * * @attention The width of matrix B and the alpha's value need to be passed at compile time using -DWIDTH_MATRIX_B * @@ -292,13 +293,13 @@ __kernel void gemm_accumulate_biases( * @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes) * @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix - * @param[in] src1_ptr Pointer to the source matrix. Supported formats: U8 + * @param[in] src1_ptr Pointer to the source matrix. 
Supported formats: same as @p src0_ptr * @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes) * @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes) * @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix - * @param[out] dst_ptr Pointer to the destination matrix Supported formats: U8 + * @param[out] dst_ptr Pointer to the destination matrix Supported formats: same as @p src0_ptr * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes) * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes) @@ -388,7 +389,7 @@ __kernel void gemm_mm_u8(IMAGE_DECLARATION(src0), #if(defined WIDTH_MATRIX_B && defined ALPHA) /** This OpenCL kernel is optimised for Midgard. It computes the matrix multiplication between matrix A (src0) and matrix B (src1) - * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_f32 and @ref gemm_transpose1x4_f32 before running the matrix multiplication + * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_32bit and @ref gemm_transpose1x4 before running the matrix multiplication * * @attention The width of matrix B and the alpha's value need to be passed at compile time using -DWIDTH_MATRIX_B and -DALPHA * @@ -398,13 +399,13 @@ __kernel void gemm_mm_u8(IMAGE_DECLARATION(src0), * @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes) * @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix - * @param[in] src1_ptr Pointer to the source matrix. Supported data types: F32 + * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr * @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes) * @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes) * @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix - * @param[out] dst_ptr Pointer to the destination matrix Supported data types: F32 + * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes) * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes) @@ -487,7 +488,7 @@ __kernel void gemm_mm_f32_midgard(IMAGE_DECLARATION(src0), } /** This OpenCL kernel is optimised for Bifrost. 
It computes the matrix multiplication between matrix A (src0) and matrix B (src1) - * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_f32 and @ref gemm_transpose1x4_f32 before running the matrix multiplication + * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_32bit and @ref gemm_transpose1x4 before running the matrix multiplication * * @attention The width of matrix B and the alpha's value need to be passed at compile time using -DWIDTH_MATRIX_B and -DALPHA * @@ -497,13 +498,13 @@ __kernel void gemm_mm_f32_midgard(IMAGE_DECLARATION(src0), * @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes) * @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix - * @param[in] src1_ptr Pointer to the source matrix. Supported data types: F32 + * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr * @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes) * @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes) * @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix - * @param[out] dst_ptr Pointer to the destination matrix Supported data types: F32 + * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes) * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes) @@ -697,7 +698,7 @@ __kernel void gemm_mm_f32_bifrost(IMAGE_DECLARATION(src0), } /** This OpenCL kernel computes the matrix multiplication between matrix A (src0) and matrix B (src1) - * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_f16 and @ref gemm_transpose1x8_f16 before running the matrix multiplication + * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_16bit and @ref gemm_transpose1x8 before running the matrix multiplication * * @attention The width of matrix B and the alpha's value need to be passed at compile time using -DWIDTH_MATRIX_B and -DALPHA * @@ -707,13 +708,13 @@ __kernel void gemm_mm_f32_bifrost(IMAGE_DECLARATION(src0), * @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes) * @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix - * @param[in] src1_ptr Pointer to the source matrix. Supported data types: F16 + * @param[in] src1_ptr Pointer to the source matrix. 
Supported data types: same as @p src0_ptr * @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes) * @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes) * @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix - * @param[out] dst_ptr Pointer to the destination matrix Supported data types: F16 + * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes) * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes) @@ -795,6 +796,100 @@ __kernel void gemm_mm_f16(IMAGE_DECLARATION(src0), vstore8(c30, 0, (__global half *)(offset(&dst, 0, 3))); } +#if(defined FIXED_POINT_POSITION) +/** This OpenCL kernel computes the matrix multiplication between matrix A (src0) and matrix B (src1) in 8 bit fixed point precision + * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_8bit and @ref gemm_transpose1x16 before running the matrix multiplication + * + * @attention The width of matrix B, the alpha's value and fixed point position need to be passed at compile time using -DWIDTH_MATRIX_B -DALPHA and -DFIXED_POINT_POSITION + * + * @note: ALPHA must be passed in 8 bit fixed point format + * + * @param[in] src0_ptr Pointer to the source matrix. Supported data types: QS8 + * @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes) + * @param[in] src0_step_x src_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes) + * @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix + * @param[in] src1_ptr Pointer to the source matrix. 
Supported data types: same as @p src0_ptr + * @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes) + * @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes) + * @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix + * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr + * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes) + * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes) + * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix + */ +__kernel void gemm_mm_qs8(IMAGE_DECLARATION(src0), + IMAGE_DECLARATION(src1), + IMAGE_DECLARATION(dst)) +{ + /* src_addr.s0 = address of matrix A */ + /* src_addr.s1 = address of matrix B */ + + /* Compute address for matrix A and B */ + int2 src_addr = (int2)(get_global_id(1), get_global_id(0)) * (int2)((src0_stride_y), + (src1_stride_y)); + + /* Add offset_first_element_in_bytes */ + src_addr = src_addr + ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes)); + + /* Compute end row address for matrix B */ + int end_row_mtx_b = src_addr.s1 + WIDTH_MATRIX_B; + + /* Reset accumulators */ + short8 c00 = 0.0f; + short8 c10 = 0.0f; + short8 c20 = 0.0f; + short8 c30 = 0.0f; + short8 c01 = 0.0f; + short8 c11 = 0.0f; + short8 c21 = 0.0f; + short8 c31 = 0.0f; + + /* This for loop performs 1 accumulation for each iteration */ + for(; src_addr.s1 <= (end_row_mtx_b - 16); src_addr += (int2)(4, 16)) + { + /* Load values from matrix A (interleaved) and matrix B (transposed) */ + char4 a0 = vload4(0, ((__global char *)src0_ptr) + src_addr.s0); + char16 b0 = vload16(0, ((__global char *)src1_ptr) + src_addr.s1); + + c00 = mlal_sat_qs8x8(c00, (char8)a0.s0, b0.s01234567, FIXED_POINT_POSITION); + c10 = mlal_sat_qs8x8(c10, (char8)a0.s1, b0.s01234567, FIXED_POINT_POSITION); + c20 = mlal_sat_qs8x8(c20, (char8)a0.s2, b0.s01234567, FIXED_POINT_POSITION); + c30 = mlal_sat_qs8x8(c30, (char8)a0.s3, b0.s01234567, FIXED_POINT_POSITION); + + c01 = mlal_sat_qs8x8(c01, (char8)a0.s0, b0.s89ABCDEF, FIXED_POINT_POSITION); + c11 = mlal_sat_qs8x8(c11, (char8)a0.s1, b0.s89ABCDEF, FIXED_POINT_POSITION); + c21 = mlal_sat_qs8x8(c21, (char8)a0.s2, b0.s89ABCDEF, FIXED_POINT_POSITION); + c31 = mlal_sat_qs8x8(c31, (char8)a0.s3, b0.s89ABCDEF, FIXED_POINT_POSITION); + } + + /* Compute destination address */ + Image dst = CONVERT_TO_IMAGE_STRUCT(dst); + + /* Multiply by the weight of matrix product */ + char16 c00_qs8 = convert_char16_sat((short16)(c00, c01)); + char16 c10_qs8 = convert_char16_sat((short16)(c10, c11)); + char16 c20_qs8 = convert_char16_sat((short16)(c20, c21)); + char16 c30_qs8 = convert_char16_sat((short16)(c30, c31)); + + c00_qs8 = mul_sat_qs8x16(c00_qs8, (char16)ALPHA, FIXED_POINT_POSITION); + c10_qs8 = mul_sat_qs8x16(c10_qs8, (char16)ALPHA, FIXED_POINT_POSITION); + c20_qs8 = mul_sat_qs8x16(c20_qs8, (char16)ALPHA, FIXED_POINT_POSITION); + c30_qs8 = mul_sat_qs8x16(c30_qs8, (char16)ALPHA, 
FIXED_POINT_POSITION); + + /* Store 16x4 block */ + vstore16(c00_qs8, 0, (__global char *)(offset(&dst, 0, 0))); + vstore16(c10_qs8, 0, (__global char *)(offset(&dst, 0, 1))); + vstore16(c20_qs8, 0, (__global char *)(offset(&dst, 0, 2))); + vstore16(c30_qs8, 0, (__global char *)(offset(&dst, 0, 3))); +} +#endif // (defined FIXED_POINT_POSITION) + #if(defined WIDTH_VECTOR_A) /** This OpenCL kernel computes the vector by matrix multiplication between the vector A (src0) and matrix B (src1) * @@ -808,13 +903,13 @@ __kernel void gemm_mm_f16(IMAGE_DECLARATION(src0), * @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes) * @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix - * @param[in] src1_ptr Pointer to the source matrix. Supported data types: F32 + * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr * @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes) * @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes) * @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix - * @param[out] dst_ptr Pointer to the destination matrix Supported data types: F32 + * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes) * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes) @@ -874,13 +969,13 @@ __kernel void gemm_vm_f32(IMAGE_DECLARATION(src0), * @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes) * @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix - * @param[in] src1_ptr Pointer to the source matrix. Supported data types: F16 + * @param[in] src1_ptr Pointer to the source matrix. 
Supported data types: same as @p src0_ptr * @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes) * @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes) * @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix - * @param[out] dst_ptr Pointer to the destination matrix Supported data types: F16 + * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes) * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes) @@ -931,6 +1026,92 @@ __kernel void gemm_vm_f16(IMAGE_DECLARATION(src0), vstore8(acc, 0, (__global half *)(offset(&dst, 0, 0))); } + +#if(defined FIXED_POINT_POSITION) +/** This OpenCL kernel computes the vector by matrix multiplication between the vector A (src0) and matrix B (src1) in 8 bit fixed point + * + * @attention The width of vector A, the width of matrix B, the alpha's value and the fixed point position need to be passed at compile time using -DWIDTH_VECTOR_A -DWIDTH_MATRIX_B, -DALPHA and -DFIXED_POINT_POSITION + * + * @attention The input vector A and matrix B must not be reshaped + * + * @note: ALPHA must be passed in 8 bit fixed point format + * + * @param[in] src0_ptr Pointer to the source matrix. Supported data types: QS8 + * @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes) + * @param[in] src0_step_x src_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes) + * @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix + * @param[in] src1_ptr Pointer to the source matrix. 
Supported data types: same as @p src0_ptr + * @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes) + * @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes) + * @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix + * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr + * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes) + * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes) + * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix + */ +__kernel void gemm_vm_qs8(IMAGE_DECLARATION(src0), + IMAGE_DECLARATION(src1), + IMAGE_DECLARATION(dst)) +{ + int idx = get_global_id(0) * 16; + + /* Compute the address for the vector A and matrix B */ + int2 src_addr = ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes)); + src_addr.s1 += idx; + + int end_row_vec_a = src_addr.s0 + WIDTH_VECTOR_A; + + short8 acc0 = 0; + short8 acc1 = 0; + + /* This for loop performs 4 accumulations per iteration */ + for(; src_addr.s0 <= (end_row_vec_a - 4); src_addr += (int2)(4, 4 * src1_stride_y)) + { + char4 a0 = vload4(0, (__global char *)(src0_ptr + src_addr.s0)); + char16 b0 = vload16(0, (__global char *)(src1_ptr + src_addr.s1 + 0 * src1_stride_y)); + char16 b1 = vload16(0, (__global char *)(src1_ptr + src_addr.s1 + 1 * src1_stride_y)); + char16 b2 = vload16(0, (__global char *)(src1_ptr + src_addr.s1 + 2 * src1_stride_y)); + char16 b3 = vload16(0, (__global char *)(src1_ptr + src_addr.s1 + 3 * src1_stride_y)); + + acc0 = mlal_sat_qs8x8(acc0, (char8)a0.s0, b0.s01234567, FIXED_POINT_POSITION); + acc0 = mlal_sat_qs8x8(acc0, (char8)a0.s1, b1.s01234567, FIXED_POINT_POSITION); + acc0 = mlal_sat_qs8x8(acc0, (char8)a0.s2, b2.s01234567, FIXED_POINT_POSITION); + acc0 = mlal_sat_qs8x8(acc0, (char8)a0.s3, b3.s01234567, FIXED_POINT_POSITION); + + acc1 = mlal_sat_qs8x8(acc1, (char8)a0.s0, b0.s89ABCDEF, FIXED_POINT_POSITION); + acc1 = mlal_sat_qs8x8(acc1, (char8)a0.s1, b1.s89ABCDEF, FIXED_POINT_POSITION); + acc1 = mlal_sat_qs8x8(acc1, (char8)a0.s2, b2.s89ABCDEF, FIXED_POINT_POSITION); + acc1 = mlal_sat_qs8x8(acc1, (char8)a0.s3, b3.s89ABCDEF, FIXED_POINT_POSITION); + } + + /* Left-over accumulations */ + for(; src_addr.s0 < end_row_vec_a; src_addr += (int2)(1, src1_stride_y)) + { + char a0 = *((__global char *)(src0_ptr + src_addr.s0)); + char16 b0 = vload16(0, (__global char *)(src1_ptr + src_addr.s1)); + + acc0 = mlal_sat_qs8x8(acc0, (char8)a0, b0.s01234567, FIXED_POINT_POSITION); + acc1 = mlal_sat_qs8x8(acc1, (char8)a0, b0.s89ABCDEF, FIXED_POINT_POSITION); + } + + /* Compute destination address */ + Image dst = CONVERT_TO_IMAGE_STRUCT(dst); + + /* Multiply by the weight of matrix product */ + char16 acc_qs8 = convert_char16_sat((short16)(acc0, acc1)); + + acc_qs8 = mul_sat_qs8x16(acc_qs8, (char16)ALPHA, FIXED_POINT_POSITION); + + /* Store 16 values */ + vstore16(acc_qs8, 0, (__global char *)(offset(&dst, 0, 0))); +} +#endif /* #if(defined 
FIXED_POINT_POSITION) */ #endif /* (defined WIDTH_VECTOR_A) */ #endif /* (defined WIDTH_MATRIX_B && defined ALPHA) */ @@ -945,7 +1126,7 @@ __kernel void gemm_vm_f16(IMAGE_DECLARATION(src0), * @param[in] src_stride_y Stride of the source matrix in Y dimension (in bytes) * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source matrix - * @param[out] dst_ptr Pointer to the destination matrix Supported data types: F32 + * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src_ptr * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes) * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes) @@ -973,6 +1154,8 @@ __kernel void gemm_ma_f32(IMAGE_DECLARATION(src), } /** This OpenCL kernel performs the in-place matrix addition between 2 matrices taking into account that the second matrix might be weighted by a scalar value beta: + * + * @attention The beta's value need to be passed at compile time using -DBETA * * @param[in] src_ptr Pointer to the source matrix. Supported data types: F16 * @param[in] src_stride_x Stride of the source matrix in X dimension (in bytes) @@ -980,7 +1163,7 @@ __kernel void gemm_ma_f32(IMAGE_DECLARATION(src), * @param[in] src_stride_y Stride of the source matrix in Y dimension (in bytes) * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source matrix - * @param[out] dst_ptr Pointer to the destination matrix Supported data types: F16 + * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src_ptr * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes) * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes) @@ -1006,6 +1189,47 @@ __kernel void gemm_ma_f16(IMAGE_DECLARATION(src), /* Store final result in axb matrix */ vstore8(out, 0, (__global half *)dst.ptr); } + +#if(defined FIXED_POINT_POSITION) +/** This OpenCL kernel performs the in-place matrix addition between 2 matrices in 8 bit fixed point taking into account that the second matrix might be weighted by a scalar value beta: + * + * @attention The beta's value and the fixed point position need to be passed at compile time using -DBETA and -DFIXED_POINT_POSITION + * + * @note: BETA must be passed in 8 bit fixed point format + * + * @param[in] src_ptr Pointer to the source matrix. 
Supported data types: QS8 + * @param[in] src_stride_x Stride of the source matrix in X dimension (in bytes) + * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] src_stride_y Stride of the source matrix in Y dimension (in bytes) + * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source matrix + * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src_ptr + * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes) + * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes) + * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix + */ +__kernel void gemm_ma_qs8(IMAGE_DECLARATION(src), + IMAGE_DECLARATION(dst)) +{ + /* Compute source and destination addresses */ + Image src = CONVERT_TO_IMAGE_STRUCT(src); + Image dst = CONVERT_TO_IMAGE_STRUCT(dst); + + /* Load values from A x B */ + char16 alpha_ab = vload16(0, (__global char *)dst.ptr); + + /* Load values from Matrix C */ + char16 c = vload16(0, (__global char *)src.ptr); + + /* Computes alpha * axb + beta * c */ + char16 out = mla_sat_qs8x16(alpha_ab, (char16)BETA, c, FIXED_POINT_POSITION); + + /* Store final result in axb matrix */ + vstore16(out, 0, (__global char *)dst.ptr); +} +#endif /* #if(defined FIXED_POINT_POSITION) */ #endif /* (defined BETA) */ #if(defined WIDTH_VECTOR_A) @@ -1021,7 +1245,7 @@ __kernel void gemm_ma_f16(IMAGE_DECLARATION(src), * @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes) * @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix - * @param[in] src1_ptr Pointer to the source matrix. Supported data types: F32 + * @param[in] src1_ptr Pointer to the source matrix. 
Supported data types: same as @p src0_ptr * @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes) * @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes) @@ -1029,7 +1253,7 @@ __kernel void gemm_ma_f16(IMAGE_DECLARATION(src), * @param[in] src1_stride_z Stride of the source matrix in Z dimension (in bytes) * @param[in] src1_step_z src_stride_z * number of elements along Z processed per workitem(in bytes) * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix - * @param[out] dst_ptr Pointer to the destination matrix Supported data types: F32 + * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes) * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes) diff --git a/src/core/CL/kernels/CLGEMMMatrixAdditionKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixAdditionKernel.cpp index 343838f2f9..5883dd698b 100644 --- a/src/core/CL/kernels/CLGEMMMatrixAdditionKernel.cpp +++ b/src/core/CL/kernels/CLGEMMMatrixAdditionKernel.cpp @@ -28,6 +28,7 @@ #include "arm_compute/core/CL/ICLTensor.h" #include "arm_compute/core/CL/OpenCL.h" #include "arm_compute/core/Error.h" +#include "arm_compute/core/FixedPoint.h" #include "arm_compute/core/Helpers.h" #include "arm_compute/core/Types.h" #include "arm_compute/core/Validate.h" @@ -40,10 +41,9 @@ CLGEMMMatrixAdditionKernel::CLGEMMMatrixAdditionKernel() { } -void CLGEMMMatrixAdditionKernel::configure(const ICLTensor *input, ICLTensor *output, const float beta) +void CLGEMMMatrixAdditionKernel::configure(const ICLTensor *input, ICLTensor *output, float beta) { - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32); - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::F16, DataType::F32); + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::F16, DataType::F32); ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) != output->info()->dimension(0)); ARM_COMPUTE_ERROR_ON(input->info()->dimension(1) != output->info()->dimension(1)); @@ -53,7 +53,8 @@ void CLGEMMMatrixAdditionKernel::configure(const ICLTensor *input, ICLTensor *ou const unsigned int num_elems_processed_per_iteration = max_cl_vector_width / data_size_from_type(input->info()->data_type()); std::ostringstream ma_arguments; - ma_arguments << "-DBETA=" << beta; + ma_arguments << "-DBETA=" << (input->info()->data_type() == DataType::QS8 ? 
scvt_qs8_f32(beta, input->info()->fixed_point_position()) : beta) << " "; + ma_arguments << "-DFIXED_POINT_POSITION=" << input->info()->fixed_point_position(); std::set build_opts; build_opts.emplace(ma_arguments.str()); diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp index d7388e8579..7c5b3d7866 100644 --- a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp +++ b/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp @@ -25,12 +25,12 @@ #include "arm_compute/core/AccessWindowStatic.h" #include "arm_compute/core/AccessWindowTranspose.h" - #include "arm_compute/core/CL/CLHelpers.h" #include "arm_compute/core/CL/CLKernelLibrary.h" #include "arm_compute/core/CL/ICLTensor.h" #include "arm_compute/core/CL/OpenCL.h" #include "arm_compute/core/Error.h" +#include "arm_compute/core/FixedPoint.h" #include "arm_compute/core/Helpers.h" #include "arm_compute/core/Types.h" #include "arm_compute/core/Utils.h" @@ -50,10 +50,10 @@ CLGEMMMatrixMultiplyKernel::CLGEMMMatrixMultiplyKernel() void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, float alpha) { - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::F16, DataType::F32); - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::F16, DataType::F32); - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::F16, DataType::F32); + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::QS8, DataType::F16, DataType::F32); ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1, output); + ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input0, input1, output); + if(output->info()->dimension(1) == 1) { ARM_COMPUTE_ERROR_ON(input0->info()->dimension(0) != input1->info()->dimension(1)); @@ -74,7 +74,8 @@ void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTen std::ostringstream mm_arguments; mm_arguments << "-DWIDTH_MATRIX_B=" << input1->info()->dimension(0) << " "; - mm_arguments << "-DALPHA=" << alpha << " "; + mm_arguments << "-DALPHA=" << (input0->info()->data_type() == DataType::QS8 ? scvt_qs8_f32(alpha, input0->info()->fixed_point_position()) : alpha) << " "; + mm_arguments << "-DFIXED_POINT_POSITION=" << input0->info()->fixed_point_position() << " "; std::set build_opts; // Check if the output tensor is a vector. 
If so,the kernel runs the vector-matrix multiplication @@ -98,7 +99,9 @@ void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTen update_window_and_padding(win, input0_access, input1_access, output_access); - output_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), output->info()->tensor_shape())); + Coordinates coord; + coord.set_num_dimensions(output->info()->num_dimensions()); + output_access.set_valid_region(win, ValidRegion(coord, output->info()->tensor_shape())); ICLKernel::configure(win); } diff --git a/src/runtime/CL/functions/CLGEMM.cpp b/src/runtime/CL/functions/CLGEMM.cpp index 07b19421d6..6d22825694 100644 --- a/src/runtime/CL/functions/CLGEMM.cpp +++ b/src/runtime/CL/functions/CLGEMM.cpp @@ -45,13 +45,11 @@ CLGEMM::CLGEMM() void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *c, ICLTensor *output, float alpha, float beta) { - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::F32, DataType::F16); - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(b, 1, DataType::F32, DataType::F16); - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::F32, DataType::F16); + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QS8, DataType::F16, DataType::F32); + ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, b, output); if(c != nullptr) { - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(c, 1, DataType::F32, DataType::F16); ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, c); ARM_COMPUTE_ERROR_ON_MSG(a->info()->dimension(1) != c->info()->dimension(1), "The C matrix must have the same number of rows as the matrix A"); ARM_COMPUTE_ERROR_ON_MSG(b->info()->dimension(0) != c->info()->dimension(0), "The C matrix must have the same number of columns as the matrix C"); @@ -59,7 +57,6 @@ void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor * ARM_COMPUTE_ERROR_ON_MSG(c->info()->dimension(1) != output->info()->dimension(1), "The C matrix must have the same number of columns as the output matrix"); } - ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, b, output); ARM_COMPUTE_ERROR_ON_MSG(a->info()->dimension(0) != b->info()->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B"); // Check if the first input tensor is a vector. 
If so, all the kernels for reshaping the tensors can be skipped @@ -73,25 +70,14 @@ void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor * shape_tmp_a.set(0, a->info()->dimension(0) * 4); shape_tmp_a.set(1, std::ceil(a->info()->dimension(1) / 4.0f)); - if(DataType::F32 == a->info()->data_type()) - { - shape_tmp_b.set(0, b->info()->dimension(1) * 4); - shape_tmp_b.set(1, std::ceil(b->info()->dimension(0) / 4.0f)); - } - else if(DataType::F16 == a->info()->data_type()) - { - shape_tmp_b.set(0, b->info()->dimension(1) * 8); - shape_tmp_b.set(1, std::ceil(b->info()->dimension(0) / 8.0f)); - } - else - { - ARM_COMPUTE_ERROR("DataType not supported"); - } - - TensorInfo info_a(shape_tmp_a, 1, a->info()->data_type()); + const unsigned int transpose_w = max_cl_vector_width / data_size_from_type(b->info()->data_type()); + shape_tmp_b.set(0, b->info()->dimension(1) * transpose_w); + shape_tmp_b.set(1, std::ceil(b->info()->dimension(0) / static_cast(transpose_w))); + + TensorInfo info_a(shape_tmp_a, 1, a->info()->data_type(), a->info()->fixed_point_position()); _tmp_a.allocator()->init(info_a); - TensorInfo info_b(shape_tmp_b, 1, b->info()->data_type()); + TensorInfo info_b(shape_tmp_b, 1, b->info()->data_type(), b->info()->fixed_point_position()); _tmp_b.allocator()->init(info_b); // Configure interleave kernel diff --git a/src/runtime/NEON/functions/NEGEMM.cpp b/src/runtime/NEON/functions/NEGEMM.cpp index 15d5f4effb..730735590d 100644 --- a/src/runtime/NEON/functions/NEGEMM.cpp +++ b/src/runtime/NEON/functions/NEGEMM.cpp @@ -44,12 +44,10 @@ NEGEMM::NEGEMM() void NEGEMM::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, float alpha, float beta) { ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::F32, DataType::F16, DataType::QS8); - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(b, 1, DataType::F32, DataType::F16, DataType::QS8); - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(d, 1, DataType::F32, DataType::F16, DataType::QS8); + ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, b, d); if(c != nullptr) { - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(c, 1, DataType::F32, DataType::F16, DataType::QS8); ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, c); ARM_COMPUTE_ERROR_ON_MSG(a->info()->dimension(1) != c->info()->dimension(1), "The C matrix must have the same number of rows as the matrix A"); ARM_COMPUTE_ERROR_ON_MSG(b->info()->dimension(0) != c->info()->dimension(0), "The C matrix must have the same number of columns as the matrix B"); @@ -57,7 +55,6 @@ void NEGEMM::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITe ARM_COMPUTE_ERROR_ON_MSG(c->info()->dimension(1) != d->info()->dimension(1), "The C matrix must have the same number of columns as the output matrix"); } - ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, b, d); ARM_COMPUTE_ERROR_ON_MSG(a->info()->dimension(0) != b->info()->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B"); // Check if the first input tensor is a vector. If so, all the kernels for reshaping the tensors can be skipped -- cgit v1.2.1
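For reference only (not part of the patch): the new MLAQ_SAT_IMPL / MLALQ_SAT_IMPL macros above implement a rounding, saturating fixed-point multiply-accumulate — the product is formed in the wider intermediate type with mad_sat, a rounding term of 1 << (fixed_point_position - 1) is added, the result is shifted right by the fixed point position, and the accumulation saturates. A minimal scalar C++ sketch of that arithmetic for QS8 (helper names such as mla_sat_qs8_ref are illustrative, not library API):

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Clamp widened intermediates back to the signed 8-bit (QS8) and 16-bit (QS16) ranges.
static int32_t clamp_s8(int32_t v)  { return std::min<int32_t>(std::max<int32_t>(v, INT8_MIN),  INT8_MAX);  }
static int32_t clamp_s16(int32_t v) { return std::min<int32_t>(std::max<int32_t>(v, INT16_MIN), INT16_MAX); }

// Scalar equivalent of mla_sat_qs8xN: a + b * c with round-to-nearest and saturation,
// mirroring mad_sat in the 16-bit intermediate type followed by CONVERT_SAT and add_sat.
static int8_t mla_sat_qs8_ref(int8_t a, int8_t b, int8_t c, int fixed_point_position)
{
    const int32_t res16 = clamp_s16(static_cast<int32_t>(b) * c + (1 << (fixed_point_position - 1)));
    return static_cast<int8_t>(clamp_s8(a + clamp_s8(res16 >> fixed_point_position)));
}

// Scalar equivalent of mlal_sat_qs8x8: same product, but accumulated in a QS16 accumulator;
// the kernels only narrow back to QS8 when scaling the final block by ALPHA.
static int16_t mlal_sat_qs8_ref(int16_t acc, int8_t b, int8_t c, int fixed_point_position)
{
    const int32_t res16 = clamp_s16(static_cast<int32_t>(b) * c + (1 << (fixed_point_position - 1)));
    return static_cast<int16_t>(clamp_s16(acc + (res16 >> fixed_point_position)));
}

int main()
{
    const int fpp = 5;                 // example QS8 format: 1 sign bit, 2 integer bits, 5 fractional bits
    const int8_t a = 1 << fpp;         // 1.0
    const int8_t b = 3 << (fpp - 1);   // 1.5
    const int8_t c = 1 << (fpp - 1);   // 0.5
    std::printf("%d\n", mla_sat_qs8_ref(a, b, c, fpp));   // prints 56 == 1.75 in this format
    std::printf("%d\n", mlal_sat_qs8_ref(a, b, c, fpp));  // prints 56, kept with 16-bit headroom
    return 0;
}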
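On the host side, the patch bakes the scaling factors into the OpenCL build options: CLGEMMMatrixMultiplyKernel and CLGEMMMatrixAdditionKernel convert the float alpha/beta through scvt_qs8_f32 before emitting -DALPHA / -DBETA, and CLGEMM now derives the matrix B transpose width from the maximum CL vector width instead of hard-coding 4 for F32 and 8 for F16. A hedged sketch of that arithmetic, assuming scvt_qs8_f32 scales by 2^fixed_point_position with rounding and saturation, and a 16-byte maximum CL vector width (consistent with the old 4x/8x factors and with gemm_transpose1x16 for QS8):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Assumed behaviour of scvt_qs8_f32: scale by 2^fixed_point_position, round, saturate to QS8.
static int8_t scvt_qs8_f32_ref(float value, int fixed_point_position)
{
    const float scaled = std::round(value * static_cast<float>(1 << fixed_point_position));
    return static_cast<int8_t>(std::min(std::max(scaled, -128.0f), 127.0f));
}

int main()
{
    const int fixed_point_position = 5;   // example QS8 format with 5 fractional bits
    const float alpha = 1.0f;
    const float beta  = 0.5f;

    // Literals passed to gemm_mm_qs8 / gemm_ma_qs8 through the kernel build options
    std::printf("-DALPHA=%d -DBETA=%d -DFIXED_POINT_POSITION=%d\n",
                scvt_qs8_f32_ref(alpha, fixed_point_position),
                scvt_qs8_f32_ref(beta, fixed_point_position),
                fixed_point_position);

    // Transpose width used when reshaping matrix B: one full CL vector per element type,
    // which selects gemm_transpose1x4, gemm_transpose1x8 or gemm_transpose1x16.
    const unsigned int max_cl_vector_width = 16;      // bytes (assumed)
    const unsigned int element_sizes[]     = { 4, 2, 1 };
    const char        *type_names[]        = { "F32", "F16", "QS8" };
    for (int i = 0; i < 3; ++i)
    {
        const unsigned int transpose_w = max_cl_vector_width / element_sizes[i];
        std::printf("%s: transpose_w = %u -> gemm_transpose1x%u\n", type_names[i], transpose_w, transpose_w);
    }
    return 0;
}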