From b54ba2848515bf0aee0619c760518481f58c7525 Mon Sep 17 00:00:00 2001 From: Michele Di Giorgio Date: Tue, 14 Jan 2020 15:31:55 +0000 Subject: COMPMID-2847: Fuse output stage in GEMMLowpMatrixMultiplyReshapedOnlyRHS Change-Id: Icd60eb368a34295434e8c141885b4666973a92a1 Signed-off-by: Michele Di Giorgio Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2732 Tested-by: Arm Jenkins Reviewed-by: Georgios Pinitas Reviewed-by: Gian Marco Iodice Comments-Addressed: Arm Jenkins --- ...CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h | 83 ++++-- arm_compute/core/KernelDescriptors.h | 23 +- .../CL/functions/CLGEMMLowpMatrixMultiplyCore.h | 3 +- src/core/CL/CLKernelLibrary.cpp | 3 +- src/core/CL/cl_kernels/gemm_helpers.h | 114 +++++++- src/core/CL/cl_kernels/gemmlowp.cl | 303 +++++++++++++++++++- src/core/CL/cl_kernels/repeat.h | 104 ++++++- ...GEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp | 311 ++++++++++++++++++--- .../CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp | 188 ++++++++----- .../CL/GEMMLowpMatrixMultiplyReshapedOnlyRHS.cpp | 11 +- tests/validation/fixtures/GEMMLowpFixture.h | 18 +- 11 files changed, 1008 insertions(+), 153 deletions(-) diff --git a/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h b/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h index 9dd5496c00..7845d244eb 100644 --- a/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h +++ b/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 ARM Limited. + * Copyright (c) 2019-2020 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -25,6 +25,7 @@ #define ARM_COMPUTE_CLGEMMLOWPMATRIXMULTIPLYRESHAPEDONLYRHSKERNEL_H #include "arm_compute/core/CL/ICLKernel.h" +#include "arm_compute/core/KernelDescriptors.h" namespace arm_compute { @@ -33,6 +34,7 @@ class ICLTensor; /** OpenCL kernel to multiply matrices with QASYMM8 data type when only the input matrix RHS (input1) has been reshaped * * @note The input matrix input1 must be reshaped through @ref CLGEMMReshapeRHSMatrixKernel + * @note For fused output stage, only GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT type is supported */ class CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel : public ICLKernel { @@ -49,37 +51,59 @@ public: CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel &operator=(CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel &&) = default; /** Initialise the kernel's input and output. * - * @param[in] input0 Input tensor containing the LHS matrix. Data type supported: QASYMM8/QASYMM8_SIGNED - * @param[in] input1 Input tensor containing the RHS reshaped matrix. Data type supported: same as @p input0 - * @param[out] output Output tensor to store the result of matrix multiplication. Data type supported: S32 - * @param[in] lhs_info LHS matrix information used to retrieve the number of rows to be processed by each thread - * lhs_info.m0: 2,3,4,5,6,7,8 - * lhs_info.k0: 2,3,4,8,16 - * @param[in] rhs_info RHS matrix information used for reshaping the input1 tensor. Only the following values are supported: - * rhs_info.n0: 2,3,4,8,16 - * rhs_info.k0: 2,3,4,8,16 - * rhs_info.transpose: true - * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices + * @param[in] input0 Input tensor containing the LHS matrix. Data type supported: QASYMM8/QASYMM8_SIGNED + * @param[in] input1 Input tensor containing the RHS reshaped matrix. 
Data type supported: same as @p input0 + * @param[out] output Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/S32. + * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices, output stage information and RHS/LHS info. + * Only the following values are supported for LHS info: + * lhs_info.m0: 2,3,4,5,6,7,8 + * lhs_info.k0: 2,3,4,8,16 + * Only the following values are supported for RHS info: + * rhs_info.n0: 2,3,4,8,16 + * rhs_info.k0: same as lhs_info.k0 + * rhs_info.transpose: true + * @param[in] vector_sum_col (Optional) Input row-vector of sums of all the entries in each column of matrix B. + * Note: vector_sum_col can be a nullptr in case a_offset = 0. Data type supported: S32 + * @param[in] vector_sum_row (Optional) Input row-vector of sums of all the entries in each row of matrix A. + * Note: vector_sum_row can be a nullptr in case b_offset = 0. Data type supported: S32 + * @param[in] bias (Optional) Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required. + * Biases are 1D tensor with dimensions [OFM]. Data type supported: S32. + * @param[in] output_multipliers (Optional) Output multipliers tensor. In case of per-channel quantization, the number of multipliers must be equal to the number of filters (OFM). + * Supported data types: S32. + * @param[in] output_shifts (Optional) Output shifts tensor. In case of per-channel quantization, the number of multipliers must be equal to the number of filters (OFM). + * Supported data types: S32. */ - void configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, const GEMMReshapeInfo &gemm_info); + void configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMKernelInfo &gemm_info, const ICLTensor *vector_sum_col = nullptr, + const ICLTensor *vector_sum_row = nullptr, const ICLTensor *bias = nullptr, const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr); /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel * - * @param[in] input0 Input tensor info for the LHS matrix. Data type supported: QASYMM8/QASYMM8_SIGNED - * @param[in] input1 Input tensor info for the RHS reshaped matrix. Data type supported: same as @p input0 - * @param[in] output Output tensor info. Data type supported: S32 - * @param[in] lhs_info LHS matrix information used to retrieve the number of rows to be processed by each thread - * lhs_info.m0: 2,3,4,5,6,7,8 - * lhs_info.k0: 2,3,4,8,16 - * @param[in] rhs_info RHS matrix information used for reshaping the input1 tensor. Only the following values are supported: - * rhs_info.n0: 2,3,4,8,16 - * rhs_info.k0: same as lhs_info.k0 - * rhs_info.transpose: true - * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices + * @param[in] input0 Input tensor info for the LHS matrix. Data type supported: QASYMM8/QASYMM8_SIGNED + * @param[in] input1 Input tensor info for the RHS reshaped matrix. Data type supported: same as @p input0 + * @param[in] output Output tensor info. Data type supported: QASYMM8/QASYMM8_SIGNED/S32. + * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices, output stage information and RHS/LHS info. 
+ * Only the following values are supported for LHS info: + * lhs_info.m0: 2,3,4,5,6,7,8 + * lhs_info.k0: 2,3,4,8,16 + * Only the following values are supported for RHS info: + * rhs_info.n0: 2,3,4,8,16 + * rhs_info.k0: same as lhs_info.k0 + * rhs_info.transpose: true + * @param[in] vector_sum_col (Optional) Input row-vector info of sums of all the entries in each column of matrix B. + * Note: vector_sum_col can be a nullptr in case a_offset = 0. Data type supported: S32 + * @param[in] vector_sum_row (Optional) Input row-vector info of sums of all the entries in each row of matrix A. + * Note: vector_sum_row can be a nullptr in case b_offset = 0. Data type supported: S32 + * @param[in] bias (Optional) Biases tensor info. Only shared biases supported and it can be a nullptr if the addition of biases is not required. + * Biases are 1D tensor with dimensions [OFM]. Data type supported: S32. + * @param[in] output_multipliers (Optional) Output multipliers tensor info. In case of per-channel quantization, the number of multipliers must be equal to the number of filters (OFM). + * Supported data types: S32. + * @param[in] output_shifts (Optional) Output shifts tensor info. In case of per-channel quantization, the number of multipliers must be equal to the number of filters (OFM). + * Supported data types: S32. * * @return a status */ - static Status validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, - const GEMMReshapeInfo &gemm_info); + static Status validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, const GEMMKernelInfo &gemm_info, const ITensorInfo *vector_sum_col = nullptr, + const ITensorInfo *vector_sum_row = nullptr, const ITensorInfo *bias = nullptr, const ITensorInfo *output_multipliers = nullptr, + const ITensorInfo *output_shifts = nullptr); // Inherited methods overridden: void run(const Window &window, cl::CommandQueue &queue) override; @@ -88,10 +112,17 @@ private: const ICLTensor *_input0; const ICLTensor *_input1; ICLTensor *_output; + const ICLTensor *_vector_sum_col; + const ICLTensor *_vector_sum_row; + const ICLTensor *_bias; + const ICLTensor *_output_multipliers; + const ICLTensor *_output_shifts; bool _slide_matrix_b; bool _reinterpret_input_as_3d; bool _reinterpret_output_as_3d; bool _use_dummy_work_items; + bool _is_quantized_per_channel; + bool _fuse_output_stage; }; } // namespace arm_compute #endif /* ARM_COMPUTE_CLGEMMLOWPMATRIXMULTIPLYRESHAPEDONLYRHSKERNEL_H */ \ No newline at end of file diff --git a/arm_compute/core/KernelDescriptors.h b/arm_compute/core/KernelDescriptors.h index 4b04bebdef..58400b190b 100644 --- a/arm_compute/core/KernelDescriptors.h +++ b/arm_compute/core/KernelDescriptors.h @@ -54,14 +54,21 @@ struct FFTRadixStageKernelInfo /** Descriptor used by the GEMM kernels */ struct GEMMKernelInfo { - unsigned int m{ 0 }; /**< Number of LHS rows*/ - unsigned int n{ 0 }; /**< Number of RHS columns*/ - unsigned int k{ 0 }; /**< Number of LHS columns or RHS rows */ - unsigned int depth_output_gemm3d{ 0 }; /**< Depth of the output tensor in case is reinterpreted as 3D */ - bool reinterpret_input_as_3d{ false }; /**< Flag used to reinterpret the input as 3D */ - bool broadcast_bias{ false }; /**< Flag used to broadcase the bias addition */ - bool fp_mixed_precision{ false }; /**< Flag used to indicate wider accumulators (32 bit instead of 16 for FP16). 
*/ - ActivationLayerInfo activation_info{}; /**< Activation function to perform after the matrix multiplication */ + unsigned int m{ 0 }; /**< Number of LHS rows*/ + unsigned int n{ 0 }; /**< Number of RHS columns*/ + unsigned int k{ 0 }; /**< Number of LHS columns or RHS rows */ + unsigned int depth_output_gemm3d{ 0 }; /**< Depth of the output tensor in case is reinterpreted as 3D */ + bool reinterpret_input_as_3d{ false }; /**< Flag used to reinterpret the input as 3D */ + bool broadcast_bias{ false }; /**< Flag used to broadcast the bias addition */ + bool fp_mixed_precision{ false }; /**< Flag used to indicate wider accumulators (32 bit instead of 16 for FP16). */ + ActivationLayerInfo activation_info{}; /**< Activation function to perform after the matrix multiplication */ + int mult_transpose1xW_width{ 1 }; /**< Multiplication factor for the width of the 1xW transposed block */ + int mult_interleave4x4_height{ 1 }; /**< Multiplication factor for the height of the 4x4 interleaved block */ + GEMMLHSMatrixInfo lhs_info{}; /**< LHS matrix information used to retrieve the number of rows processed by each thread */ + GEMMRHSMatrixInfo rhs_info{}; /**< RHS matrix information used for reshaping the RHS matrix */ + int32_t a_offset{ 0 }; /**< Offset to be added to each element of the matrix A */ + int32_t b_offset{ 0 }; /**< Offset to be added to each element of the matrix B */ + GEMMLowpOutputStageInfo output_stage{}; /**< GEMMLowp output stage information */ }; /** Descriptor used by the depthwise convolution kernels */ diff --git a/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h b/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h index 66c5e9ee46..c9b1b70c54 100644 --- a/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h +++ b/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h @@ -135,8 +135,9 @@ private: bool _is_midgard; bool _reshape_b_only_on_first_run; bool _is_prepared; - bool _fuse_output_stage; + bool _run_output_stage; bool _convert_to_qasymm8; + bool _run_offset_contribution; }; } // namespace arm_compute #endif /*ARM_COMPUTE_CLGEMMLOWPMATRIXMULTIPLYCORE_H */ \ No newline at end of file diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp index 5b59094c81..5d67240299 100644 --- a/src/core/CL/CLKernelLibrary.cpp +++ b/src/core/CL/CLKernelLibrary.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2019 ARM Limited. + * Copyright (c) 2016-2020 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -341,6 +341,7 @@ const std::map CLKernelLibrary::_kernel_program_map = { "gemmlowp_mm_native", "gemmlowp.cl" }, { "gemmlowp_mm_reshaped_lhs_nt_rhs_t", "gemmlowp.cl" }, { "gemmlowp_mm_reshaped_only_rhs_t", "gemmlowp.cl" }, + { "gemmlowp_mm_reshaped_only_rhs_t_fused_output_stage_fixedpoint", "gemmlowp.cl" }, { "gemmlowp_offset_contribution", "gemmlowp.cl" }, { "gemmlowp_offset_contribution_quantize_down", "gemmlowp.cl" }, { "gemmlowp_offset_contribution_quantize_down_fixedpoint", "gemmlowp.cl" }, diff --git a/src/core/CL/cl_kernels/gemm_helpers.h b/src/core/CL/cl_kernels/gemm_helpers.h index 66e83c3558..79a9f094df 100644 --- a/src/core/CL/cl_kernels/gemm_helpers.h +++ b/src/core/CL/cl_kernels/gemm_helpers.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 ARM Limited. + * Copyright (c) 2019-2020 ARM Limited. 
* * SPDX-License-Identifier: MIT * @@ -140,6 +140,118 @@ #define LOAD_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) LOAD_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) /** @} */ // end of group LOAD_BLOCK +/** Loads the elements from 0 to n-1 in the given variables (BASENAME0 to BASENAMEn-1). + * @name LOAD_ELEMENT_n + * + * @param[in] N0 The number of rows to load + * @param[in] DATA_TYPE The data type of variables + * @param[in] BASENAME The basename of the destination variables for the loaded rows + * @param[in] PTR The base pointer + * @param[in] OFFSET The offset within a row + * @param[in] STRIDE_Y The stride value in y-axis direction + * @{ + */ +#define LOAD_ELEMENT_1(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + VEC_DATA_TYPE(DATA_TYPE, N0) \ + BASENAME##0 = *((__global DATA_TYPE *)(PTR + OFFSET + 0 * STRIDE_Y)); + +#define LOAD_ELEMENT_2(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + LOAD_ELEMENT_1(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + VEC_DATA_TYPE(DATA_TYPE, N0) \ + BASENAME##1 = *((__global DATA_TYPE *)(PTR + OFFSET + 1 * STRIDE_Y)); + +#define LOAD_ELEMENT_3(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + LOAD_ELEMENT_2(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + VEC_DATA_TYPE(DATA_TYPE, N0) \ + BASENAME##2 = *((__global DATA_TYPE *)(PTR + OFFSET + 2 * STRIDE_Y)); + +#define LOAD_ELEMENT_4(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + LOAD_ELEMENT_3(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + VEC_DATA_TYPE(DATA_TYPE, N0) \ + BASENAME##3 = *((__global DATA_TYPE *)(PTR + OFFSET + 3 * STRIDE_Y)); + +#define LOAD_ELEMENT_5(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + LOAD_ELEMENT_4(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + VEC_DATA_TYPE(DATA_TYPE, N0) \ + BASENAME##4 = *((__global DATA_TYPE *)(PTR + OFFSET + 4 * STRIDE_Y)); + +#define LOAD_ELEMENT_6(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + LOAD_ELEMENT_5(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + VEC_DATA_TYPE(DATA_TYPE, N0) \ + BASENAME##5 = *((__global DATA_TYPE *)(PTR + OFFSET + 5 * STRIDE_Y)); + +#define LOAD_ELEMENT_7(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + LOAD_ELEMENT_6(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + VEC_DATA_TYPE(DATA_TYPE, N0) \ + BASENAME##6 = *((__global DATA_TYPE *)(PTR + OFFSET + 6 * STRIDE_Y)); + +#define LOAD_ELEMENT_8(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + LOAD_ELEMENT_7(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + VEC_DATA_TYPE(DATA_TYPE, N0) \ + BASENAME##7 = *((__global DATA_TYPE *)(PTR + OFFSET + 7 * STRIDE_Y)); + +#define LOAD_ELEMENT_9(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + LOAD_ELEMENT_8(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + VEC_DATA_TYPE(DATA_TYPE, N0) \ + BASENAME##8 = *((__global DATA_TYPE *)(PTR + OFFSET + 8 * STRIDE_Y)); + +#define LOAD_ELEMENT_10(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + LOAD_ELEMENT_9(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + VEC_DATA_TYPE(DATA_TYPE, N0) \ + BASENAME##9 = *((__global DATA_TYPE *)(PTR + OFFSET + 9 * STRIDE_Y)); + +#define LOAD_ELEMENT_11(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + LOAD_ELEMENT_10(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + VEC_DATA_TYPE(DATA_TYPE, N0) \ + BASENAME##A = *((__global DATA_TYPE *)(PTR + OFFSET + 10 * STRIDE_Y)); + +#define LOAD_ELEMENT_12(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + LOAD_ELEMENT_11(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + VEC_DATA_TYPE(DATA_TYPE, N0) \ + BASENAME##B = 
*((__global DATA_TYPE *)(PTR + OFFSET + 11 * STRIDE_Y)); + +#define LOAD_ELEMENT_13(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + LOAD_ELEMENT_12(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + VEC_DATA_TYPE(DATA_TYPE, N0) \ + BASENAME##C = *((__global DATA_TYPE *)(PTR + OFFSET + 12 * STRIDE_Y)); + +#define LOAD_ELEMENT_14(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + LOAD_ELEMENT_13(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + VEC_DATA_TYPE(DATA_TYPE, N0) \ + BASENAME##D = *((__global DATA_TYPE *)(PTR + OFFSET + 13 * STRIDE_Y)); + +#define LOAD_ELEMENT_15(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + LOAD_ELEMENT_14(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + VEC_DATA_TYPE(DATA_TYPE, N0) \ + BASENAME##E = *((__global DATA_TYPE *)(PTR + OFFSET + 14 * STRIDE_Y)); + +#define LOAD_ELEMENT_16(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + LOAD_ELEMENT_15(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ + VEC_DATA_TYPE(DATA_TYPE, N0) \ + BASENAME##F = *((__global DATA_TYPE *)(PTR + OFFSET + 15 * STRIDE_Y)); + +/** @}*/ // end of group LOAD_ELEMENT_n + +/** Load Scalar as Vector (consecutive elements). + * @name LOAD_SCALAR_AS_VECTOR + * + * Supported cases are M0=1,2,3,...,16 and N0=1,2,3,4,8,16 + * The data to load is expected to have consecutive names for each row. + * E.g., for M0=3, and BASENAME=c, the expected data is c0, c1 and c2. + * + * @param[in] M0 The number of consecutive rows + * @param[in] N0 The number of consecutive columns + * @param[in] DATA_TYPE The data type of the target + * @param[in] BASENAME The basename of the result variables + * @param[in] PTR The base pointer for the data + * @param[in] OFFSET The offset within a row + * @param[in] STRIDE_Y The stride in y-axis direction + * @{ + */ +#define LOAD_SCALAR_AS_VECTOR_STR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) LOAD_ELEMENT_##M0(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) +#define LOAD_SCALAR_AS_VECTOR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) LOAD_SCALAR_AS_VECTOR_STR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) +/** @} */ // end of group LOAD_SCALAR_AS_VECTOR + /** Basic macros to calculate Z offset values from Z0 to Zn-1 * @name CALCULATE_Z_OFFSET_n * diff --git a/src/core/CL/cl_kernels/gemmlowp.cl b/src/core/CL/cl_kernels/gemmlowp.cl index 8140b8f2a5..a2a47d7823 100644 --- a/src/core/CL/cl_kernels/gemmlowp.cl +++ b/src/core/CL/cl_kernels/gemmlowp.cl @@ -814,6 +814,301 @@ __kernel void gemmlowp_mm_reshaped_only_rhs_t(IMAGE_DECLARATION(lhs), #undef RHS_OFFSET_X #undef RHS_STEP_X } + +#if defined(RESULT_OFFSET) && defined(RESULT_SHIFT) && defined(RESULT_MULTIPLIER) +/** This OpenCL kernel computes the matrix multiplication between 2 matrices with fused output stage using fixed-point arithmetic. + * The LHS matrix is NOT reshaped + * The RHS matrix is reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the block K0xN0 is transposed + * + * @note The input data type must be passed at compile time using -DDATA_TYPE (i.e. -DDATA_TYPE=uchar) + * @note The accumulator data type must be passed at compile time using -DACC_DATA_TYPE (i.e. -DACC_DATA_TYPE=uint) + * @note The number of columns of LHS matrix must be passed at compile time using -DK (i.e. -DK=64) + * @note The block's dimensions used for reshaping the RHS matrix (N0 and K0) must be passed at compile time using -DN0 and -DK0 (i.e. -DN0=8, -DK0=4). + * @note The number of M0 rows to process must be passed at compile time using -DM0 (i.e. 
-DM0=2)
+ * @note The number of K0xN0 horizontal blocks stored on the same output row of the reshaped RHS matrix must be passed at compile time using -DH0 (i.e. -DH0=2)
+ * @note If the K0xN0 blocks in the reshaped RHS matrix have been interleaved, the option -DRHS_INTERLEAVE must be passed at compile time.
+ * @note Only the following configurations of M0, N0 and K0 are currently supported:
+ *  - M0 = 1, 2, 3, 4, 5, 6, 7, 8
+ *  - N0 = 2, 3, 4, 8, 16
+ *  - K0 = 2, 3, 4, 8, 16
+ *  - H0 >= 1
+ *
+ * @note In case the input or output has to be reinterpreted as a 3D tensor, the following information must be passed at compile time:
+ *       -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
+ *       -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
+ *       -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
+ *       -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
+ *          (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns LHS matrix
+ *
+ * @note The offset, scalar scale factor and number of bits to shift right of the output tensor must be passed at compile time using -DRESULT_OFFSET, -DRESULT_MULTIPLIER and -DRESULT_SHIFT
+ * @note In case the addition of int32 biases is required, -DADD_BIAS should be passed at compile time
+ * @note The output data type should be passed at compile time using -DOUTPUT_DATA_TYPE
+ * @note In case the clamping of the result is required, the min and max bounds can be passed at compile time using -DMIN_BOUND and -DMAX_BOUND.
+ *       These values can be used to implement "rectified linear unit" activation functions
+ * @note In case of per-channel quantization of matrix B, -DPER_CHANNEL_QUANTIZATION must be passed at compile time.
+ *
+ * @param[in] lhs_ptr Pointer to the LHS reshaped matrix. Supported data type: QASYMM8/QASYMM8_SIGNED
+ * @param[in] lhs_stride_x Stride of the LHS reshaped matrix in X dimension (in bytes)
+ * @param[in] lhs_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] lhs_stride_y Stride of the LHS reshaped matrix in Y dimension (in bytes)
+ * @param[in] lhs_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the LHS reshaped matrix
+ * @param[in] rhs_ptr Pointer to the RHS reshaped matrix.
Supported data type: same as @p lhs_ptr + * @param[in] rhs_stride_x Stride of the RHS reshaped matrix in X dimension (in bytes) + * @param[in] rhs_step_x src_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] rhs_stride_y Stride of the RHS reshaped matrix in Y dimension (in bytes) + * @param[in] rhs_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the RHS reshaped matrix + * @param[out] dst_ptr Pointer to the destination matrix Supported data type: same as @p lhs_ptr + * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes) + * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes) + * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix + * @param[in] lhs_stride_z Stride of the LHS reshaped matrix in Z dimension (in bytes) + * @param[in] rhs_stride_z Stride of the RHS reshaped matrix in Z dimension (in bytes) + * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes) + * @param[in] lhs_cross_plane_pad (Optional) Bottom paddings for LHS matrix in unit of elements (only if defined REINTERPRET_INPUT_AS_3D) + * @param[in] dst_cross_plane_pad (Optional) Bottom paddings for the output matrix in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D) + * @param[in] sum_col_ptr (Optional) Pointer to the source tensor. Supported data type: S32 + * @param[in] sum_col_stride_x (Optional) Stride of the source tensor in X dimension (in bytes) + * @param[in] sum_col_step_x (Optional) sum_col_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] sum_col_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes) + * @param[in] sum_col_step_y (Optional) sum_col_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] sum_col_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor + * @param[in] sum_row_ptr (Optional) Pointer to the source tensor. Supported data type: S32 + * @param[in] sum_row_stride_x (Optional) Stride of the source tensor in X dimension (in bytes) + * @param[in] sum_row_step_x (Optional) sum_row_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] sum_row_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes) + * @param[in] sum_row_step_y (Optional) sum_row_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] sum_row_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor + * @param[in] biases_ptr (Optional) Pointer to the biases tensor. Supported data type: S32 + * @param[in] biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes) + * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor + * @param[in] result_multipliers_ptr (Optional) Pointer to the output multipliers vector for per-channel quantization. 
Supported data types: S32 + * @param[in] result_multipliers_stride_x (Optional) Stride of the output multipliers vector in X dimension (in bytes) + * @param[in] result_multipliers_step_x (Optional) output_multipliers_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] result_multipliers_offset_first_element_in_bytes (Optional) The offset of the first element in the output multipliers vector + * @param[in] result_shifts_ptr (Optional) Pointer to the output shifts vector for per-channel quantization. Supported data types: S32 + * @param[in] result_shifts_stride_x (Optional) Stride of the output shifts vector in X dimension (in bytes) + * @param[in] result_shifts_step_x (Optional) output_shifts_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] result_shifts_offset_first_element_in_bytes (Optional) The offset of the first element in the output shifts vector + */ +__kernel void gemmlowp_mm_reshaped_only_rhs_t_fused_output_stage_fixedpoint(IMAGE_DECLARATION(lhs), + IMAGE_DECLARATION(rhs), + IMAGE_DECLARATION(dst), + uint lhs_stride_z, + uint rhs_stride_z, + uint dst_stride_z +#if defined(REINTERPRET_INPUT_AS_3D) + , + uint lhs_cross_plane_pad +#endif // REINTERPRET_INPUT_AS_3D +#if defined(REINTERPRET_OUTPUT_AS_3D) + , + uint dst_cross_plane_pad +#endif // REINTERPRET_OUTPUT_AS_3D +#if defined(A_OFFSET) + , + IMAGE_DECLARATION(sum_col) +#endif // defined(A_OFFSET) +#if defined(B_OFFSET) + , + IMAGE_DECLARATION(sum_row) +#endif // defined(B_OFFSET) +#if defined(ADD_BIAS) + , + VECTOR_DECLARATION(biases) +#endif // defined(ADD_BIAS) +#if defined(PER_CHANNEL_QUANTIZATION) + , + VECTOR_DECLARATION(result_multipliers), + VECTOR_DECLARATION(result_shifts) +#endif // defined(PER_CHANNEL_QUANTIZATION) + ) +{ + // Block size +#define RHS_BLOCK_SIZE ((K0) * (N0)) + + // RHS offset and step X +#if defined(RHS_INTERLEAVE) +#define RHS_OFFSET_X (K0) +#define RHS_STEP_X ((K0) * (H0)) +#define RHS_STEP_LOOP (1) +#else // defined(RHS_INTERLEAVE) +#define RHS_OFFSET_X (RHS_BLOCK_SIZE) +#define RHS_STEP_X (K0) +#define RHS_STEP_LOOP (H0) +#endif // defined(RHS_INTERLEAVE) + + uint x = get_global_id(0); + uint y = get_global_id(1); + uint z = get_global_id(2); + +#if defined(DUMMY_WORK_ITEMS) + if((x * N0 >= N) || (y * M0 >= M)) + { + return; + } +#endif // defined(DUMMY_WORK_ITEMS) + + // Compute LHS matrix address + uint lhs_offset = lhs_offset_first_element_in_bytes + y * M0 * (uint)lhs_stride_y; + + // Compute RHS matrix address + uint rhs_offset = rhs_offset_first_element_in_bytes + (x % H0) * (uint)RHS_OFFSET_X + (x / (uint)H0) * rhs_stride_y; + +#if defined(MATRIX_B_DEPTH) + // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3 + rhs_offset += (z % MATRIX_B_DEPTH) * rhs_stride_z; +#else // defined(MATRIX_B_DEPTH) + rhs_offset += z * rhs_stride_z; +#endif // defined(MATRIX_B_DEPTH) + + REPEAT_VAR_INIT_TO_CONST(8, uint, zlhs, 0); //uint zout0=0,zout1=0,zout2=0,... zout7=0; + REPEAT_VAR_INIT_TO_CONST(16, uint, zrhs, 0); + +#if defined(REINTERPRET_INPUT_AS_3D) + // The plane (zlhs) is calculated dividing M (y * M0) by HEIGHT_GEMM3D + CALCULATE_Z_OFFSET(M0, uint, zlhs, y, HEIGHT_GEMM3D, DEPTH_GEMM3D, lhs_cross_plane_pad, lhs_stride_y); + + // Add offset for batched GEMM. 
The batches will be in the fourth dimension and for this reason we + // multiply lhs_stride_z by DEPTH_GEMM3D + lhs_offset += z * lhs_stride_z * DEPTH_GEMM3D; + +#else // defined(REINTERPRET_INPUT_AS_3D) + + // Add offset for batched GEMM + lhs_offset += z * lhs_stride_z; + +#endif // defined(REINTERPRET_INPUT_AS_3D) + + // Initialize the accumulators + REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(ACC_DATA_TYPE, N0), c, 0); //VEC_DATA_TYPE(ACC_DATA_TYPE, N0) c0=0,c1=0,c2=0,... c(N0-1)=0; + + for(int i = 0; i < K; i += K0) + { + // Load values from LHS matrix + LOAD_BLOCK(M0, K0, DATA_TYPE, a, lhs_ptr, lhs_offset, lhs_stride_y, zlhs); + + // Load values from RHS matrix + LOAD_BLOCK(N0, K0, DATA_TYPE, b, rhs_ptr, rhs_offset, RHS_STEP_X, zrhs); + + // Partial matrix multiplication M0,N0,K0 + ARM_MM_K0XN0XM0(M0, N0, K0, a, b, c); + + lhs_offset += K0; + rhs_offset += N0 * RHS_STEP_X * RHS_STEP_LOOP; + } + + // Result of MM is of type DATA_TYPE + __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)N0) * sizeof(DATA_TYPE) + (y * (uint)M0 * dst_stride_y); + + REPEAT_VAR_INIT_TO_CONST(8, uint, zout, 0); //uint zout0=0,zout1=0,zout2=0,... zout7=0; + +#if defined(REINTERPRET_OUTPUT_AS_3D) + // The plane (zout) is calculated dividing M (y * M0) by HEIGHT_GEMM3D + CALCULATE_Z_OFFSET(M0, uint, zout, y, HEIGHT_GEMM3D, DEPTH_GEMM3D, dst_cross_plane_pad, dst_stride_y); + + // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we + // multiply dst_stride_z by DEPTH_GEMM3D + dst_addr += z * dst_stride_z * DEPTH_GEMM3D; + +#else // defined(REINTERPRET_OUTPUT_AS_3D) + + // Add offset for batched GEMM + dst_addr += z * dst_stride_z; + +#endif // defined(REINTERPRET_OUTPUT_AS_3D) + + // Convert result of matrix multiplication to S32 + REPEAT_VAR_INIT_CONVERT_SAT(M0, VEC_DATA_TYPE(int, N0), c, c_int); + + int batch_id = z; +#if defined(DEPTH_GEMM3D) + batch_id /= (int)DEPTH_GEMM3D; +#endif // defined(DEPTH_GEMM3D) + + // Offset contribution: c += (A_OFFSET * sum_col) + (B_OFFSET * sum_row) + K_OFFSET; + REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(int, N0), offset_s32_, K_OFFSET); + +#if defined(A_OFFSET) + // Compute the offset contribution due to A_OFFSET + __global uchar *sum_col_addr = sum_col_ptr + sum_col_offset_first_element_in_bytes + (x * (uint)N0) * sizeof(int); + +#if defined(SUM_COL_HAS_BATCHES) + sum_col_addr += z * sum_col_stride_y; +#endif // defined(SUM_COL_HAS_BATCHES) + VEC_DATA_TYPE(int, N0) + a_offset_s32 = VLOAD(N0)(0, (__global int *)sum_col_addr); + a_offset_s32 *= (VEC_DATA_TYPE(int, N0))A_OFFSET; + + REPEAT_ADD_VECTOR_TO_VAR(M0, offset_s32_, a_offset_s32); +#endif // defined(A_OFFSET) + +#if defined(B_OFFSET) + // Compute the offset contribution due to B_OFFSET + __global uchar *sum_row_addr = sum_row_ptr + sum_row_offset_first_element_in_bytes + (y * (uint)M0) * sizeof(int) + z * sum_row_stride_y; + +#if defined(HEIGHT_GEMM3D) && defined(DEPTH_GEMM3D) + sum_row_addr += (batch_id % (int)DEPTH_GEMM3D) * (int)HEIGHT_GEMM3D * sizeof(int); +#endif // defined(HEIGHT_GEMM3D) && defined(DEPTH_GEMM3D) + LOAD_SCALAR_AS_VECTOR(M0, N0, int, b_offset_s32_, sum_row_addr, 0, sum_row_stride_x); + + REPEAT_MLA_VAR_WITH_CONST_VEC(M0, offset_s32_, b_offset_s32_, (VEC_DATA_TYPE(int, N0))B_OFFSET); +#endif // defined(B_OFFSET) + +#if defined(ADD_BIAS) + // Add bias + __global uchar *bias_addr = biases_ptr + biases_offset_first_element_in_bytes + (x * (uint)N0) * sizeof(int); + + VEC_DATA_TYPE(int, N0) + bias_values = VLOAD(N0)(0, 
(__global int *)bias_addr); + REPEAT_ADD_VECTOR_TO_VAR(M0, offset_s32_, bias_values); +#endif // defined(ADD_BIAS) + + REPEAT_ADD_TWO_VARS(M0, c_int, offset_s32_); + + // Multiply by result_mult_int and shift +#if defined(PER_CHANNEL_QUANTIZATION) + __global uchar *result_multipliers_addr = result_multipliers_ptr + result_multipliers_offset_first_element_in_bytes + (x * (uint)N0) * sizeof(int); + __global uchar *result_shifts_addr = result_shifts_ptr + result_shifts_offset_first_element_in_bytes + (x * (uint)N0) * sizeof(int); + + VEC_DATA_TYPE(int, N0) + res_mul = VLOAD(N0)(0, (__global int *)result_multipliers_addr); + VEC_DATA_TYPE(int, N0) + res_shift = VLOAD(N0)(0, (__global int *)result_shifts_addr); + + REPEAT_ASYMM_MULT_BY_QUANT_MULTIPLIER_PER_CHANNEL(M0, N0, c_int, res_mul, res_shift); +#else // defined(PER_CHANNEL_QUANTIZATION) + +#if RESULT_SHIFT < 0 + REPEAT_ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(M0, N0, c_int, RESULT_MULTIPLIER, RESULT_SHIFT); +#else // RESULT_SHIFT >= 0 + REPEAT_ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(M0, N0, c_int, RESULT_MULTIPLIER, RESULT_SHIFT); +#endif // RESULT_SHIFT < 0 + +#endif // defined(PER_CHANNEL_QUANTIZATION) + + // Add the offset terms to GEMM's result + REPEAT_ADD_CONST_TO_VAR(M0, VEC_DATA_TYPE(int, N0), c_int, RESULT_OFFSET); + +#if defined(MIN_BOUND) + REPEAT_MAX_CONST_VAR(M0, VEC_DATA_TYPE(int, N0), c_int, MIN_BOUND); +#endif // defined(MIN_BOUND) +#if defined(MAX_BOUND) + REPEAT_MIN_CONST_VAR(M0, VEC_DATA_TYPE(int, N0), c_int, MAX_BOUND); +#endif // defined(MAX_BOUND) + + // Convert and store output block (does convert saturate) + CONVERT_STORE_BLOCK(M0, N0, DATA_TYPE, c_int, dst_addr, dst_stride_y, zout); + +#undef RHS_BLOCK_SIZE +#undef RHS_OFFSET_X +#undef RHS_STEP_X +} +#endif // defined(RESULT_OFFSET) && defined(RESULT_SHIFT) && defined(RESULT_MULTIPLIER) #endif // defined(M0) && defined(N0) && defined(K0) && defined(H0) && defined(DATA_TYPE) && defined(K) #if defined(M0) && defined(N0) && defined(K0) && defined(K) @@ -840,7 +1135,7 @@ __kernel void gemmlowp_mm_reshaped_only_rhs_t(IMAGE_DECLARATION(lhs), * -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor * (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns LHS matrix * - * @param[in] lhs_ptr Pointer to the LHS reshaped matrix. Supported data type: F16/F32 + * @param[in] lhs_ptr Pointer to the LHS reshaped matrix. 
Supported data type: QASYMM8 * @param[in] lhs_stride_x Stride of the LHS reshaped matrix in X dimension (in bytes) * @param[in] lhs_step_x src_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] lhs_stride_y Stride of the LHS reshaped matrix in Y dimension (in bytes) @@ -852,7 +1147,7 @@ __kernel void gemmlowp_mm_reshaped_only_rhs_t(IMAGE_DECLARATION(lhs), * @param[in] rhs_stride_y Stride of the RHS reshaped matrix in Y dimension (in bytes) * @param[in] rhs_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the RHS reshaped matrix - * @param[out] dst_ptr Pointer to the destination matrix Supported data type: same as @p lhs_ptr + * @param[out] dst_ptr Pointer to the destination matrix Supported data type: S32 * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes) * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes) @@ -1389,7 +1684,7 @@ __kernel void gemmlowp_offset_contribution(TENSOR3D_DECLARATION(mm_result) vstore4(in_s32, 0, (__global int *)mm_result_addr); } -#if defined(RESULT_OFFSET) && defined(RESULT_MULTIPLIER) && defined(RESULT_SHIFT) +#if defined(RESULT_OFFSET) && defined(RESULT_MULTIPLIER) && defined(RESULT_SHIFT) && defined(OUTPUT_DATA_TYPE) /* OpenCL kernel used to add the offset contribution after @ref CLGEMMLowpMatrixMultiplyKernel and it quantizes down to uint8. * * This kernel takes a final int32 accumulator value (the output of @CLGEMMLowpMatrixMultiplyKernel), adds to it the offset contribution of matrix A and matrix B and quantizes to uint8 through the output stage. @@ -1742,7 +2037,7 @@ __kernel void gemmlowp_offset_contribution_quantize_down_fixedpoint(TENSOR3D_DEC // Store the result vstore4(res, 0, (__global OUTPUT_DATA_TYPE *)dst_addr); } -#endif // defined(RESULT_OFFSET) && defined(RESULT_MULTIPLIER) && defined(RESULT_SHIFT) +#endif // defined(RESULT_OFFSET) && defined(RESULT_MULTIPLIER) && defined(RESULT_SHIFT) && defined(OUTPUT_DATA_TYPE) #endif // defined(K_OFFSET) diff --git a/src/core/CL/cl_kernels/repeat.h b/src/core/CL/cl_kernels/repeat.h index 691f7aea01..4e761db073 100644 --- a/src/core/CL/cl_kernels/repeat.h +++ b/src/core/CL/cl_kernels/repeat.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 ARM Limited. + * Copyright (c) 2019-2020 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -24,6 +24,8 @@ #ifndef ARM_COMPUTE_REPEAT_H #define ARM_COMPUTE_REPEAT_H +#include "helpers.h" + /** Macros that help in loop unrolling */ //Repeat macros with 3 param, excluding the implicit ID param #define REPEAT_3_1(P_X, P_A, P_B, P_C) P_X##_DEF(0, P_A, P_B, P_C) @@ -76,8 +78,106 @@ #define REPEAT_DEF_3_N(P_NUM, P_OP, P_A, P_B, P_C) REPEAT_3_##P_NUM(P_OP, P_A, P_B, P_C) //One level of indirection to ensure order of expansion does not affect preprocessing P_NUM #define REPEAT_3_N(P_NUM, P_OP, P_A, P_B, P_C) REPEAT_DEF_3_N(P_NUM, P_OP, P_A, P_B, P_C) -//Macro for initializing N variables. generates N statements that defines VAR##N = RHS_ACCESSOR_DEF(...) 
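+// Each REPEAT_n_N(N, OP, ...) invocation expands OP##_DEF(ID, ...) once per ID, counting
+// down from N - 1 to 0; e.g. REPEAT_VAR_INIT_TO_CONST(3, uint, zout, 0) (defined below)
+// expands to: uint zout2 = 0; uint zout1 = 0; uint zout0 = 0;
+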
+// Repeat macros with 4 param, excluding the implicit ID param
+#define REPEAT_4_1(P_X, P_A, P_B, P_C, P_D) P_X##_DEF(0, P_A, P_B, P_C, P_D)
+#define REPEAT_4_2(P_X, P_A, P_B, P_C, P_D) \
+ P_X##_DEF(1, P_A, P_B, P_C, P_D); \
+ REPEAT_4_1(P_X, P_A, P_B, P_C, P_D)
+#define REPEAT_4_3(P_X, P_A, P_B, P_C, P_D) \
+ P_X##_DEF(2, P_A, P_B, P_C, P_D); \
+ REPEAT_4_2(P_X, P_A, P_B, P_C, P_D)
+#define REPEAT_4_4(P_X, P_A, P_B, P_C, P_D) \
+ P_X##_DEF(3, P_A, P_B, P_C, P_D); \
+ REPEAT_4_3(P_X, P_A, P_B, P_C, P_D)
+#define REPEAT_4_5(P_X, P_A, P_B, P_C, P_D) \
+ P_X##_DEF(4, P_A, P_B, P_C, P_D); \
+ REPEAT_4_4(P_X, P_A, P_B, P_C, P_D)
+#define REPEAT_4_6(P_X, P_A, P_B, P_C, P_D) \
+ P_X##_DEF(5, P_A, P_B, P_C, P_D); \
+ REPEAT_4_5(P_X, P_A, P_B, P_C, P_D)
+#define REPEAT_4_7(P_X, P_A, P_B, P_C, P_D) \
+ P_X##_DEF(6, P_A, P_B, P_C, P_D); \
+ REPEAT_4_6(P_X, P_A, P_B, P_C, P_D)
+#define REPEAT_4_8(P_X, P_A, P_B, P_C, P_D) \
+ P_X##_DEF(7, P_A, P_B, P_C, P_D); \
+ REPEAT_4_7(P_X, P_A, P_B, P_C, P_D)
+#define REPEAT_4_9(P_X, P_A, P_B, P_C, P_D) \
+ P_X##_DEF(8, P_A, P_B, P_C, P_D); \
+ REPEAT_4_8(P_X, P_A, P_B, P_C, P_D)
+#define REPEAT_4_10(P_X, P_A, P_B, P_C, P_D) \
+ P_X##_DEF(9, P_A, P_B, P_C, P_D); \
+ REPEAT_4_9(P_X, P_A, P_B, P_C, P_D)
+#define REPEAT_4_11(P_X, P_A, P_B, P_C, P_D) \
+ P_X##_DEF(A, P_A, P_B, P_C, P_D); \
+ REPEAT_4_10(P_X, P_A, P_B, P_C, P_D)
+#define REPEAT_4_12(P_X, P_A, P_B, P_C, P_D) \
+ P_X##_DEF(B, P_A, P_B, P_C, P_D); \
+ REPEAT_4_11(P_X, P_A, P_B, P_C, P_D)
+#define REPEAT_4_13(P_X, P_A, P_B, P_C, P_D) \
+ P_X##_DEF(C, P_A, P_B, P_C, P_D); \
+ REPEAT_4_12(P_X, P_A, P_B, P_C, P_D)
+#define REPEAT_4_14(P_X, P_A, P_B, P_C, P_D) \
+ P_X##_DEF(D, P_A, P_B, P_C, P_D); \
+ REPEAT_4_13(P_X, P_A, P_B, P_C, P_D)
+#define REPEAT_4_15(P_X, P_A, P_B, P_C, P_D) \
+ P_X##_DEF(E, P_A, P_B, P_C, P_D); \
+ REPEAT_4_14(P_X, P_A, P_B, P_C, P_D)
+#define REPEAT_4_16(P_X, P_A, P_B, P_C, P_D) \
+ P_X##_DEF(F, P_A, P_B, P_C, P_D); \
+ REPEAT_4_15(P_X, P_A, P_B, P_C, P_D)
+
+#define REPEAT_DEF_4_N(P_NUM, P_OP, P_A, P_B, P_C, P_D) REPEAT_4_##P_NUM(P_OP, P_A, P_B, P_C, P_D) //One level of indirection to ensure order of expansion does not affect preprocessing P_NUM
+#define REPEAT_4_N(P_NUM, P_OP, P_A, P_B, P_C, P_D) REPEAT_DEF_4_N(P_NUM, P_OP, P_A, P_B, P_C, P_D)
+
+// Macro for initializing N variables. Generates N statements that define VAR##N = RHS_ACCESSOR_DEF(...)
 #define VAR_INIT_TO_CONST_DEF(ID, TYPE, VAR, VAL) TYPE VAR##ID = VAL
 #define REPEAT_VAR_INIT_TO_CONST(N, TYPE, VAR, VAL) REPEAT_3_N(N, VAR_INIT_TO_CONST, TYPE, VAR, VAL)

+// Macro for initializing N variables by converting the data type. Generates N statements that define VAR##N = RHS_ACCESSOR_DEF(...)
+#define VAR_INIT_CONVERT_SAT_DEF(ID, TYPE_OUT, VAR_IN, VAR_OUT) TYPE_OUT VAR_OUT##ID = CONVERT_SAT(VAR_IN##ID, TYPE_OUT)
+#define REPEAT_VAR_INIT_CONVERT_SAT(N, TYPE_OUT, VAR_IN, VAR_OUT) REPEAT_3_N(N, VAR_INIT_CONVERT_SAT, TYPE_OUT, VAR_IN, VAR_OUT)
+
+// Macro for adding a constant to N variables. Generates N statements that define VAR##N = RHS_ACCESSOR_DEF(...)
+#define ADD_CONST_TO_VAR_DEF(ID, TYPE, VAR, VAL) VAR##ID += (TYPE)VAL
+#define REPEAT_ADD_CONST_TO_VAR(N, TYPE, VAR, VAL) REPEAT_3_N(N, ADD_CONST_TO_VAR, TYPE, VAR, VAL)
+
+// Macro for multiplying N variables (VAR_B) by a constant (VAL) and adding to other N variables (VAR_A). Generates N statements that define VAR_A##N = RHS_ACCESSOR_DEF(...)
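+// e.g. REPEAT_MLA_VAR_WITH_CONST_VEC(2, offset_s32_, b_offset_s32_, vec) expands to:
+//     offset_s32_1 += b_offset_s32_1 * vec;
+//     offset_s32_0 += b_offset_s32_0 * vec;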
+#define MLA_VAR_WITH_CONST_VEC_DEF(ID, VAR_A, VAR_B, VAL) VAR_A##ID += VAR_B##ID * VAL
+#define REPEAT_MLA_VAR_WITH_CONST_VEC(N, VAR_A, VAR_B, VAL) REPEAT_3_N(N, MLA_VAR_WITH_CONST_VEC, VAR_A, VAR_B, VAL)
+
+// Macro for adding a vector to N variables. Generates N statements that define VAR##N = RHS_ACCESSOR_DEF(...)
+#define ADD_VECTOR_TO_VAR_DEF(ID, TYPE, VAR, VEC) VAR##ID += VEC
+#define REPEAT_ADD_VECTOR_TO_VAR(N, VAR, VEC) REPEAT_3_N(N, ADD_VECTOR_TO_VAR, "", VAR, VEC)
+
+// Macro for adding two N variables. Generates N statements that define VAR##N = RHS_ACCESSOR_DEF(...)
+#define ADD_TWO_VARS_DEF(ID, TYPE, VAR_A, VAR_B) VAR_A##ID += VAR_B##ID
+#define REPEAT_ADD_TWO_VARS(N, VAR_A, VAR_B) REPEAT_3_N(N, ADD_TWO_VARS, "", VAR_A, VAR_B)
+
+// Macro for performing Max between a constant and N variables. Generates N statements that define VAR##N = RHS_ACCESSOR_DEF(...)
+#define MAX_CONST_VAR_DEF(ID, TYPE, VAR, VAL) VAR##ID = max(VAR##ID, (TYPE)VAL)
+#define REPEAT_MAX_CONST_VAR(N, TYPE, VAR, VAL) REPEAT_3_N(N, MAX_CONST_VAR, TYPE, VAR, VAL)
+
+// Macro for performing Min between a constant and N variables. Generates N statements that define VAR##N = RHS_ACCESSOR_DEF(...)
+#define MIN_CONST_VAR_DEF(ID, TYPE, VAR, VAL) VAR##ID = min(VAR##ID, (TYPE)VAL)
+#define REPEAT_MIN_CONST_VAR(N, TYPE, VAR, VAL) REPEAT_3_N(N, MIN_CONST_VAR, TYPE, VAR, VAL)
+
+// Macro for applying ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE to N variables. Generates N statements that define VAR##N = RHS_ACCESSOR_DEF(...)
+#define ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE_DEF(ID, SIZE, VAR, RES_MUL, RES_SHIFT) VAR##ID = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(VAR##ID, RES_MUL, RES_SHIFT, SIZE)
+#define REPEAT_ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(N, SIZE, VAR, RES_MUL, RES_SHIFT) REPEAT_4_N(N, ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE, SIZE, VAR, RES_MUL, RES_SHIFT)
+
+// Macro for applying ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE to N variables. Generates N statements that define VAR##N = RHS_ACCESSOR_DEF(...)
+#define ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE_DEF(ID, SIZE, VAR, RES_MUL, RES_SHIFT) VAR##ID = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(VAR##ID, RES_MUL, RES_SHIFT, SIZE)
+#define REPEAT_ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(N, SIZE, VAR, RES_MUL, RES_SHIFT) REPEAT_4_N(N, ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE, SIZE, VAR, RES_MUL, RES_SHIFT)
+
+// Macro for applying per-channel ASYMM_MULT_BY_QUANT_MULTIPLIER to N variables.
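+// The shift sign is only known per channel at run time, so each repeat evaluates both
+// shift directions in a block-scoped ({ ... }) statement expression and picks the result
+// per element with select(shift_lt0, shift_gt0, RES_SHIFT >= 0).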
+#define ASYMM_MULT_BY_QUANT_MULTIPLIER_PER_CHANNEL_DEF(ID, SIZE, VAR, RES_MUL, RES_SHIFT) \ + ({ \ + VEC_DATA_TYPE(int, N0) \ + VAR##ID_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(VAR##ID, RES_MUL, RES_SHIFT, N0); \ + VEC_DATA_TYPE(int, N0) \ + VAR##ID_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(VAR##ID, RES_MUL, RES_SHIFT, N0); \ + VAR##ID = select(VAR##ID_shift_lt0, VAR##ID_shift_gt0, RES_SHIFT >= 0); \ + }) +#define REPEAT_ASYMM_MULT_BY_QUANT_MULTIPLIER_PER_CHANNEL(N, SIZE, VAR, RES_MUL, RES_SHIFT) REPEAT_4_N(N, ASYMM_MULT_BY_QUANT_MULTIPLIER_PER_CHANNEL, SIZE, VAR, RES_MUL, RES_SHIFT) + #endif // ARM_COMPUTE_REPEAT_H diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp index 3fa2fad8fd..c4ed691f2e 100644 --- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp +++ b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 ARM Limited. + * Copyright (c) 2019-2020 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -50,21 +50,27 @@ namespace { using ElementsProcessed = Steps; -Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, - const GEMMReshapeInfo &gemm_info) +Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, const GEMMKernelInfo &gemm_info, + const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias, + const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input0, input1, output); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1); ARM_COMPUTE_RETURN_ERROR_ON_MSG(input0->num_dimensions() > 4, "The number of dimensions for the LHS matrix must be <= 4"); ARM_COMPUTE_RETURN_ERROR_ON_MSG(input1->num_dimensions() > 3, "The number of dimensions for the RHS matrix must be <= 3"); + + const GEMMRHSMatrixInfo rhs_info = gemm_info.rhs_info; + const GEMMLHSMatrixInfo lhs_info = gemm_info.lhs_info; + const GEMMLowpOutputStageInfo output_stage = gemm_info.output_stage; + ARM_COMPUTE_RETURN_ERROR_ON_MSG((((rhs_info.k0 & (rhs_info.k0 - 1)) && rhs_info.k0 != 3) || (rhs_info.k0 > 16)), "Only 2,3,4,8,16 are supported for k0"); ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.m0 < 1 || lhs_info.m0 > 8); ARM_COMPUTE_RETURN_ERROR_ON_MSG((((rhs_info.n0 & (rhs_info.n0 - 1)) && rhs_info.n0 != 3) || rhs_info.n0 > 16), "Only 2,3,4,8,16 are supported for n0"); - const int m = gemm_info.m(); - const int n = gemm_info.n(); - const int k = gemm_info.k(); + const int m = gemm_info.m; + const int n = gemm_info.n; + const int k = gemm_info.k; TensorShape tensor_shape1{ input1->tensor_shape() }; tensor_shape1.set(0, n); @@ -74,7 +80,7 @@ Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1, const TensorInfo tensor_info_reshaped1 = input1->clone()->set_tensor_shape(compute_rhs_reshaped_shape(tensor_info1, rhs_info)); ARM_COMPUTE_RETURN_ERROR_ON(input0->dimension(0) != static_cast(k)); - if(gemm_info.reinterpret_input_as_3d()) + if(gemm_info.reinterpret_input_as_3d) { ARM_COMPUTE_RETURN_ERROR_ON(input0->dimension(1) * input0->dimension(2) != static_cast(m)); } @@ -84,23 +90,118 @@ Status 
validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1, } ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, &tensor_info_reshaped1); + const TensorShape expected_output_shape = compute_mm_shape(*input0, *input1, gemm_info); if(output->total_size() != 0) { - const TensorInfo tensor_info_output = output->clone()->set_tensor_shape(compute_mm_shape(*input0, *input1, gemm_info)); + const TensorInfo tensor_info_output = output->clone()->set_tensor_shape(expected_output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32); + if(output_stage.type == GEMMLowpOutputStageType::NONE) + { + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32); + } + else + { + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input0, output); + } + } + + if(bias != nullptr) + { + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1); + ARM_COMPUTE_RETURN_ERROR_ON(expected_output_shape[0] != bias->dimension(0)); } + ARM_COMPUTE_RETURN_ERROR_ON_MSG((output_stage.type == GEMMLowpOutputStageType::QUANTIZE_DOWN) || (output_stage.type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FLOAT), + "Only GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT is supported"); + + // Checks performed if the output stage needs to be fused + if(output_stage.type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT) + { + // If a_offset == 0, vector_sum_col can be a nullptr + if(gemm_info.a_offset != 0) + { + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON(vector_sum_col->dimension(0) != expected_output_shape[0]); + } + + // If b_offset == 0, vector_sum_row can be a nullptr + if(gemm_info.b_offset != 0) + { + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32); + + // Check if mm result is a 3D reinterpretation + const bool reinterpret_as_3d = expected_output_shape.num_dimensions() > 1 && expected_output_shape.y() != vector_sum_row->tensor_shape().x(); + + // Validate input + ARM_COMPUTE_RETURN_ERROR_ON(reinterpret_as_3d && vector_sum_row->dimension(0) != (expected_output_shape[1] * expected_output_shape[2])); + ARM_COMPUTE_RETURN_ERROR_ON(!reinterpret_as_3d && vector_sum_row->dimension(0) != expected_output_shape[1]); + + if(expected_output_shape.num_dimensions() > 1) + { + const unsigned int output_batch_idx = reinterpret_as_3d ? 
3 : 2; + + TensorShape vector_sum_row_shape = vector_sum_row->tensor_shape(); + vector_sum_row_shape.collapse_from(1); + TensorShape collapsed_output_shape(expected_output_shape); + collapsed_output_shape.collapse_from(output_batch_idx); + + ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_row_shape[1] != collapsed_output_shape[output_batch_idx], + "vector_sum_row must have the same number of batches of output tensor"); + + if(gemm_info.a_offset != 0) + { + TensorShape vector_sum_col_shape = vector_sum_col->tensor_shape(); + vector_sum_col_shape.collapse_from(1); + + ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_col_shape[1] != 1 && vector_sum_col_shape[1] != vector_sum_row_shape[1], + "vector_sum_col tensor must have the same number of batches of vector_sum_row_shape or the number of batches must be set to 1"); + } + } + } + + PixelValue min_val{}; + PixelValue max_val{}; + if(output->total_size() != 0) + { + ARM_COMPUTE_RETURN_ERROR_ON(output_stage.output_data_type != output->data_type()); + std::tie(min_val, max_val) = get_min_max(output->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_max_bound > max_val.get()); + ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_min_bound < min_val.get() || output_stage.gemmlowp_min_bound > output_stage.gemmlowp_max_bound); + } + else + { + std::tie(min_val, max_val) = get_min_max(output_stage.output_data_type); + ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_max_bound > max_val.get()); + ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_min_bound < min_val.get() || output_stage.gemmlowp_min_bound > output_stage.gemmlowp_max_bound); + } + + if(output_multipliers != nullptr && output_shifts != nullptr) + { + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_multipliers, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON(output_multipliers->num_dimensions() > 1); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_shifts, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON(output_shifts->num_dimensions() > 1); + if(output_stage.is_quantized_per_channel) + { + ARM_COMPUTE_RETURN_ERROR_ON(expected_output_shape[0] != output_shifts->dimension(0)); + ARM_COMPUTE_RETURN_ERROR_ON(expected_output_shape[0] != output_multipliers->dimension(0)); + } + } + } return Status{}; } -std::pair validate_and_configure_window(ITensorInfo *input0, ITensorInfo *input1, ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, - const GEMMReshapeInfo &gemm_info, ElementsProcessed &num_elements_processed) +std::pair validate_and_configure_window(ITensorInfo *input0, ITensorInfo *input1, ITensorInfo *output, const GEMMKernelInfo &gemm_info, + ITensorInfo *vector_sum_col, ITensorInfo *vector_sum_row, ITensorInfo *bias, + ITensorInfo *output_multipliers, ITensorInfo *output_shifts, ElementsProcessed &num_elements_processed) { + const GEMMLowpOutputStageInfo output_stage = gemm_info.output_stage; + unsigned int &num_elems_processed_per_iteration_x = num_elements_processed[0]; unsigned int &num_elems_processed_per_iteration_y = num_elements_processed[1]; - bool reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d(); - bool reinterpret_output_as_3d = (gemm_info.depth_output_gemm3d() != 0); + bool reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d; + bool reinterpret_output_as_3d = (gemm_info.depth_output_gemm3d != 0); Window win{}; Window win_out{}; @@ -114,7 +215,15 @@ std::pair validate_and_configure_window(ITensorInfo *input0, ITe } // Output tensor auto initialization if not yet initialized - 
auto_init_if_empty(*output, input0->clone()->set_tensor_shape(compute_mm_shape(*input0, *input1, gemm_info)).set_data_type(DataType::S32)); + const TensorShape expected_output_shape = compute_mm_shape(*input0, *input1, gemm_info); + if(output_stage.type != GEMMLowpOutputStageType::NONE) + { + auto_init_if_empty(*output, input0->clone()->set_tensor_shape(expected_output_shape).set_data_type(output_stage.output_data_type)); + } + else + { + auto_init_if_empty(*output, input0->clone()->set_tensor_shape(expected_output_shape).set_data_type(DataType::S32)); + } TensorInfo tmp_info(*output); @@ -128,19 +237,19 @@ std::pair validate_and_configure_window(ITensorInfo *input0, ITe } // Configure kernel window - num_elems_processed_per_iteration_x = rhs_info.n0; - num_elems_processed_per_iteration_y = lhs_info.m0; + num_elems_processed_per_iteration_x = gemm_info.rhs_info.n0; + num_elems_processed_per_iteration_y = gemm_info.lhs_info.m0; // Note: bottom paddings are calculated manually as the output can be reinterpreted as 3D tensor // The only way to set properly the paddings, it is to set those explicitly through the AccessWindowStatic - const int m = reinterpret_output_as_3d ? gemm_info.m() : input0->dimension(1); + const int m = reinterpret_output_as_3d ? gemm_info.m : input0->dimension(1); const int bottom_pad = (num_elems_processed_per_iteration_y - (m % num_elems_processed_per_iteration_y)) % num_elems_processed_per_iteration_y; win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y)); win_out = calculate_max_window(*output, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y)); AccessWindowStatic input0_access(input0, 0, 0, - ceil_to_multiple(input0->dimension(0), lhs_info.k0), + ceil_to_multiple(input0->dimension(0), gemm_info.lhs_info.k0), input0->dimension(1) + bottom_pad); AccessWindowStatic input1_access(input1, 0, 0, input1->dimension(0), @@ -152,6 +261,30 @@ std::pair validate_and_configure_window(ITensorInfo *input0, ITe window_changed = update_window_and_padding(win, input0_access, input1_access) || // window used by the execute_window_loop update_window_and_padding(win_out, output_access); // window used to update the padding requirements of output tensor + if(output_stage.type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT) + { + if(gemm_info.a_offset != 0) + { + AccessWindowHorizontal vector_sum_col_access(vector_sum_col, 0, num_elems_processed_per_iteration_x); + window_changed = window_changed || update_window_and_padding(win_out, vector_sum_col_access); + } + // No access window needed for vector_sum_row + ARM_COMPUTE_UNUSED(vector_sum_row); + + if(bias != nullptr) + { + AccessWindowHorizontal bias_access(bias, 0, num_elems_processed_per_iteration_x); + window_changed = window_changed || update_window_and_padding(win_out, bias_access); + } + + if(output_multipliers != nullptr && output_multipliers->dimension(0) > 1) + { + AccessWindowHorizontal output_multipliers_access(output_multipliers, 0, num_elems_processed_per_iteration_x); + AccessWindowHorizontal output_shifts_access(output_shifts, 0, num_elems_processed_per_iteration_x); + window_changed = window_changed || update_window_and_padding(win_out, output_multipliers_access, output_shifts_access); + } + } + output_access.set_valid_region(win_out, ValidRegion(Coordinates(), output->tensor_shape())); // Collapse along the Z direction @@ -166,23 +299,56 @@ std::pair validate_and_configure_window(ITensorInfo *input0, ITe } // namespace 
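For reference, the arithmetic that the fused kernel applies to each S32 accumulator can be summarized by the scalar C model below. This is a sketch only: requantize() simplifies ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE / ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE (saturation and the exact rounding of negative values are omitted), and the -D values are illustrative stand-ins for the build options assembled in configure() below.

    #include <stdint.h>

    /* Illustrative stand-ins for the -D build options */
    #define A_OFFSET          3
    #define B_OFFSET          (-2)
    #define K_OFFSET          (A_OFFSET * B_OFFSET * 64) /* a_offset * b_offset * K, with K = 64 */
    #define RESULT_MULTIPLIER 1395864371
    #define RESULT_SHIFT      8
    #define RESULT_OFFSET     10
    #define MIN_BOUND         0
    #define MAX_BOUND         255

    /* Simplified fixed-point requantization: a negative shift takes the greater-than-one
     * path (left shift first), a non-negative shift the less-than-one path (rounding
     * right shift after the doubling high multiply). */
    static int32_t requantize(int32_t acc, int32_t multiplier, int32_t shift)
    {
        if(shift < 0)
        {
            acc <<= -shift;
            shift = 0;
        }
        const int64_t prod = (int64_t)acc * (int64_t)multiplier;
        int32_t       res  = (int32_t)((prod + (1LL << 30)) >> 31); /* ~ high half of 2*acc*multiplier */
        if(shift > 0)
        {
            res = (res + (1 << (shift - 1))) >> shift; /* rounding divide by 2^shift */
        }
        return res;
    }

    /* One output element, per-tensor quantization, all optional terms enabled */
    static uint8_t fused_output_element(int32_t acc, int32_t sum_col_n, int32_t sum_row_m, int32_t bias_n)
    {
        acc += A_OFFSET * sum_col_n + B_OFFSET * sum_row_m + K_OFFSET; /* offset contribution   */
        acc += bias_n;                                                 /* -DADD_BIAS            */
        acc = requantize(acc, RESULT_MULTIPLIER, RESULT_SHIFT);        /* fixed-point multiply  */
        acc += RESULT_OFFSET;                                          /* requantization offset */
        acc = acc < MIN_BOUND ? MIN_BOUND : (acc > MAX_BOUND ? MAX_BOUND : acc);
        return (uint8_t)acc;                                           /* saturating convert    */
    }
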
 CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel()
-    : _input0(nullptr), _input1(nullptr), _output(nullptr), _slide_matrix_b(true), _reinterpret_input_as_3d(false), _reinterpret_output_as_3d(false), _use_dummy_work_items(false)
+    : _input0(nullptr),
+      _input1(nullptr),
+      _output(nullptr),
+      _vector_sum_col(nullptr),
+      _vector_sum_row(nullptr),
+      _bias(nullptr),
+      _output_multipliers(nullptr),
+      _output_shifts(nullptr),
+      _slide_matrix_b(true),
+      _reinterpret_input_as_3d(false),
+      _reinterpret_output_as_3d(false),
+      _use_dummy_work_items(false),
+      _is_quantized_per_channel(false),
+      _fuse_output_stage(false)
 {
 }

-void CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info,
-                                                              const GEMMReshapeInfo &gemm_info)
+void CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMKernelInfo &gemm_info,
+                                                              const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias,
+                                                              const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input0, input1, output);
-
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), output->info(), lhs_info, rhs_info, gemm_info));
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(),
+                                                  input1->info(),
+                                                  output->info(),
+                                                  gemm_info,
+                                                  vector_sum_col != nullptr ? vector_sum_col->info() : nullptr,
+                                                  vector_sum_row != nullptr ? vector_sum_row->info() : nullptr,
+                                                  bias != nullptr ? bias->info() : nullptr,
+                                                  output_multipliers != nullptr ? output_multipliers->info() : nullptr,
+                                                  output_shifts != nullptr ? output_shifts->info() : nullptr));
+
+    const GEMMRHSMatrixInfo       rhs_info     = gemm_info.rhs_info;
+    const GEMMLHSMatrixInfo       lhs_info     = gemm_info.lhs_info;
+    const GEMMLowpOutputStageInfo output_stage = gemm_info.output_stage;
+    const int32_t                 a_offset     = gemm_info.a_offset;
+    const int32_t                 b_offset     = gemm_info.b_offset;

     _input0                   = input0;
     _input1                   = input1;
     _output                   = output;
-    _reinterpret_input_as_3d  = gemm_info.reinterpret_input_as_3d();
-    _reinterpret_output_as_3d = (gemm_info.depth_output_gemm3d() != 0);
+    _vector_sum_col           = vector_sum_col;
+    _vector_sum_row           = vector_sum_row;
+    _bias                     = bias;
+    _output_multipliers       = output_multipliers;
+    _output_shifts            = output_shifts;
+    _reinterpret_input_as_3d  = gemm_info.reinterpret_input_as_3d;
+    _reinterpret_output_as_3d = (gemm_info.depth_output_gemm3d != 0);
     _use_dummy_work_items     = preferred_dummy_work_items_support(CLKernelLibrary::get().get_device());
+    _is_quantized_per_channel = output_stage.is_quantized_per_channel;

     // In case both input and output have to be reinterpreted as 3D tensors,
     // force reinterpret_input_as_3d and reinterpret_output_as_3d to be false.
@@ -199,7 +365,16 @@ void CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::configure(const ICLTensor *i
     ElementsProcessed num_elements_processed{};

     // Configure kernel window
-    auto win_config = validate_and_configure_window(input0->info(), input1->info(), output->info(), lhs_info, rhs_info, gemm_info, num_elements_processed);
+    auto win_config = validate_and_configure_window(input0->info(),
+                                                    input1->info(),
+                                                    output->info(),
+                                                    gemm_info,
+                                                    vector_sum_col != nullptr ? vector_sum_col->info() : nullptr,
+                                                    vector_sum_row != nullptr ? vector_sum_row->info() : nullptr,
+                                                    bias != nullptr ? bias->info() : nullptr,
+                                                    output_multipliers != nullptr ? output_multipliers->info() : nullptr,
+                                                    output_shifts != nullptr ? output_shifts->info() : nullptr,
+                                                    num_elements_processed);
     ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
     ICLKernel::configure_internal(win_config.second);

@@ -213,8 +388,8 @@ void CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::configure(const ICLTensor *i
     build_opts.add_option_if(rhs_info.interleave, "-DRHS_INTERLEAVE");
     build_opts.add_option_if(_use_dummy_work_items, "-DDUMMY_WORK_ITEMS");
     build_opts.add_option("-DM=" + support::cpp11::to_string(input0->info()->dimension(1)));
-    build_opts.add_option("-DN=" + support::cpp11::to_string(gemm_info.n()));
-    build_opts.add_option("-DK=" + support::cpp11::to_string(gemm_info.k()));
+    build_opts.add_option("-DN=" + support::cpp11::to_string(gemm_info.n));
+    build_opts.add_option("-DK=" + support::cpp11::to_string(gemm_info.k));
     build_opts.add_option("-DM0=" + support::cpp11::to_string(lhs_info.m0));
     build_opts.add_option("-DN0=" + support::cpp11::to_string(rhs_info.n0));
     build_opts.add_option("-DK0=" + support::cpp11::to_string(rhs_info.k0));
@@ -225,6 +400,35 @@ void CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::configure(const ICLTensor *i
     std::string kernel_name("gemmlowp_mm_reshaped_only_rhs_");
     kernel_name += rhs_info.transpose ? "t" : "nt";

+    if(output_stage.type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT)
+    {
+        kernel_name += "_fused_output_stage_fixedpoint";
+        _fuse_output_stage = true;
+        // If a_offset == 0, vector_sum_col can be a nullptr
+        if(a_offset != 0)
+        {
+            build_opts.add_option("-DA_OFFSET=" + support::cpp11::to_string(a_offset));
+            build_opts.add_option_if(vector_sum_col->info()->tensor_shape().num_dimensions() > 1, "-DSUM_COL_HAS_BATCHES");
+        }
+        // If b_offset == 0, vector_sum_row can be a nullptr
+        build_opts.add_option_if(b_offset != 0, "-DB_OFFSET=" + support::cpp11::to_string(b_offset));
+        build_opts.add_option("-DK_OFFSET=" + support::cpp11::to_string(a_offset * b_offset * input0->info()->dimension(0)));
+        build_opts.add_option_if(bias != nullptr, "-DADD_BIAS");
+        build_opts.add_option("-DRESULT_OFFSET=" + support::cpp11::to_string(output_stage.gemmlowp_offset));
+        build_opts.add_option("-DRESULT_MULTIPLIER=" + support::cpp11::to_string(output_stage.gemmlowp_multipliers[0]));
+        build_opts.add_option("-DRESULT_SHIFT=" + support::cpp11::to_string(output_stage.gemmlowp_shifts[0]));
+        build_opts.add_option_if(_is_quantized_per_channel, "-DPER_CHANNEL_QUANTIZATION");
+
+        const int min = output_stage.gemmlowp_min_bound;
+        const int max = output_stage.gemmlowp_max_bound;
+
+        PixelValue min_val{};
+        PixelValue max_val{};
+        std::tie(min_val, max_val) = get_min_max(output->info()->data_type());
+        build_opts.add_option_if((min != min_val.get<int32_t>()) && (min != max), "-DMIN_BOUND=" + support::cpp11::to_string(min));
+        build_opts.add_option_if((max != max_val.get<int32_t>()) && (min != max), "-DMAX_BOUND=" + support::cpp11::to_string(max));
+    }
+
     // Create kernel
     _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));

@@ -239,7 +443,7 @@ void CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::configure(const ICLTensor *i
     _config_id += "_";
     _config_id += support::cpp11::to_string(output->info()->dimension(0));
     _config_id += "_";
-    _config_id += support::cpp11::to_string(gemm_info.k());
+    _config_id += support::cpp11::to_string(gemm_info.k);
     _config_id += "_";
     _config_id += support::cpp11::to_string(output->info()->dimension(2));
     _config_id += "_";
@@ -254,17 +458,21 @@ void CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::configure(const ICLTensor *i
     _config_id += support::cpp11::to_string(rhs_info.interleave);
 }

-Status CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info,
-                                                               const GEMMRHSMatrixInfo &rhs_info, const GEMMReshapeInfo &gemm_info)
+Status CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, const GEMMKernelInfo &gemm_info,
+                                                               const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias,
+                                                               const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
 {
     ElementsProcessed num_elements_processed{};
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input0, input1, output, lhs_info, rhs_info, gemm_info));
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input0, input1, output, gemm_info, vector_sum_col, vector_sum_row, bias, output_multipliers, output_shifts));
     ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input0->clone().get(),
                                                               input1->clone().get(),
                                                               output->clone().get(),
-                                                              lhs_info,
-                                                              rhs_info,
                                                               gemm_info,
+                                                              vector_sum_col != nullptr ? vector_sum_col->clone().get() : nullptr,
+                                                              vector_sum_row != nullptr ? vector_sum_row->clone().get() : nullptr,
+                                                              bias != nullptr ? bias->clone().get() : nullptr,
+                                                              output_multipliers != nullptr ? output_multipliers->clone().get() : nullptr,
+                                                              output_shifts != nullptr ? output_shifts->clone().get() : nullptr,
                                                               num_elements_processed)
                                 .first);

@@ -304,6 +512,21 @@ void CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::run(const Window &window, cl
         _kernel.setArg<cl_uint>(idx0, static_cast<unsigned int>(total_cross_plane_pad));
     }

+    // Set window for vector_sum_col
+    Window win_vector_sum_col = slice;
+    win_vector_sum_col.set(Window::DimY, Window::Dimension(0, 0, 0));
+    win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0));
+
+    // Set window for vector_sum_row
+    Window win_vector_sum_row = slice;
+    win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
+    win_vector_sum_row.set(Window::DimY, Window::Dimension(0, 0, 0));
+    win_vector_sum_row.set(Window::DimZ, Window::Dimension(0, 0, 0));
+
+    Window biases_slice = slice;
+    biases_slice.set(Window::DimY, Window::Dimension(0, 1, 1));
+    biases_slice.set(Window::DimZ, Window::Dimension(0, 1, 1));
+
     do
     {
         Window slice_b = slice;
@@ -321,6 +544,26 @@ void CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::run(const Window &window, cl
         _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input0->info()->strides_in_bytes()[2]));
         _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input1->info()->strides_in_bytes()[2]));
         _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_output->info()->strides_in_bytes()[2]));
+        if(_reinterpret_input_as_3d)
+        {
+            // Pass bottom paddings to the kernel if the input has to be reinterpreted as 3D tensor
+            idx++;
+        }
+
+        if(_reinterpret_output_as_3d)
+        {
+            // Pass bottom paddings to the kernel if the output has to be reinterpreted as 3D tensor
+            idx++;
+        }
+
+        if(_fuse_output_stage)
+        {
+            add_2D_tensor_argument_if((_vector_sum_col != nullptr), idx, _vector_sum_col, win_vector_sum_col);
+            add_2D_tensor_argument_if((_vector_sum_row != nullptr), idx, _vector_sum_row, win_vector_sum_row);
+            add_1D_tensor_argument_if((_bias != nullptr), idx, _bias, biases_slice);
+            add_1D_tensor_argument_if(_is_quantized_per_channel, idx, _output_multipliers, biases_slice);
+            add_1D_tensor_argument_if(_is_quantized_per_channel, idx, _output_shifts, biases_slice);
+        }
         enqueue(queue, *this, slice, lws_hint(), _use_dummy_work_items);
     }
     while(window.slide_window_slice_3D(slice));
diff --git a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
index cdb78c291d..54b63df924 100644
--- a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
@@ -75,8 +75,9 @@ CLGEMMLowpMatrixMultiplyCore::CLGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager>
+    _gemm_output_stage_multipliers.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));
+    _gemm_output_stage_shifts.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));

-    if(_is_gemm_reshaped)
+    GEMMLowpOutputStageInfo gemmlowp_output_stage = gemm_info.gemmlowp_output_stage();
+    gemmlowp_output_stage.output_data_type        = _matrix_a->info()->data_type();
+
+    gemm_kernel_info.output_stage = gemmlowp_output_stage;
+
+    if(_is_gemm_reshaped && gemmlowp_output_stage.type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT)
     {
-        // Configure and tune matrix multiply kernel
-        _mm_reshaped_only_rhs_kernel.configure(_matrix_a, matrix_b, &_mm_result_s32, lhs_info, rhs_info, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
+        // Configure and tune matrix multiply kernel with fused output stage
+        _mm_reshaped_only_rhs_kernel.configure(_matrix_a, matrix_b, output, gemm_kernel_info, _a_offset == 0 ? nullptr : &_vector_sum_col,
+                                               _b_offset == 0 ? nullptr : &_vector_sum_row, c, &_gemm_output_stage_multipliers, &_gemm_output_stage_shifts);
     }
     else
     {
-        if(_is_midgard)
+        _run_output_stage = true;
+
+        _memory_group.manage(&_mm_result_s32);
+
+        if(_is_gemm_reshaped)
         {
-            // Configure matrix multiply kernel
-            _mm_midgard_kernel.configure(_matrix_a, matrix_b, &_mm_result_s32, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
+            _mm_reshaped_only_rhs_kernel.configure(_matrix_a, matrix_b, &_mm_result_s32, gemm_kernel_info);
         }
         else
         {
-            // Pick up the GEMM configuration
-            std::tie(lhs_info, rhs_info) = CLGEMMNativeKernelConfigurationFactory::create(gpu_target)->configure(m, n, k, batch_size, DataType::QASYMM8);
-
-            // Configure matrix multiply kernel
-            _mm_native_kernel.configure(_matrix_a, matrix_b, &_mm_result_s32, lhs_info, rhs_info, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
+            if(_is_midgard)
+            {
+                // Configure matrix multiply kernel
+                _mm_midgard_kernel.configure(_matrix_a, matrix_b, &_mm_result_s32, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
+            }
+            else
+            {
+                // Pick up the GEMM configuration
+                std::tie(lhs_info, rhs_info) = CLGEMMNativeKernelConfigurationFactory::create(gpu_target)->configure(m, n, k, batch_size, DataType::QASYMM8);
+
+                // Configure matrix multiply kernel
+                _mm_native_kernel.configure(_matrix_a, matrix_b, &_mm_result_s32, lhs_info, rhs_info, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
+            }
+        }
+        _offset_contribution_output_stage_kernel.configure(&_mm_result_s32, _a_offset == 0 ? nullptr : &_vector_sum_col, _b_offset == 0 ? nullptr : &_vector_sum_row, c, output, a->info()->dimension(0),
+                                                           _a_offset, _b_offset, gemmlowp_output_stage, &_gemm_output_stage_multipliers, &_gemm_output_stage_shifts);
+
+        _mm_result_s32.allocator()->allocate();
     }
 }

-    // Configure offset contribution kernel
-    const size_t num_filters = (gemm_info.gemmlowp_output_stage().is_quantized_per_channel) ? gemm_info.gemmlowp_output_stage().gemmlowp_multipliers.size() : 1;
-
-    _gemm_output_stage_multipliers.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));
-    _gemm_output_stage_shifts.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));
-
-    GEMMLowpOutputStageInfo gemmlowp_output_stage = gemm_info.gemmlowp_output_stage();
-    gemmlowp_output_stage.output_data_type        = _matrix_a->info()->data_type();
-    _offset_contribution_output_stage_kernel.configure(&_mm_result_s32, _a_offset == 0 ? nullptr : &_vector_sum_col, _b_offset == 0 ? nullptr : &_vector_sum_row, c, output, a->info()->dimension(0),
-                                                       _a_offset, _b_offset, gemmlowp_output_stage, &_gemm_output_stage_multipliers, &_gemm_output_stage_shifts);

     _gemm_output_stage_multipliers.allocator()->allocate();
     _gemm_output_stage_shifts.allocator()->allocate();
@@ -220,15 +244,14 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
     std::memcpy(_gemm_output_stage_shifts.ptr_to_element(Coordinates(0)), gemm_info.gemmlowp_output_stage().gemmlowp_shifts.data(), num_filters * sizeof(int32_t));
     _gemm_output_stage_multipliers.unmap();
     _gemm_output_stage_shifts.unmap();
-
-    _mm_result_s32.allocator()->allocate();
 }
 else
 {
+    _run_offset_contribution = true;
     if(_is_gemm_reshaped)
     {
         // Configure and tune matrix multiply kernel
-        _mm_reshaped_only_rhs_kernel.configure(_matrix_a, matrix_b, output, lhs_info, rhs_info, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
+        _mm_reshaped_only_rhs_kernel.configure(_matrix_a, matrix_b, output, gemm_kernel_info);
     }
     else
     {
@@ -350,61 +373,85 @@ Status CLGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso
         ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixAReductionKernel::validate(a, &info_vector_sum_row));
     }

+    GEMMKernelInfo gemm_kernel_info;
+    gemm_kernel_info.m                       = m;
+    gemm_kernel_info.n                       = n;
+    gemm_kernel_info.k                       = k;
+    gemm_kernel_info.depth_output_gemm3d     = depth_output_gemm3d;
+    gemm_kernel_info.reinterpret_input_as_3d = reinterpret_input_as_3d;
+    gemm_kernel_info.lhs_info                = lhs_info;
+    gemm_kernel_info.rhs_info                = rhs_info;
+    gemm_kernel_info.a_offset                = a_offset;
+    gemm_kernel_info.b_offset                = b_offset;
     if(gemm_info.gemmlowp_output_stage().type != GEMMLowpOutputStageType::NONE)
     {
-        TensorInfo mm_result_s32_info{};
+        const size_t num_filters = (gemm_info.gemmlowp_output_stage().is_quantized_per_channel) ? gemm_info.gemmlowp_output_stage().gemmlowp_multipliers.size() : 1;

-        if(reshape_matrix_b)
-        {
-            // Output tensor auto inizialitation if not yet initialized
-            auto_init_if_empty(mm_result_s32_info, a->clone()->set_tensor_shape(compute_mm_shape(*matrix_a_info, *matrix_b_info, reshape_info)).set_data_type(DataType::S32));
+        const TensorInfo gemm_output_stage_multipliers_shifts_info(TensorInfo(TensorShape(num_filters), 1, DataType::S32));

-            // Validate matrix multiply
-            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::validate(matrix_a_info, matrix_b_info, &mm_result_s32_info, lhs_info, rhs_info, reshape_info));
+        GEMMLowpOutputStageInfo gemmlowp_output_stage = gemm_info.gemmlowp_output_stage();
+        gemmlowp_output_stage.output_data_type        = a->data_type();
+
+        gemm_kernel_info.output_stage = gemmlowp_output_stage;
+        if(reshape_matrix_b && gemm_info.gemmlowp_output_stage().type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT)
+        {
+            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::validate(matrix_a_info, matrix_b_info, output, gemm_kernel_info,
+                                                                                                a_offset == 0 ? nullptr : &info_vector_sum_col,
+                                                                                                b_offset == 0 ? nullptr : &info_vector_sum_row,
+                                                                                                c,
+                                                                                                &gemm_output_stage_multipliers_shifts_info,
+                                                                                                &gemm_output_stage_multipliers_shifts_info));
         }
         else
         {
-            // Output tensor auto inizialitation if not yet initialized
-            auto_init_if_empty(mm_result_s32_info, a->clone()->set_tensor_shape(compute_mm_shape(*matrix_a_info, *matrix_b_info, false, reshape_info)).set_data_type(DataType::S32));
+            TensorInfo mm_result_s32_info{};

-            if(is_midgard)
+            if(reshape_matrix_b)
             {
+                // Output tensor auto initialization if not yet initialized
+                auto_init_if_empty(mm_result_s32_info, a->clone()->set_tensor_shape(compute_mm_shape(*matrix_a_info, *matrix_b_info, reshape_info)).set_data_type(DataType::S32));
+
                 // Validate matrix multiply
-                ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyKernel::validate(matrix_a_info, matrix_b_info, &mm_result_s32_info, reshape_info));
+                ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::validate(matrix_a_info, matrix_b_info, &mm_result_s32_info, gemm_kernel_info));
             }
             else
             {
-                // Pick up the GEMM configuration
-                std::tie(lhs_info, rhs_info) = CLGEMMNativeKernelConfigurationFactory::create(gpu_target)->configure(m, n, k, batch_size, DataType::QASYMM8);
-
-                // Validate matrix multiply
-                ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyNativeKernel::validate(matrix_a_info, matrix_b_info, &mm_result_s32_info, lhs_info, rhs_info, reshape_info));
+                // Output tensor auto initialization if not yet initialized
+                auto_init_if_empty(mm_result_s32_info, a->clone()->set_tensor_shape(compute_mm_shape(*matrix_a_info, *matrix_b_info, false, reshape_info)).set_data_type(DataType::S32));
+
+                if(is_midgard)
+                {
+                    // Validate matrix multiply
+                    ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyKernel::validate(matrix_a_info, matrix_b_info, &mm_result_s32_info, reshape_info));
+                }
+                else
+                {
+                    // Pick up the GEMM configuration
+                    std::tie(lhs_info, rhs_info) = CLGEMMNativeKernelConfigurationFactory::create(gpu_target)->configure(m, n, k, batch_size, DataType::QASYMM8);
+
+                    // Validate matrix multiply
+                    ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyNativeKernel::validate(matrix_a_info, matrix_b_info, &mm_result_s32_info, lhs_info, rhs_info, reshape_info));
+                }
             }
-        }
-
-        // Validate offset contribution kernel
-        const size_t num_filters = (gemm_info.gemmlowp_output_stage().is_quantized_per_channel) ? gemm_info.gemmlowp_output_stage().gemmlowp_multipliers.size() : 1;
-        const TensorInfo gemm_output_stage_multipliers_shifts_info(TensorInfo(TensorShape(num_filters), 1, DataType::S32));
-
-        GEMMLowpOutputStageInfo gemmlowp_output_stage = gemm_info.gemmlowp_output_stage();
-        gemmlowp_output_stage.output_data_type        = a->data_type();
-        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpOffsetContributionOutputStageKernel::validate(&mm_result_s32_info,
-                                                                                            a_offset == 0 ? nullptr : &info_vector_sum_col,
-                                                                                            b_offset == 0 ? nullptr : &info_vector_sum_row,
-                                                                                            c,
-                                                                                            output,
-                                                                                            a_offset, b_offset,
-                                                                                            gemmlowp_output_stage,
-                                                                                            &gemm_output_stage_multipliers_shifts_info,
-                                                                                            &gemm_output_stage_multipliers_shifts_info));
+            // Validate offset contribution kernel
+            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpOffsetContributionOutputStageKernel::validate(&mm_result_s32_info,
+                                                                                                a_offset == 0 ? nullptr : &info_vector_sum_col,
+                                                                                                b_offset == 0 ? nullptr : &info_vector_sum_row,
+                                                                                                c,
+                                                                                                output,
+                                                                                                a_offset, b_offset,
+                                                                                                gemmlowp_output_stage,
+                                                                                                &gemm_output_stage_multipliers_shifts_info,
+                                                                                                &gemm_output_stage_multipliers_shifts_info));
+        }
     }
     else
     {
         if(reshape_matrix_b)
         {
             // Validate matrix multiply
-            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::validate(matrix_a_info, matrix_b_info, output, lhs_info, rhs_info, reshape_info));
+            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::validate(matrix_a_info, matrix_b_info, output, gemm_kernel_info));
         }
         else
         {
@@ -458,6 +505,12 @@ void CLGEMMLowpMatrixMultiplyCore::run()
         CLScheduler::get().enqueue(_mtx_b_reduction_kernel, false);
     }

+    // Run matrix A reduction kernel only if _b_offset is not equal to 0
+    if(_b_offset != 0)
+    {
+        CLScheduler::get().enqueue(_mtx_a_reduction_kernel, false);
+    }
+
     // Run matrix multiply
     if(_is_gemm_reshaped)
     {
@@ -474,19 +527,12 @@ void CLGEMMLowpMatrixMultiplyCore::run()
             CLScheduler::get().enqueue(_mm_native_kernel, false);
         }
     }
-
-    // Run matrix A reduction kernel only if _b_offset is not equal to 0
-    if(_b_offset != 0)
-    {
-        CLScheduler::get().enqueue(_mtx_a_reduction_kernel, false);
-    }
-
-    if(_fuse_output_stage)
+    if(_run_output_stage)
     {
         // Run offset contribution/output stage kernel
         CLScheduler::get().enqueue(_offset_contribution_output_stage_kernel, true);
     }
-    else
+    if(_run_offset_contribution)
     {
         // Run offset contribution kernel
         CLScheduler::get().enqueue(_offset_contribution_kernel, true);
diff --git a/tests/validation/CL/GEMMLowpMatrixMultiplyReshapedOnlyRHS.cpp b/tests/validation/CL/GEMMLowpMatrixMultiplyReshapedOnlyRHS.cpp
index 30c91b7091..f4083bfd95 100644
--- a/tests/validation/CL/GEMMLowpMatrixMultiplyReshapedOnlyRHS.cpp
+++ b/tests/validation/CL/GEMMLowpMatrixMultiplyReshapedOnlyRHS.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -130,7 +130,12 @@ void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned
     rhs_info.interleave = i_value_rhs;
     rhs_info.transpose  = true;

-    GEMMReshapeInfo gemm_info(M, N, K);
+    GEMMKernelInfo gemm_info;
+    gemm_info.m        = M;
+    gemm_info.n        = N;
+    gemm_info.k        = K;
+    gemm_info.lhs_info = lhs_info;
+    gemm_info.rhs_info = rhs_info;

     const TensorShape lhs_shape(K, M, b_value);
     const TensorShape rhs_shape(N, K, b_value);
@@ -152,7 +157,7 @@ void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned
     // Create and configure function
     CLGEMMLowpMatrixMultiplyReshapedOnlyRHS gemm;
-    gemm.configure(&lhs, &rhs_reshaped, &dst, lhs_info, rhs_info, gemm_info);
+    gemm.configure(&lhs, &rhs_reshaped, &dst, gemm_info);
 }
 } // namespace
diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h
index d13ada9d64..0207f4c5ae 100644
--- a/tests/validation/fixtures/GEMMLowpFixture.h
+++ b/tests/validation/fixtures/GEMMLowpFixture.h
@@ -24,6 +24,7 @@
 #ifndef ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE
 #define ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE

+#include "arm_compute/core/KernelDescriptors.h"
 #include "arm_compute/core/TensorShape.h"
 #include "arm_compute/core/Types.h"
 #include "arm_compute/core/utils/quantization/AsymmHelpers.h"
@@ -1012,13 +1013,19 @@ protected:
         const unsigned int N = rhs_shape[0];
         const unsigned int K = lhs_shape[0];

+        GEMMKernelInfo gemm_info;
+        gemm_info.m        = M;
+        gemm_info.n        = N;
+        gemm_info.k        = K;
+        gemm_info.lhs_info = lhs_info;
+        gemm_info.rhs_info = rhs_info;
         // The output tensor will be auto-initialized within the function

         // Create and configure function
         ReshapeRHSFunctionType reshape_rhs;
         GEMMFunctionType       gemm;
         reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
-        gemm.configure(&lhs, &rhs_reshaped, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K));
+        gemm.configure(&lhs, &rhs_reshaped, &dst, gemm_info);

         ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -1148,13 +1155,20 @@ protected:
         const unsigned int N = rhs_shape[0];
         const unsigned int K = lhs_shape[0];

+        GEMMKernelInfo gemm_info;
+        gemm_info.m                   = M;
+        gemm_info.n                   = N;
+        gemm_info.k                   = K;
+        gemm_info.depth_output_gemm3d = m_h;
+        gemm_info.lhs_info            = lhs_info;
+        gemm_info.rhs_info            = rhs_info;
         // The output tensor will be auto-initialized within the function

         // Create and configure function
         ReshapeRHSFunctionType reshape_rhs;
         GEMMFunctionType       gemm;
         reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
-        gemm.configure(&lhs, &rhs_reshaped, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K, 1, 1, m_h));
+        gemm.configure(&lhs, &rhs_reshaped, &dst, gemm_info);

         ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
-- 
cgit v1.2.1
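As a usage sketch, a caller wires up the fused path by populating the GEMMKernelInfo descriptor introduced above and passing the auxiliary tensors to the new configure() overload. Field, type and parameter names below are the ones used in this patch; the tensors (lhs, rhs_reshaped, dst, vector_sum_col, vector_sum_row, bias, output_multipliers, output_shifts) are assumed to be already-initialised CLTensor objects with the shapes and data types documented in the kernel header, and every numeric value is an illustrative placeholder.

// Illustrative values only; gemmlowp_multipliers/gemmlowp_shifts hold a single
// entry unless per-channel quantization is used, in which case there is one
// entry per output channel (OFM).
GEMMLowpOutputStageInfo output_stage;
output_stage.type                 = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
output_stage.output_data_type     = DataType::QASYMM8;
output_stage.gemmlowp_offset      = 10;
output_stage.gemmlowp_multipliers = { 1073741824 };
output_stage.gemmlowp_shifts      = { 4 };
output_stage.gemmlowp_min_bound   = 0;
output_stage.gemmlowp_max_bound   = 255;

GEMMKernelInfo gemm_info;
gemm_info.m            = M;        // original number of LHS rows
gemm_info.n            = N;        // original number of RHS columns
gemm_info.k            = K;        // inner GEMM dimension
gemm_info.lhs_info     = lhs_info; // m0/k0 within the ranges documented in the header
gemm_info.rhs_info     = rhs_info; // n0/k0/transpose used when reshaping the RHS
gemm_info.a_offset     = a_offset; // LHS quantization offset
gemm_info.b_offset     = b_offset; // RHS quantization offset
gemm_info.output_stage = output_stage;

// vector_sum_col/vector_sum_row may be nullptr when a_offset/b_offset is 0.
CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel mm_kernel;
mm_kernel.configure(&lhs, &rhs_reshaped, &dst, gemm_info,
                    a_offset == 0 ? nullptr : &vector_sum_col,
                    b_offset == 0 ? nullptr : &vector_sum_row,
                    &bias, &output_multipliers, &output_shifts);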