From 959c26d0457deeebf7306b9e4317863f144415b5 Mon Sep 17 00:00:00 2001
From: Manuel Bottini
Date: Mon, 2 Dec 2019 16:22:35 +0000
Subject: COMPMID-2790: Add support for QASYMM8_SIGNED in CLGEMMLowpMatrixMultiplyCore

Change-Id: Ifdaeb53c512ba697f174649c026075010f54f628
Signed-off-by: Manuel Bottini
Reviewed-on: https://review.mlplatform.org/c/2472
Reviewed-by: Michele Di Giorgio
Comments-Addressed: Arm Jenkins
Reviewed-by: Sang-Hoon Park
Tested-by: Arm Jenkins
Reviewed-by: Giuseppe Rossini
---
 .../core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h |  8 ++++----
 .../kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h |  9 +++++----
 arm_compute/core/CL/kernels/CLGEMMLowpReductionKernel.h    | 10 +++++-----
 arm_compute/core/Types.h                                   | 11 ++++++-----
 .../runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h    |  8 ++++----
 5 files changed, 24 insertions(+), 22 deletions(-)

diff --git a/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h b/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h
index db4bf3664a..e1191f265e 100644
--- a/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -30,7 +30,7 @@ namespace arm_compute
 {
 class ICLTensor;
 
-/** OpenCL kernel to multiply matrices with QASYMM8 data type */
+/** OpenCL kernel to multiply matrices with QASYMM8/QASYMM8_SIGNED data type */
 class CLGEMMLowpMatrixMultiplyNativeKernel : public ICLKernel
 {
 public:
@@ -46,7 +46,7 @@ public:
     CLGEMMLowpMatrixMultiplyNativeKernel &operator=(CLGEMMLowpMatrixMultiplyNativeKernel &&) = default;
     /** Initialise the kernel's input and output.
      *
-     * @param[in]  input0   Input tensor containing the LHS matrix. Data type supported: QASYMM8
+     * @param[in]  input0   Input tensor containing the LHS matrix. Data type supported: QASYMM8/QASYMM8_SIGNED
      * @param[in]  input1   Input tensor containing the RHS matrix. Data type supported: same as @p input0
      * @param[out] output   Output tensor to store the result of matrix multiplication. Data type supported: S32
      * @param[in]  lhs_info LHS matrix information used to retrieve the number of rows to be processed by each thread
@@ -60,7 +60,7 @@ public:
     void configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, const GEMMReshapeInfo &gemm_info);
     /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpMatrixMultiplyNativeKernel
      *
-     * @param[in] input0   Input tensor info for the LHS matrix. Data type supported: QASYMM8
+     * @param[in] input0   Input tensor info for the LHS matrix. Data type supported: QASYMM8/QASYMM8_SIGNED
      * @param[in] input1   Input tensor info for the RHS matrix. Data type supported: same as @p input0
      * @param[in] output   Output tensor info. Data type supported: S32
      * @param[in] lhs_info LHS matrix information used to retrieve the number of rows to be processed by each thread
diff --git a/arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h b/arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h
index 44a91fef18..4094bc681e 100644
--- a/arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -35,6 +35,7 @@ class ICLTensor;
  * This kernel takes a final int32 accumulator value (the output of @ref CLGEMMLowpMatrixMultiplyKernel), adds to it the offset contribution
  * of matrix A and matrix B and performs the output stage defined by the output_stage argument
  *
+ * @note For quantized computations the output data type for auto-initialization must be passed as part of the @ref GEMMLowpOutputStageInfo.
  */
 class CLGEMMLowpOffsetContributionOutputStageKernel : public ICLKernel
 {
@@ -58,7 +59,7 @@ public:
      *                            Note: vector_sum_row can be a nullptr in case b_offset = 0. Data type supported: same as @p mm_result
      * @param[in]  bias           Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
      *                            Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
-     * @param[out] output         Output tensor. Data type supported: QASYMM8.
+     * @param[out] output         Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED.
      * @param[in]  k              Number of matrix A columns or Matrix B rows
      * @param[in]  a_offset       Offset to be added to each element of the matrix A.
      * @param[in]  b_offset       Offset to be added to each element of the matrix B.
@@ -72,14 +73,14 @@ public:
                    const GEMMLowpOutputStageInfo &output_stage, const ICLTensor *output_multipliers, const ICLTensor *output_shifts);
     /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpOffsetContributionKernel
      *
-     * @param[in] mm_result      Input tensor containing the result of @ref CLGEMMLowpOffsetContributionKernel. Data type supported: S32 or QASYMM8 if output_stage != NONE
+     * @param[in] mm_result      Input tensor containing the result of @ref CLGEMMLowpOffsetContributionKernel. Data type supported: S32
      * @param[in] vector_sum_col Input row-vector of sums of all the entries in each column of matrix B.
      *                           Note: vector_sum_col can be a nullptr in case a_offset = 0. Data type supported: same as @p mm_result
      * @param[in] vector_sum_row Input row-vector of sums of all the entries in each row of matrix A.
      *                           Note: vector_sum_row can be a nullptr in case b_offset = 0. Data type supported: same as @p mm_result
      * @param[in] bias           Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
      *                           Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
-     * @param[in] output         Output tensor. Data type supported: QASYMM8.
+     * @param[in] output         Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED.
      * @param[in] a_offset       Offset to be added to each element of the matrix A.
      * @param[in] b_offset       Offset to be added to each element of the matrix B.
      * @param[in] output_stage   GEMMLowp output stage info
diff --git a/arm_compute/core/CL/kernels/CLGEMMLowpReductionKernel.h b/arm_compute/core/CL/kernels/CLGEMMLowpReductionKernel.h
index c42b218dc1..4e52a8029e 100644
--- a/arm_compute/core/CL/kernels/CLGEMMLowpReductionKernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMLowpReductionKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -67,13 +67,13 @@ class CLGEMMLowpMatrixAReductionKernel : public ICLGEMMLowpReductionKernel
 public:
     /** Initialise the kernel's input and output.
      *
-     * @param[in]  mtx_a          Input tensor. Data type supported: QASYMM8
+     * @param[in]  mtx_a          Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
      * @param[out] vector_sum_row Output row-vector of sums of all the entries in each row of mtx_a. Data type supported: S32
      */
     void configure(const ICLTensor *mtx_a, ICLTensor *vector_sum_row) override;
     /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpMatrixAReductionKernel
      *
-     * @param[in] mtx_a          Input tensor. Data type supported: QASYMM8
+     * @param[in] mtx_a          Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
      * @param[in] vector_sum_row Output row-vector of sums of all the entries in each row of mtx_a. Data type supported: S32
      *
      * @return a status
@@ -94,13 +94,13 @@ class CLGEMMLowpMatrixBReductionKernel : public ICLGEMMLowpReductionKernel
 public:
     /** Initialise the kernel's input and output.
      *
-     * @param[in]  mtx_b          Input tensor. Data type supported: Data type supported: QASYMM8
+     * @param[in]  mtx_b          Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
      * @param[out] vector_sum_col Output row-vector of sums of all the entries in each column of mtx_b. Data type supported: S32
      */
     void configure(const ICLTensor *mtx_b, ICLTensor *vector_sum_col) override;
     /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpMatrixBReductionKernel
      *
-     * @param[in] mtx_b          Input tensor. Data type supported: Data type supported: QASYMM8
+     * @param[in] mtx_b          Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
      * @param[in] vector_sum_col Output row-vector of sums of all the entries in each column of mtx_b. Data type supported: S32
      *
      * @return a status
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 901d080b0e..cbcd3fa783 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2019 ARM Limited.
+ * Copyright (c) 2016-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -1866,10 +1866,10 @@ struct DepthwiseConvolutionReshapeInfo
 /** GEMMLowp output stage type */
 enum class GEMMLowpOutputStageType
 {
-    NONE,                     /**< No quantization to uint8 */
-    QUANTIZE_DOWN,            /**< Quantize to uint8 using an integer multiplication */
-    QUANTIZE_DOWN_FIXEDPOINT, /**< Quantize to uint8 using a fixed point multiplication */
-    QUANTIZE_DOWN_FLOAT       /**< Quantize to uint8 using a floating point multiplication */
+    NONE,                     /**< No quantization */
+    QUANTIZE_DOWN,            /**< Quantize using an integer multiplication */
+    QUANTIZE_DOWN_FIXEDPOINT, /**< Quantize using a fixed point multiplication */
+    QUANTIZE_DOWN_FLOAT       /**< Quantize using a floating point multiplication */
 };
 
 /** GEMMLowp output stage info */
@@ -1884,6 +1884,7 @@ struct GEMMLowpOutputStageInfo
     std::vector<int32_t> gemmlowp_multipliers{}; /**< GEMMLowp output stage multiplier used for quantizing to QASYMM8 */
     std::vector<int32_t> gemmlowp_shifts{};      /**< GEMMLowp output stage multiplier used for quantizing to QASYMM8 */
     bool                 is_quantized_per_channel{ false }; /**< GEMMLowp quantized per-channel flag */
+    DataType             output_data_type{ DataType::UNKNOWN }; /**< Output tensor data type to use if the output is not initialized */
 };
 
 /** GEMM LHS (Left Hand Side) matrix information */
diff --git a/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h b/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h
index 770ef0b287..66c5e9ee46 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -76,10 +76,10 @@ public:
      * -# Compute the matrix product of the resulting a * b in int32.
      * -# Quantize to uint8 if gemm_info.gemmlowp_output_stage != NONE
      *
-     * @param[in]  a         First input tensor (Matrix A). Data type supported: QASYMM8.
+     * @param[in]  a         First input tensor (Matrix A). Data type supported: QASYMM8/QASYMM8_SIGNED.
      * @param[in]  b         Second input tensor (Matrix B). Data type supported: same as @p a
      * @param[in]  c         Third input tensor (Matrix C). It can be a nullptr. Data type supported: S32
-     * @param[out] output    Output tensor. Data type supported: S32 or QASYMM8 if gemm_info.gemmlowp_output_stage != NONE
+     * @param[out] output    Output tensor. Data type supported: S32 or QASYMM8/QASYMM8_SIGNED if gemm_info.gemmlowp_output_stage != NONE
      * @param[in]  gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
      *                       if the reshape of matrix B should be executed only for the first run
      */
@@ -89,7 +89,7 @@
      * @param[in] a         First input tensor info (Matrix A). Data type supported: QASYMM8.
      * @param[in] b         Second input tensor info (Matrix B). Data type supported: same as @p a
      * @param[in] c         Third input tensor info (Matrix C). It can be a nullptr. Data type supported: S32
-     * @param[in] output    Output tensor info. Data type supported: S32 or QASYMM8 if gemm_info.gemmlowp_output_stage != NONE
+     * @param[in] output    Output tensor info. Data type supported: S32 or QASYMM8/QASYMM8_SIGNED if gemm_info.gemmlowp_output_stage != NONE
      * @param[in] gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
      *                      if the reshape of matrix B should be executed only for the first run
      *
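For readers who want to see the extended API in action, here is a minimal usage sketch in C++. It is not part of the patch: the tensor shapes, quantization parameters, and main() scaffolding are illustrative assumptions. It exercises the unfused path (gemm_info.gemmlowp_output_stage == NONE), where the accumulators stay S32, and then shows the new GEMMLowpOutputStageInfo::output_data_type field this commit adds.

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"

using namespace arm_compute;

int main()
{
    // Create a default OpenCL context and queue for the scheduler.
    CLScheduler::get().default_init();

    // Illustrative shapes: A is M x K, B is K x N (TensorShape is width-first).
    constexpr int M = 4, N = 8, K = 16;

    CLTensor a, b, dst;
    // QASYMM8_SIGNED inputs; the scales and zero points are arbitrary example values.
    a.allocator()->init(TensorInfo(TensorShape(K, M), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.5f, 0)));
    b.allocator()->init(TensorInfo(TensorShape(N, K), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.25f, 0)));
    // With the default GEMMInfo (gemmlowp_output_stage == NONE) the result stays S32.
    dst.allocator()->init(TensorInfo(TensorShape(N, M), 1, DataType::S32));

    CLGEMMLowpMatrixMultiplyCore gemmlowp;
    gemmlowp.configure(&a, &b, nullptr, &dst); // c (bias) can be a nullptr

    a.allocator()->allocate();
    b.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill a and b with quantized data here ...

    gemmlowp.run();
    CLScheduler::get().sync();

    // The patch also adds GEMMLowpOutputStageInfo::output_data_type; when an
    // output stage is fused, it tells the function which quantized type to use
    // when auto-initializing the output (see the @note added to the
    // offset-contribution kernel above).
    GEMMLowpOutputStageInfo stage_info{};
    stage_info.type             = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    stage_info.output_data_type = DataType::QASYMM8_SIGNED;
    // stage_info would then be passed to configure() inside a GEMMInfo object.

    return 0;
}

With a fused output stage, the same configure() call accepts a quantized output tensor (QASYMM8 or QASYMM8_SIGNED) instead of S32, matching the updated @param documentation in CLGEMMLowpMatrixMultiplyCore.h.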