author    Michele Di Giorgio <michele.digiorgio@arm.com>  2020-03-12 19:34:33 +0000
committer Michele Di Giorgio <michele.digiorgio@arm.com>  2020-03-16 09:42:36 +0000
commit    a602f03f4c66e5ee2480f1a3fc66847968fc1076 (patch)
tree      a2752ca0de84f7920dd7296151d14e5edc8cacc0 /arm_compute
parent    0ec53a0e54ae0be0ed9c4e4c14a5fd10ed5f48a8 (diff)
download  ComputeLibrary-a602f03f4c66e5ee2480f1a3fc66847968fc1076.tar.gz
COMPMID-3237: Extend GEMMLowpReduction kernels to multiply reductions by a scalar value
Change-Id: If2a242f52aea753591525d30a4cb64c1a766bf8d
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2881
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Sang-Hoon Park <sang-hoon.park@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
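
Background, for context (this is the standard GEMMLowp offset-contribution algebra, added here as an editorial note rather than taken from the patch): with zero points z_a and z_b, the quantized product expands as

    C_{ij} = \sum_{k=1}^{K} (A_{ik} - z_a)(B_{kj} - z_b)
           = \sum_{k=1}^{K} A_{ik} B_{kj} - z_b \sum_{k=1}^{K} A_{ik} - z_a \sum_{k=1}^{K} B_{kj} + K z_a z_b

so the row sums of A and the column sums of B each enter the result multiplied by the opposite operand's zero point. Letting the reduction kernels apply that scalar directly avoids a separate elementwise multiplication pass over the two reduction vectors.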
Diffstat (limited to 'arm_compute')
-rw-r--r--  arm_compute/core/KernelDescriptors.h                          22
-rw-r--r--  arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h     71
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMM.h                    4
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h   10
4 files changed, 74 insertions, 33 deletions
diff --git a/arm_compute/core/KernelDescriptors.h b/arm_compute/core/KernelDescriptors.h
index 58400b190b..d9d3e1a4d8 100644
--- a/arm_compute/core/KernelDescriptors.h
+++ b/arm_compute/core/KernelDescriptors.h
@@ -124,5 +124,27 @@ struct InstanceNormalizationLayerKernelInfo
     float epsilon;             /**< Lower bound value for the normalization. Defaults to 1e-12 */
     bool  use_mixed_precision; /**< Use mixed precision in case of FP16 execution. Defaults to true */
 };
+
+struct GEMMLowpReductionKernelInfo
+{
+    /** Default constructor */
+    GEMMLowpReductionKernelInfo() = default;
+    /** Constructor
+     *
+     * @param[in] k             Number of matrix columns/rows.
+     * @param[in] is_reshaped   True if the input tensor has been reshaped.
+     * @param[in] scalar        Scalar value to multiply each reduced column/row by.
+     * @param[in] mul_by_scalar True if each column/row reduction has to be multiplied by a scalar value.
+     */
+    GEMMLowpReductionKernelInfo(int32_t k, bool is_reshaped, int32_t scalar, bool mul_by_scalar)
+        : k(k), is_reshaped(is_reshaped), scalar(scalar), mul_by_scalar(mul_by_scalar)
+    {
+    }
+
+    int32_t k{ 0 };                 /**< Number of matrix columns/rows */
+    bool    is_reshaped{ false };   /**< True if the input tensor has been reshaped */
+    int32_t scalar{ 0 };            /**< Scalar value to multiply each reduced column/row by */
+    bool    mul_by_scalar{ false }; /**< True if each column/row reduction has to be multiplied by a scalar value */
+};
 } // namespace arm_compute
 #endif /* ARM_COMPUTE_CORE_KERNEL_DESCRIPTORS_H */
diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h
index fb781aea28..1e472f5252 100644
--- a/arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h
+++ b/arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -28,7 +28,9 @@
 namespace arm_compute
 {
+// Forward declarations
 class ITensor;
+struct GEMMLowpReductionKernelInfo;

 /** Common interface for all NEON reduction kernels */
 class INEGEMMLowpReductionKernel : public INEKernel
@@ -47,18 +49,23 @@ public:
     /** Initialise the kernel's input and output.
      *
-     * @param[in]  input       Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
-     * @param[out] output      Output row-vector of sums of all the entries in each row/col of input tensor. Data type supported: S32
-     * @param[in]  k           Number of matrix A columns (or matrix B rows)
-     * @param[in]  is_reshaped True if the input tensor has been reshaped
+     * @param[in]  input  Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
+     * @param[out] output Output row-vector of sums of all the entries in each row/col of input tensor. Data type supported: S32
+     * @param[in]  info   Kernel metadata:
+     *                    - k             Number of matrix columns/rows depending on the type of reduction.
+     *                    - is_reshaped   True if the matrix has been reshaped.
+     *                    - scalar        Scalar value to multiply each reduced column/row by.
+     *                    - mul_by_scalar True if each reduced column/row must be multiplied by a scalar value.
      */
-    virtual void configure(const ITensor *input, ITensor *output, int32_t k, bool is_reshaped) = 0;
+    virtual void configure(const ITensor *input, ITensor *output, const GEMMLowpReductionKernelInfo &info) = 0;

 protected:
     const ITensor *_input;
     ITensor       *_output;
     int32_t        _k;
     bool           _is_reshaped;
+    int32_t        _scalar;
+    bool           _mul_by_scalar;
 };

 /** NEON kernel used to compute the row-vectors of sums of all the entries in each row of Matrix A.
@@ -75,22 +82,28 @@ public:
     }
     /** Initialise the kernel's input and output.
      *
-     * @param[in]  mtx_a             Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
-     * @param[out] vector_sum_row    Output row-vector of sums of all the entries in each row of mtx_a. Data type supported: S32
-     * @param[in]  num_mtx_a_cols    Number of matrix A columns
-     * @param[in]  is_interleaved4x4 True if the matrix A has been interleaved4x4
+     * @param[in]  mtx_a          Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
+     * @param[out] vector_sum_row Output row-vector of sums of all the entries in each row of mtx_a. Data type supported: S32
+     * @param[in]  info           Kernel metadata:
+     *                            - k (num_mtx_a_cols)              Number of matrix A columns
+     *                            - is_reshaped (is_interleaved4x4) True if the matrix A has been interleaved4x4
+     *                            - scalar                          Scalar value to multiply each reduced row by.
+     *                            - mul_by_scalar                   True if each reduced row must be multiplied by a scalar value.
      */
-    void configure(const ITensor *mtx_a, ITensor *vector_sum_row, int32_t num_mtx_a_cols, bool is_interleaved4x4) override;
+    void configure(const ITensor *mtx_a, ITensor *vector_sum_row, const GEMMLowpReductionKernelInfo &info) override;
     /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpMatrixAReductionKernel
      *
-     * @param[in] mtx_a             Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
-     * @param[in] vector_sum_row    Output row-vector of sums of all the entries in each row of mtx_a. Data type supported: S32
-     * @param[in] num_mtx_a_cols    Number of matrix A columns
-     * @param[in] is_interleaved4x4 True if the matrix A has been interleaved4x4
+     * @param[in] mtx_a          Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
+     * @param[in] vector_sum_row Output row-vector of sums of all the entries in each row of mtx_a. Data type supported: S32
+     * @param[in] info           Kernel metadata:
+     *                           - k (num_mtx_a_cols)              Number of matrix A columns
+     *                           - is_reshaped (is_interleaved4x4) True if the matrix A has been interleaved4x4
+     *                           - scalar                          Scalar value to multiply each reduced row by.
+     *                           - mul_by_scalar                   True if each reduced row must be multiplied by a scalar value.
      *
      * @return a status
      */
-    static Status validate(const ITensorInfo *mtx_a, const ITensorInfo *vector_sum_row, int32_t num_mtx_a_cols, bool is_interleaved4x4);
+    static Status validate(const ITensorInfo *mtx_a, const ITensorInfo *vector_sum_row, const GEMMLowpReductionKernelInfo &info);

     // Inherited methods overridden:
     void run(const Window &window, const ThreadInfo &info) override;
@@ -118,22 +131,28 @@ public:
     }
     /** Initialise the kernel's input and output.
      *
-     * @param[in]  mtx_b            Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
-     * @param[out] vector_sum_col   Output row-vector of sums of all the entries in each column of mtx_b. Data type supported: S32
-     * @param[in]  num_mtx_b_rows   Number of matrix B rows
-     * @param[in]  is_transposed1xW True if the input tensor is transposed 1xW
+     * @param[in]  mtx_b          Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
+     * @param[out] vector_sum_col Output row-vector of sums of all the entries in each column of mtx_b. Data type supported: S32
+     * @param[in]  info           Kernel metadata:
+     *                            - k (num_mtx_b_rows)             Number of matrix B rows.
+     *                            - is_reshaped (is_transposed1xW) True if the input tensor is transposed 1xW.
+     *                            - scalar                         Scalar value to multiply each reduced column by.
+     *                            - mul_by_scalar                  True if each reduced column must be multiplied by a scalar value.
      */
-    void configure(const ITensor *mtx_b, ITensor *vector_sum_col, int32_t num_mtx_b_rows, bool is_transposed1xW) override;
+    void configure(const ITensor *mtx_b, ITensor *vector_sum_col, const GEMMLowpReductionKernelInfo &info) override;
     /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpMatrixBReductionKernel
      *
-     * @param[in] mtx_b            Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
-     * @param[in] vector_sum_col   Output row-vector of sums of all the entries in each column of mtx_b. Data type supported: S32
-     * @param[in] num_mtx_b_rows   Number of matrix B rows
-     * @param[in] is_transposed1xW True if the input tensor is transposed 1xW
+     * @param[in] mtx_b          Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
+     * @param[in] vector_sum_col Output row-vector of sums of all the entries in each column of mtx_b. Data type supported: S32
+     * @param[in] info           Kernel metadata:
+     *                           - k (num_mtx_b_rows)             Number of matrix B rows.
+     *                           - is_reshaped (is_transposed1xW) True if the input tensor is transposed 1xW.
+     *                           - scalar                         Scalar value to multiply each reduced column by.
+     *                           - mul_by_scalar                  True if each reduced column must be multiplied by a scalar value.
      *
      * @return a status
      */
-    static Status validate(const ITensorInfo *mtx_b, const ITensorInfo *vector_sum_col, int32_t num_mtx_b_rows, bool is_transposed1xW);
+    static Status validate(const ITensorInfo *mtx_b, const ITensorInfo *vector_sum_col, const GEMMLowpReductionKernelInfo &info);

     // Inherited methods overridden:
     void run(const Window &window, const ThreadInfo &info) override;
diff --git a/arm_compute/runtime/NEON/functions/NEGEMM.h b/arm_compute/runtime/NEON/functions/NEGEMM.h
index c87e806d0c..8dc6b88bb0 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMM.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMM.h
@@ -74,7 +74,7 @@ public:
      * @note GEMM: General Matrix Multiply - [alpha * A * B + beta * C].
      * @note GEMM: The tensors a, b, c, d must have the same data type. You should not mix data types when calling this function.
      *
-     * @param[in]  a First input tensor  (Matrix A or Vector A). Data type supported: BLOAT16/F16/F32
+     * @param[in]  a First input tensor  (Matrix A or Vector A). Data type supported: BFLOAT16/F16/F32
      * @param[in]  b Second input tensor (Matrix B). Data type supported: same as @p a
      * @param[in]  c Third input tensor  (Matrix C). It can be a nullptr if just the multiplication between @p a and @p b is needed. Data type supported: same as @p a
      * @param[out] d Output tensor. Data type supported: same as @p a
@@ -86,7 +86,7 @@ public:
     void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, float alpha, float beta, const GEMMInfo &gemm_info = GEMMInfo());
     /** Static function to check if given info will lead to a valid configuration of @ref NEGEMM.
      *
-     * @param[in]  a      First input tensor info (Matrix or Vector A). Data types supported: BLOAT16/F16/F32
+     * @param[in]  a      First input tensor info (Matrix or Vector A). Data types supported: BFLOAT16/F16/F32
      * @param[in]  b      Second input tensor info (Matrix B). Data type supported: same as @p a.
      * @param[in]  c      Third input tensor info (Matrix C). It can be a nullptr if just the multiplication between @p a and @p b is needed. Data type supported: same as @p a.
      * @param[out] output Output tensor info. Data type supported: same as @p a
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
index 5368384b19..e7da1006e0 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
@@ -66,9 +66,9 @@ public:
      * @param[in]  weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
      *                     Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/BFLOAT16/F16/F32.
      * @param[in]  biases  Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
-     *                     Data type supported: Same as @p weights, S32 if @p weights is QASYMM8/QASYMM8_SIGNED, FP32 if @p weights is BLOAT16
+     *                     Data type supported: Same as @p weights, S32 if @p weights is QASYMM8/QASYMM8_SIGNED, FP32 if @p weights is BFLOAT16
      * @param[out] output  Destination tensor.
-     *                     Data types supported: Same as @p weights, FP32 if @p weights is BLOAT16
+     *                     Data types supported: Same as @p weights, FP32 if @p weights is BFLOAT16
      */
     void configure(const ITensor *weights, const ITensor *biases, ITensor *output);
     /** Static function to check if given info will lead to a valid configuration of @ref NEConvolutionLayerReshapeWeights
@@ -76,9 +76,9 @@ public:
      * @param[in] weights Weights tensor info. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
      *                    Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/BFLOAT16/F16/F32.
      * @param[in] biases  Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
-     *                    Data type supported: Same as @p weights, S32 if @p weights is QASYMM8/QASYMM8_SIGNED, FP32 if @p weights is BLOAT16
+     *                    Data type supported: Same as @p weights, S32 if @p weights is QASYMM8/QASYMM8_SIGNED, FP32 if @p weights is BFLOAT16
      * @param[in] output  Destination tensor.
-     *                    Data types supported: Same as @p weights, FP32 if @p weights is BLOAT16
+     *                    Data types supported: Same as @p weights, FP32 if @p weights is BFLOAT16
      *
      * @return an error status
      */
@@ -140,7 +140,7 @@ private:
 /** Basic function to compute the convolution layer. This function calls the following NEON kernels/functions:
  *
  * -# @ref NEIm2ColKernel
- * -# @ref NEGEMM (if the data type is BLOAT16/FP16/FP32)
+ * -# @ref NEGEMM (if the data type is BFLOAT16/FP16/FP32)
  * -# @ref NEGEMMLowpMatrixMultiplyCore (if the data type is QASYMM8/QASYMM8_SIGNED)
 * -# @ref NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint (if the data type is QASYMM8/QASYMM8_SIGNED)
 * -# @ref NEArithmeticAdditionKernel (if biases != nullptr and we have a 1x1 convolution with the NHWC data layout)
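
A minimal caller-side sketch of the new descriptor-based API, assuming only the signatures shown in the diff above. The helper function and the choice of scalar (each reduction scaled by the opposite operand's zero point, following the expansion given before the diffstat) are illustrative assumptions, not code from this patch:

#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h"

using namespace arm_compute;

// Hypothetical helper: configure both GEMMLowp reduction kernels through the
// new GEMMLowpReductionKernelInfo descriptor instead of loose arguments.
void configure_gemmlowp_reductions(const ITensor *mtx_a, ITensor *vector_sum_row,
                                   const ITensor *mtx_b, ITensor *vector_sum_col,
                                   int32_t k, int32_t a_offset, int32_t b_offset)
{
    NEGEMMLowpMatrixAReductionKernel mtx_a_reduction;
    NEGEMMLowpMatrixBReductionKernel mtx_b_reduction;

    // Before this patch: mtx_a_reduction.configure(mtx_a, vector_sum_row, k, false);
    // Assumption: the row sums of A are scaled by the B zero point (b_offset).
    const GEMMLowpReductionKernelInfo info_a(k, /* is_reshaped */ false, b_offset, /* mul_by_scalar */ b_offset != 0);
    mtx_a_reduction.configure(mtx_a, vector_sum_row, info_a);

    // Assumption: the column sums of B are scaled by the A zero point (a_offset).
    const GEMMLowpReductionKernelInfo info_b(k, /* is_reshaped */ false, a_offset, /* mul_by_scalar */ a_offset != 0);
    mtx_b_reduction.configure(mtx_b, vector_sum_col, info_b);
}

Bundling the metadata into one struct also keeps the virtual configure() signature stable if further reduction options are added later.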