author     Gian Marco <gianmarco.iodice@arm.com>        2017-11-17 09:27:57 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:35:24 +0000
commit     6b77e917801b4e979796ea75c538eef740482089 (patch)
tree       0e693ecb1eb0b05018901a992b56781a08b9c266 /arm_compute
parent     b3c81cb4100b3a449db5232364e18e649b26df58 (diff)
download   ComputeLibrary-6b77e917801b4e979796ea75c538eef740482089.tar.gz
COMPMID-665 - NEON: Add QASYMM8 in place Activation layer
- Added min and max arguments for QuantizeDownInt32ToUint8Scale in order
to apply bounded relu
- Added support for int32_t biases
- Extended tests
Change-Id: I015dae17faa7284766b5435ca33bcf593c1b2b69
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/96512
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
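
Below is a minimal usage sketch, not part of the commit, of how the extended NEGEMMLowpQuantizeDownInt32ToUint8Scale interface added here might be called. The tensor objects, quantization parameters and the min/max bounds are illustrative assumptions; only the configure() signature itself comes from the diff further down.

// Illustrative sketch (assumed values, not from the commit): output stage with the new
// bias tensor and the optional min/max bounds that implement a bounded ReLU.
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

// gemm_s32_result, bias_s32 and output_qasymm8 are assumed to be already
// initialised and allocated (S32 input and bias, QASYMM8 output).
void run_output_stage(Tensor &gemm_s32_result, Tensor &bias_s32, Tensor &output_qasymm8)
{
    NEGEMMLowpQuantizeDownInt32ToUint8Scale output_stage;

    const int result_offset   = 2;   // example quantization parameters
    const int result_mult_int = 3;
    const int result_shift    = 8;
    const int min             = 0;   // lower bound of the bounded ReLU
    const int max             = 255; // upper bound of the bounded ReLU

    // Pass nullptr instead of &bias_s32 if the bias addition is not required.
    output_stage.configure(&gemm_s32_result, &bias_s32, &output_qasymm8,
                           result_offset, result_mult_int, result_shift, min, max);
    output_stage.run();
}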
Diffstat (limited to 'arm_compute')
3 files changed, 51 insertions, 10 deletions
diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h
index 04b84339b0..8c1bae9396 100644
--- a/arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h
+++ b/arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h
@@ -35,6 +35,13 @@ class ITensor;
  * This kernel takes a final int32 accumulator value (the output of @NEGEMMLowpMatrixMultiplyKernel),
  * and adds to it the offset contribution of matrix A and matrix B in-place.
  *
+ * The final result is:
+ *
+ * mm_result[i][k] = mm_result[i][k] +
+ *                   (vector_sum_col[k] * a_offset) +
+ *                   (vector_sum_row[i] * b_offset) +
+ *                   (a_offset * b_offset * k)
+ *
  */
 class NEGEMMLowpOffsetContributionKernel : public INEKernel
 {
diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h
index 65f1042b9c..4ec0e9df93 100644
--- a/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h
+++ b/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h
@@ -36,7 +36,10 @@ class ITensor;
  * The following computations will be performed by the kernel:
  *
  *  -# Add offset terms to final result
- *  -# Multiply each entry of result and round to nearest integer
+ *  -# Multiply each entry of result by result_mult_int
+ *  -# Add bias to final result if bias tensor is not a nullptr
+ *  -# Shift the int32 accumulator by result_shift
+ *  -# Clamp the value between the specified min and max bounds
  *  -# Clamp the resulting int32 values to the [0..255] range and cast to QASYMM8.
  *
  */
@@ -56,22 +59,44 @@ public:
     /** Initialise the kernel's input and output.
      *
      * @param[in]  input           Input tensor. Data type supported: S32
+     * @param[in]  bias            Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
+     *                             Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
      * @param[out] output          Output tensor. Data type supported: Data type supported: QASYMM8
      * @param[in]  result_offset   Offset to be added to each element of the input matrix
      * @param[in]  result_mult_int Value to be multiplied to each element of the input matrix when once the result_offset has been add
      * @param[in]  result_shift    Number of bits to shift right the result before converting back to QASYMM8
+     * @param[in]  min             (Optional) Min value used to saturate down the output result before converting back to QASYMM8
+     * @param[in]  max             (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
+     *                             Along with @p min, this value can be used to implement "rectified linear unit" activation functions
      */
-    void configure(const ITensor *input, ITensor *output, int result_offset, int result_mult_int, int result_shift);
+    void configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_offset, int result_mult_int, int result_shift, int min = 0, int max = 0);
 
     // Inherited methods overridden:
     void run(const Window &window, const ThreadInfo &info) override;
 
 private:
-    const ITensor *_input;
-    ITensor       *_output;
-    int32_t        _result_offset;
-    int32_t        _result_mult_int;
-    int32_t        _result_shift;
+    /** Template function to run the NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel
+     *
+     * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
+     */
+    template <bool is_bounded_relu>
+    void run(const Window &window);
+
+    /** Common signature for all the specialised NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel functions
+     *
+     * @param[in] window Region on which to execute the kernel.
+     */
+    using QuantizeDownFunctionPtr = void (NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::*)(const Window &window);
+
+    QuantizeDownFunctionPtr _func;
+    const ITensor          *_input;
+    const ITensor          *_bias;
+    ITensor                *_output;
+    int                     _result_offset;
+    int                     _result_mult_int;
+    int                     _result_shift;
+    int                     _min;
+    int                     _max;
 };
 } // namespace arm_compute
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h
index 8557ef42e1..a3db23aaee 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h
@@ -43,14 +43,18 @@ class ITensor;
 *  NEGEMMLowpQuantizeDownInt32ToUint8Scale depends on 3 parameters: result_offset, result_mult_int, result_shift
 *  The final result is:
 *
- *  ((input[i][k] + result_offset) * result_mult_int + rounding) >> result_shift
+ *  ((input[i][k] + result_offset) * result_mult_int) >> result_shift
 *
- *  where rounding = (result_shift < 1) ? 0 : (1 << (result_shift - 1))
+ *  In case the bias tensor is provided, the final result is:
+ *
+ *  ((input[i][k] + result_offset) * result_mult_int + bias[k]) >> result_shift
 *
 *  This function calls the following NEON kernels:
 *
 * -# @ref NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel
 *
+ * @note The function accepts also 2 optional input arguments (min and max) which can be used to implement "rectified linear unit" activation functions
+ *       before the result is shifted right by result_shift
 */
 class NEGEMMLowpQuantizeDownInt32ToUint8Scale : public INESimpleFunction
 {
@@ -58,12 +62,17 @@ public:
     /** Initialise the kernel's inputs, output
     *
     * @param[in]  input           Input tensor. It is the output of @ref NEGEMMLowpMatrixMultiplyCore function. Data type supported: S32
+    * @param[in]  bias            Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
+    *                             Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
    * @param[out] output          Output tensor. Data type supported: Data type supported: QASYMM8
    * @param[in]  result_offset   Offset to be added to each element of the input matrix
    * @param[in]  result_mult_int Value to be multiplied to each element of the input matrix when once the result_offset has been add
    * @param[in]  result_shift    Number of bits to shift right the result before converting back to QASYMM8
+    * @param[in]  min             (Optional) Min value used to saturate down the output result before converting back to QASYMM8
+    * @param[in]  max             (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
+    *                             Along with @p min, this value can be used to implement "rectified linear unit" activation functions
    */
-    void configure(const ITensor *input, ITensor *output, int result_offset, int result_mult_int, int result_shift);
+    void configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_offset, int result_mult_int, int result_shift, int min = 0, int max = 0);
 };
 }
 #endif /*__ARM_COMPUTE_NEGEMMLOWPOUTPUTSTAGE_H__ */
\ No newline at end of file
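
For reference, here is a scalar sketch of the per-element computation the updated documentation describes. It is not library code: the actual NEON kernel is vectorised and dispatches at configure() time to a bounded or unbounded specialisation via the is_bounded_relu template parameter, and this sketch assumes the min/max clamp is applied to the shifted value, following the step list in NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h.

#include <algorithm>
#include <cstdint>

// Scalar reference of the quantize-down output stage (illustrative only):
// add offset, multiply, add bias, shift right, optionally clamp to [min, max],
// then clamp to the QASYMM8 range [0, 255].
inline uint8_t quantize_down_scale(int32_t input, int32_t bias,
                                   int32_t result_offset, int32_t result_mult_int,
                                   int32_t result_shift, int32_t min, int32_t max,
                                   bool is_bounded_relu)
{
    int32_t result = (input + result_offset) * result_mult_int + bias;
    result >>= result_shift;
    if(is_bounded_relu)
    {
        result = std::max(min, std::min(max, result)); // bounded ReLU via min/max
    }
    result = std::max(0, std::min(255, result));       // final clamp before cast to QASYMM8
    return static_cast<uint8_t>(result);
}

With bias = 0 and is_bounded_relu = false this reduces to the previous behaviour, minus the rounding term that this commit removes from the documented formula.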