From 4536193e78570989c1b54f1c5d57627f29f9d400 Mon Sep 17 00:00:00 2001
From: Michele Di Giorgio
Date: Thu, 19 Dec 2019 13:53:44 +0000
Subject: COMPMID-2801: Add support for QASYMM8_SIGNED in NEDirectConvolutionLayerOutputStageKernel

Change-Id: Ib047dd1024b8ecac60e2d368cb161ca418c933ff
Signed-off-by: Michele Di Giorgio
Reviewed-on: https://review.mlplatform.org/c/2503
Reviewed-by: Giorgio Arena
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Georgios Pinitas
---
 arm_compute/core/KernelDescriptors.h                | 11 +++++-
 .../NEDirectConvolutionLayerOutputStageKernel.h     | 39 +++++++++++-----------
 2 files changed, 30 insertions(+), 20 deletions(-)

(limited to 'arm_compute')

diff --git a/arm_compute/core/KernelDescriptors.h b/arm_compute/core/KernelDescriptors.h
index f358153b0d..d009ccc73d 100644
--- a/arm_compute/core/KernelDescriptors.h
+++ b/arm_compute/core/KernelDescriptors.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -83,5 +83,14 @@ struct SoftmaxKernelInfo
     bool     is_log{ false };                       /**< Flag used to perform Log Softmax operation */
     DataType input_data_type{ DataType::UNKNOWN };  /**< Input tensor data type */
 };
+
+/** Descriptor used by the direct convolution layer output stage kernels */
+struct DirectConvolutionLayerOutputStageKernelInfo
+{
+    int32_t  result_fixedpoint_multiplier{ 0 };     /**< Result output stage multiplier used for quantizing */
+    int32_t  result_shift{ 0 };                     /**< Result output stage shift used for quantizing */
+    int32_t  result_offset_after_shift{ 0 };        /**< Result offset used for quantizing */
+    DataType output_data_type{ DataType::UNKNOWN }; /**< Output tensor data type to use if the output is not initialized */
+};
 } // namespace arm_compute
 #endif /* ARM_COMPUTE_CORE_KERNEL_DESCRIPTORS_H */
diff --git a/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h b/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h
index 3f41edc5aa..b7632d70c4 100644
--- a/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h
+++ b/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,6 +24,7 @@
 #ifndef ARM_COMPUTE_NEDIRECTCONVOLUTIONLAYEROUTPUTSTAGEKERNEL_H
 #define ARM_COMPUTE_NEDIRECTCONVOLUTIONLAYEROUTPUTSTAGEKERNEL_H
 
+#include "arm_compute/core/KernelDescriptors.h"
 #include "arm_compute/core/NEON/INEKernel.h"
 
 namespace arm_compute
@@ -32,6 +33,8 @@ class ITensor;
 /** NEON kernel to accumulate the biases, if provided, or downscale in case of quantized input.
  *
  * @note We assume bias to be shared
+ * @note For quantized computations (i.e. @p input of S32 type) the output data type for auto-initialization must be passed as part
+ *       of the @ref DirectConvolutionLayerOutputStageKernelInfo.
  */
 class NEDirectConvolutionLayerOutputStageKernel : public INEKernel
 {
@@ -54,32 +57,30 @@ public:
     ~NEDirectConvolutionLayerOutputStageKernel() = default;
     /** Set the accumulate buffer and the biases of the kernel.
      *
-     * @param[in, out] input                        Input to add the bias to. If @p output is not specified then accumulation is done in-place.
-     *                                              Data type supported: F16/F32
-     * @param[in]      bias                         (Optional) The shared bias tensor to add. It must be 1D Tensor. Data type supported: Same as @p input
-     * @param[out]     output                       (Optional) If the output tensor is specified the accumulation is done out-of-place. (Defaults to nullptr)
-     *                                              Data type supported: F16/F32
-     * @param[in]      result_fixedpoint_multiplier (Optional) Fixed point value to be multiplied to each element of the input matrix once the result_offset has been added
-     * @param[in]      result_shift                 (Optional) Integer value used to round the result of the fixed point multiplication to nearest division by a power-of-two
-     * @param[in]      result_offset_after_shift    (Optional) Offset to be applied to result before converting it back to QASYMM8
+     * @param[in, out] input  Input to add the bias to. If @p output is not specified then accumulation is done in-place.
+     *                        Data type supported: F16/F32/S32
+     * @param[in]      bias   (Optional) The shared bias tensor to add. It must be 1D Tensor. Data type supported: Same as @p input
+     * @param[out]     output (Optional) If the output tensor is specified the accumulation is done out-of-place. (Defaults to nullptr)
+     *                        Note that in-place computation is only supported for F16/F32. For S32 this must not be nullptr.
+     *                        Data type supported: F16/F32 or QASYMM8/QASYMM8_SIGNED if @p input is S32
+     * @param[in]      info   (Optional) DirectConvolutionLayerOutputStageKernel descriptor metadata
      */
     void configure(ITensor *input, const ITensor *bias = nullptr, ITensor *output = nullptr,
-                   int result_fixedpoint_multiplier = 0, int result_shift = 0, int result_offset_after_shift = 0);
+                   const DirectConvolutionLayerOutputStageKernelInfo &info = DirectConvolutionLayerOutputStageKernelInfo());
     /** Static function to check if given info will lead to a valid configuration of @ref NEDirectConvolutionLayerOutputStageKernel
      *
-     * @param[in] input                        Input to add the bias to. If @p output is not specified then accumulation is done in-place.
-     *                                         Data type supported: F16/F32
-     * @param[in] bias                         (Optional) The shared bias tensor to add. It must be 1D Tensor. Data type supported: Same as @p input
-     * @param[in] output                       (Optional) If the output tensor is specified the accumulation is done out-of-place. (Defaults to nullptr)
-     *                                         Data type supported: F16/F32
-     * @param[in] result_fixedpoint_multiplier (Optional) Fixed point value to be multiplied to each element of the input matrix once the result_offset has been added
-     * @param[in] result_shift                 (Optional) Integer value used to round the result of the fixed point multiplication to nearest division by a power-of-two
-     * @param[in] result_offset_after_shift    (Optional) Offset to be applied to result before converting it back to QASYMM8
+     * @param[in] input  Input to add the bias to. If @p output is not specified then accumulation is done in-place.
+     *                   Data type supported: F16/F32/S32
+     * @param[in] bias   (Optional) The shared bias tensor to add. It must be 1D Tensor. Data type supported: Same as @p input
+     * @param[in] output (Optional) If the output tensor is specified the accumulation is done out-of-place. (Defaults to nullptr)
+     *                   Note that in-place computation is only supported for F16/F32. For S32 this must not be nullptr.
+     *                   Data type supported: F16/F32 or QASYMM8/QASYMM8_SIGNED if @p input is S32
+     * @param[in] info   (Optional) DirectConvolutionLayerOutputStageKernel descriptor metadata
      *
      * @return a status
      */
     static Status validate(const ITensorInfo *input, const ITensorInfo *bias = nullptr, const ITensorInfo *output = nullptr,
-                           int result_fixedpoint_multiplier = 0, int result_shift = 0, int result_offset_after_shift = 0);
+                           const DirectConvolutionLayerOutputStageKernelInfo &info = DirectConvolutionLayerOutputStageKernelInfo());
 
     // Inherited methods overridden:
     void run(const Window &window, const ThreadInfo &info) override;
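Below is a hypothetical usage sketch (not part of the patch) of the new descriptor-based configure()/validate() interface, requantizing an S32 accumulator to QASYMM8_SIGNED. The tensor shapes, quantization parameters and the fixed-point multiplier/shift values are illustrative placeholders; in practice they would be derived from the convolution's input, weights and output quantization info.

// Hypothetical usage sketch, not part of the patch: shapes, quantization
// parameters and the fixed-point multiplier/shift are illustrative placeholders.
#include "arm_compute/core/Error.h"
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // S32 accumulator produced by the direct convolution, plus its 1D shared bias.
    Tensor acc{};
    Tensor bias{};
    Tensor output{};
    acc.allocator()->init(TensorInfo(TensorShape(16U, 16U, 32U), 1, DataType::S32));
    bias.allocator()->init(TensorInfo(TensorShape(32U), 1, DataType::S32));

    // Out-of-place QASYMM8_SIGNED output (for an S32 input the output must be provided).
    output.allocator()->init(TensorInfo(TensorShape(16U, 16U, 32U), 1, DataType::QASYMM8_SIGNED,
                                        QuantizationInfo(0.25f, -10)));

    // Output stage descriptor introduced by this patch. The multiplier/shift pair
    // would normally be computed from the input, weights and output scales;
    // output_data_type is used to auto-initialize the output when it is not set up.
    DirectConvolutionLayerOutputStageKernelInfo info{};
    info.result_fixedpoint_multiplier = 1073741824; // placeholder value
    info.result_shift                 = 1;          // placeholder value
    info.result_offset_after_shift    = -10;        // output zero point
    info.output_data_type             = DataType::QASYMM8_SIGNED;

    // Check the configuration first, then configure the kernel.
    const Status status = NEDirectConvolutionLayerOutputStageKernel::validate(acc.info(), bias.info(), output.info(), info);
    if(status.error_code() != ErrorCode::OK)
    {
        return 1;
    }

    NEDirectConvolutionLayerOutputStageKernel output_stage{};
    output_stage.configure(&acc, &bias, &output, info);

    // Tensor allocation and kernel scheduling (e.g. via NEScheduler) are omitted here.
    return 0;
}

Packing the output stage parameters into a single descriptor keeps the configure()/validate() signatures stable as further quantized data types are added, instead of growing the argument list for each new case.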