Diffstat (limited to 'arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h')
-rw-r--r--  arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h  39
1 file changed, 20 insertions(+), 19 deletions(-)
diff --git a/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h b/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h
index 3f41edc5aa..b7632d70c4 100644
--- a/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h
+++ b/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,6 +24,7 @@
#ifndef ARM_COMPUTE_NEDIRECTCONVOLUTIONLAYEROUTPUTSTAGEKERNEL_H
#define ARM_COMPUTE_NEDIRECTCONVOLUTIONLAYEROUTPUTSTAGEKERNEL_H

+#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/NEON/INEKernel.h"

namespace arm_compute
@@ -32,6 +33,8 @@ class ITensor;
/** NEON kernel to accumulate the biases, if provided, or downscale in case of quantized input.
*
* @note We assume bias to be shared
+ * @note For quantized computations (i.e. @p input of S32 type) the output data type for auto-initialization must be passed as part
+ * of the @ref DirectConvolutionLayerOutputStageKernelInfo.
*/
class NEDirectConvolutionLayerOutputStageKernel : public INEKernel
{
@@ -54,32 +57,30 @@ public:
~NEDirectConvolutionLayerOutputStageKernel() = default;
/** Set the accumulate buffer and the biases of the kernel.
*
- * @param[in, out] input Input to add the bias to. If @p output is not specified then accumulation is done in-place.
- * Data type supported: F16/F32
- * @param[in] bias (Optional) The shared bias tensor to add. It must be 1D Tensor. Data type supported: Same as @p input
- * @param[out] output (Optional) If the output tensor is specified the accumulation is done out-of-place. (Defaults to nullptr)
- * Data type supported: F16/F32
- * @param[in] result_fixedpoint_multiplier (Optional) Fixed point value to be multiplied to each element of the input matrix once the result_offset has been added
- * @param[in] result_shift (Optional) Integer value used to round the result of the fixed point multiplication to nearest division by a power-of-two
- * @param[in] result_offset_after_shift (Optional) Offset to be applied to result before converting it back to QASYMM8
+ * @param[in, out] input Input to add the bias to. If @p output is not specified then accumulation is done in-place.
+ * Data type supported: F16/F32/S32
+ * @param[in] bias (Optional) The shared bias tensor to add. It must be 1D Tensor. Data type supported: Same as @p input
+ * @param[out] output (Optional) If the output tensor is specified the accumulation is done out-of-place. (Defaults to nullptr)
+ * Note that in-place computation is only supported for F16/F32. For S32 this must not be nullptr.
+ * Data type supported: F16/F32 or QASYMM8/QASYMM8_SIGNED if @p input is S32
+ * @param[in] info (Optional) DirectConvolutionLayerOutputStageKernel descriptor metadata
*/
void configure(ITensor *input, const ITensor *bias = nullptr, ITensor *output = nullptr,
- int result_fixedpoint_multiplier = 0, int result_shift = 0, int result_offset_after_shift = 0);
+ const DirectConvolutionLayerOutputStageKernelInfo &info = DirectConvolutionLayerOutputStageKernelInfo());
/** Static function to check if given info will lead to a valid configuration of @ref NEDirectConvolutionLayerOutputStageKernel
*
- * @param[in] input Input to add the bias to. If @p output is not specified then accumulation is done in-place.
- * Data type supported: F16/F32
- * @param[in] bias (Optional) The shared bias tensor to add. It must be 1D Tensor. Data type supported: Same as @p input
- * @param[in] output (Optional) If the output tensor is specified the accumulation is done out-of-place. (Defaults to nullptr)
- * Data type supported: F16/F32
- * @param[in] result_fixedpoint_multiplier (Optional) Fixed point value to be multiplied to each element of the input matrix once the result_offset has been added
- * @param[in] result_shift (Optional) Integer value used to round the result of the fixed point multiplication to nearest division by a power-of-two
- * @param[in] result_offset_after_shift (Optional) Offset to be applied to result before converting it back to QASYMM8
+ * @param[in] input Input to add the bias to. If @p output is not specified then accumulation is done in-place.
+ * Data type supported: F16/F32/S32
+ * @param[in] bias (Optional) The shared bias tensor to add. It must be 1D Tensor. Data type supported: Same as @p input
+ * @param[in] output (Optional) If the output tensor is specified the accumulation is done out-of-place. (Defaults to nullptr)
+ * Note that in-place computation is only supported for F16/F32. For S32 this must not be nullptr.
+ * Data type supported: F16/F32 or QASYMM8/QASYMM8_SIGNED if @p input is S32
+ * @param[in] info (Optional) DirectConvolutionLayerOutputStageKernel descriptor metadata
*
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *bias = nullptr, const ITensorInfo *output = nullptr,
- int result_fixedpoint_multiplier = 0, int result_shift = 0, int result_offset_after_shift = 0);
+ const DirectConvolutionLayerOutputStageKernelInfo &info = DirectConvolutionLayerOutputStageKernelInfo());

// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
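
For reference, below is a minimal sketch of how a caller migrates from the three defaulted integer parameters to the new descriptor-based API. The descriptor field names (result_fixedpoint_multiplier, result_shift, result_offset_after_shift, output_data_type) are assumed to mirror the removed parameters plus the output data type mentioned in the class note; the authoritative layout is in arm_compute/core/KernelDescriptors.h. The numeric requantization values are placeholders, not values from this commit.

    #include "arm_compute/core/KernelDescriptors.h"
    #include "arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    // Quantized output stage: S32 accumulator in, QASYMM8 out. Per the new
    // doc comment, @p output must not be nullptr for S32 input, and the
    // output data type for auto-initialization travels in the descriptor.
    void configure_quantized_output_stage(Tensor &acc_s32, const Tensor &bias_s32, Tensor &dst_q8)
    {
        DirectConvolutionLayerOutputStageKernelInfo info{};
        info.result_fixedpoint_multiplier = 1073741824;      // placeholder requantization multiplier
        info.result_shift                 = 1;               // rounding right-shift (power of two)
        info.result_offset_after_shift    = 10;              // offset applied before the QASYMM8 cast
        info.output_data_type             = DataType::QASYMM8;

        NEDirectConvolutionLayerOutputStageKernel kernel{};
        kernel.configure(&acc_s32, &bias_s32, &dst_q8, info);
    }

    // Float path: the default-constructed descriptor suffices, and in-place
    // accumulation (output left as nullptr) remains supported for F16/F32.
    void configure_float_output_stage(Tensor &acc_f32, const Tensor &bias_f32)
    {
        NEDirectConvolutionLayerOutputStageKernel kernel{};
        kernel.configure(&acc_f32, &bias_f32);
    }

Bundling the requantization parameters into a single info struct also explains the direction of the change: new fields such as output_data_type can be added to the descriptor without growing the tail of defaulted arguments on configure() and validate() or touching existing call sites.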