From f87cc7f6fef95f9b022725304118796a6a764a7c Mon Sep 17 00:00:00 2001
From: Pablo Tello
Date: Wed, 26 Jul 2017 10:28:40 +0100
Subject: COMPMID-417: Port NEDirectConvolution 1x1 to QS16.

Change-Id: Icae6a5091e836d0aca24375f43cca9e6d3a2090f
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/81662
Reviewed-by: Moritz Pflanzer
Tested-by: Kaizen
Reviewed-by: Anthony Barbier
---
 .../core/NEON/kernels/NEDirectConvolutionLayerBiasAccumulateKernel.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerBiasAccumulateKernel.h b/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerBiasAccumulateKernel.h
index f098e18655..87788ba389 100644
--- a/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerBiasAccumulateKernel.h
+++ b/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerBiasAccumulateKernel.h
@@ -51,7 +51,7 @@ public:
     /** Set the accumulate buffer and the biases of the kernel.
      *
      * @param[in, out] input  Input to add the bias to. If @p output is not specified then accumulation is done in-place.
-     *                        Data type supported: QS8/F32
+     *                        Data type supported: QS8/QS16/F16/F32
      * @param[in]      bias   The shared bias tensor to add. It must be 1D Tensor. Data type supported: Same as @p input
      * @param[out]     output (Optional) If the output tensor is specified the accumulation is done out-of-place. (Defaults to nullptr)
      *                        Data type supported: Same as @p input
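
For context, below is a minimal configuration sketch showing how the documented configure() interface might be exercised with the newly supported QS16 data type. It assumes the fixed-point API of arm_compute releases from this era (a TensorInfo constructor taking a fixed-point position, DataType::QS16, and configure(input, bias, output = nullptr) as described in the doxygen comment above); the tensor shapes and the fixed-point position of 4 are purely illustrative and not taken from the patch.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/NEON/kernels/NEDirectConvolutionLayerBiasAccumulateKernel.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Accumulation buffer, shared 1D bias tensor and the kernel under discussion.
    Tensor                                       accum{};
    Tensor                                       bias{};
    NEDirectConvolutionLayerBiasAccumulateKernel kernel{};

    // QS16 fixed-point tensors; shapes and fixed-point position (4) are illustrative assumptions.
    accum.allocator()->init(TensorInfo(TensorShape(16U, 16U, 8U), 1, DataType::QS16, 4));
    bias.allocator()->init(TensorInfo(TensorShape(8U), 1, DataType::QS16, 4));

    // Omitting the optional output tensor (default nullptr) accumulates the bias in-place,
    // as the updated doxygen comment describes.
    kernel.configure(&accum, &bias);

    // Backing memory must be allocated before the kernel is scheduled for execution.
    accum.allocator()->allocate();
    bias.allocator()->allocate();

    return 0;
}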