From 7d323a6adca97c130a0fc7c6299c75d581906edd Mon Sep 17 00:00:00 2001
From: Gian Marco Iodice
Date: Wed, 5 Jul 2017 20:05:23 +0100
Subject: COMPMID-440, COMPMID-441 - Port CLConvolutionLayer and
 CLFullyConnectedLayer to support 16 bit fixed point

Change-Id: I8d8ef2cb5ec453eb83fba8d8077550b96ed4bceb
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/79837
Reviewed-by: Moritz Pflanzer
Reviewed-by: Georgios Pinitas
Tested-by: Kaizen
---
 arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h b/arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h
index 8732c6094b..099348fb15 100644
--- a/arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h
+++ b/arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h
@@ -47,7 +47,7 @@ public:
     /** Set the input and output of the kernel.
      *
      * @param[in]  input   The input tensor to convert. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] if shared,
-     *                     and 5D tensor with dimensions [kernel_x, kernel_y, IFM, OFM, num_patches] if unshared. Data types supported: QS8/F16/F32
+     *                     and 5D tensor with dimensions [kernel_x, kernel_y, IFM, OFM, num_patches] if unshared. Data types supported: QS8/QS16/F16/F32
      * @param[in]  biases  The shared biases tensor to append. Bias is 1D tensor with dimensions [OFM] if shared and 2D tensor with
      *                     dimensions [OFM, num_patches] if unshared. Data types supported: Same as @p input
      * @param[out] output  The output tensor. Should be a 2D Tensor. Data types supported: Same as @p input
--
cgit v1.2.1
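
The hunk above only updates the documented data types; the functional QS16 support lands in the convolution and fully connected layers named in the subject. As context, here is a minimal sketch of how the ported 16-bit fixed-point path might be exercised through the runtime CLConvolutionLayer (which drives CLWeightsReshapeKernel on the weights and biases documented above). The tensor shapes, the fixed-point position of 4, and the default_init/allocate boilerplate are illustrative assumptions, not part of this patch; the configure signature is the 2017-era one taking input, weights, biases, output and a PadStrideInfo.

    // Sketch only: exercises the QS16 convolution path ported by this change.
    // Shapes, fixed-point position (4) and init/allocate calls are illustrative
    // assumptions, not taken from this patch.
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init(); // create the CL context and command queue

        // All tensors use DataType::QS16 with the same fixed-point position.
        CLTensor src, weights, biases, dst;
        src.allocator()->init(TensorInfo(TensorShape(8U, 8U, 16U), 1, DataType::QS16, 4));          // [W, H, IFM]
        weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 16U, 32U), 1, DataType::QS16, 4)); // [kernel_x, kernel_y, IFM, OFM]
        biases.allocator()->init(TensorInfo(TensorShape(32U), 1, DataType::QS16, 4));               // [OFM]
        dst.allocator()->init(TensorInfo(TensorShape(8U, 8U, 32U), 1, DataType::QS16, 4));          // pad 1, stride 1 keeps W and H

        // 3x3 convolution, stride 1, padding 1; the function reshapes the
        // weights and biases internally as documented in the header above.
        CLConvolutionLayer conv;
        conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 1, 1));

        src.allocator()->allocate();
        weights.allocator()->allocate();
        biases.allocator()->allocate();
        dst.allocator()->allocate();

        // ... fill src, weights and biases with QS16 data here ...

        conv.run();
        CLScheduler::get().sync();
        return 0;
    }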