From 368da83fdd7406d629e8cca64f3eb0af05437419 Mon Sep 17 00:00:00 2001
From: Gian Marco Iodice
Date: Mon, 3 Jul 2017 12:33:49 +0100
Subject: COMPMID-420, COMPMID-414 - Port CLConvolutionLayer and
 CLFullyConnectedLayer to use 8 bit fixed point

Change-Id: I1cb1b4d7711ad7b569ee691e13a5df1b3430292b
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/79565
Tested-by: Kaizen
Reviewed-by: Georgios Pinitas
---
 arm_compute/runtime/CL/functions/CLConvolutionLayer.h    | 2 +-
 arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

(limited to 'arm_compute/runtime')

diff --git a/arm_compute/runtime/CL/functions/CLConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
index 8030b40a71..50a7dc95eb 100644
--- a/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
@@ -88,7 +88,7 @@ public:
      *
      * @param[in]  input   Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
      *                     while every optional dimension from 4 and above represent a batch of inputs.
-     *                     Data types supported: F16, F32.
+     *                     Data types supported: QS8/F16/F32.
      * @param[in]  weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported:Same as @p input.
      * @param[in]  biases  Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported:Same as @p input.
      * @param[out] output  Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
diff --git a/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h b/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
index 826f445bd8..807ff693bc 100644
--- a/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
+++ b/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
@@ -50,7 +50,7 @@ public:
     CLFullyConnectedLayerReshapeWeights();
     /** Set the input and output tensors.
      *
-     * @param[in]  input               Weights tensor. The weights must be 2 dimensional. Data types supported: QS8/F32.
+     * @param[in]  input               Weights tensor. The weights must be 2 dimensional. Data types supported: QS8/F16/F32.
      * @param[out] output              Destination tensor. Data type supported: Same as @p input.
      * @param[in]  transpose_weights   True if the weights must be transposed. Data types supported: Same as @p weights.
      * @param[in]  is_batched_fc_layer True if it is a batched fully connected layer
@@ -85,7 +85,7 @@ public:
     CLFullyConnectedLayer();
     /** Set the input and output tensors.
      *
-     * @param[in]  input   Source tensor. Data type supported: F16/F32.
+     * @param[in]  input   Source tensor. Data type supported: QS8/F16/F32.
      * @param[in]  weights Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input
      * @param[in]  biases  Bias tensor. It can be nullptr. Data type supported:Same as @p input.
      * @param[out] output  Destination tensor. Data type supported: Same as @p input.
--
cgit v1.2.1
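
Usage sketch (not part of the patch): a minimal example of how the QS8 fixed point path documented above might be exercised through CLFullyConnectedLayer. The tensor shapes and the fixed_point_position value are illustrative assumptions, not taken from this commit; the calls themselves (CLScheduler, CLTensor, TensorInfo with a fixed point position, CLFullyConnectedLayer::configure) follow the library API of this period.

    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"

    using namespace arm_compute;

    int main()
    {
        // Initialise the OpenCL context, queue and kernel library.
        CLScheduler::get().default_init();

        // QS8 tensors carry a fixed point position in their TensorInfo.
        // The Q3.4 format (position 4) and the 128-input/16-output shapes
        // below are assumptions chosen only for illustration.
        constexpr int fixed_point_position = 4;

        CLTensor input, weights, biases, output;
        input.allocator()->init(TensorInfo(TensorShape(128U), 1, DataType::QS8, fixed_point_position));
        weights.allocator()->init(TensorInfo(TensorShape(128U, 16U), 1, DataType::QS8, fixed_point_position));
        biases.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::QS8, fixed_point_position));
        output.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::QS8, fixed_point_position));

        // Configure the function before allocating the tensors.
        CLFullyConnectedLayer fc;
        fc.configure(&input, &weights, &biases, &output);

        input.allocator()->allocate();
        weights.allocator()->allocate();
        biases.allocator()->allocate();
        output.allocator()->allocate();

        // ... fill input/weights/biases with QS8 data here ...

        fc.run();
        CLScheduler::get().sync();
        return 0;
    }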