From 10c53f1ef317095ddcd9143bf759cc68ecb0e721 Mon Sep 17 00:00:00 2001
From: Manuel Bottini
Date: Wed, 17 Jul 2019 16:11:53 +0100
Subject: COMPMID-2307: QUANTIZED_16BIT_LSTM operator for CL

Change-Id: I1b52df359f1a368d585fac43a08496544dd2f86f
Signed-off-by: Manuel Bottini
Reviewed-on: https://review.mlplatform.org/c/1568
Tested-by: Arm Jenkins
Reviewed-by: Giuseppe Rossini
Comments-Addressed: Arm Jenkins
---
 arm_compute/runtime/CL/functions/CLDequantizationLayer.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arm_compute/runtime/CL/functions/CLDequantizationLayer.h')

diff --git a/arm_compute/runtime/CL/functions/CLDequantizationLayer.h b/arm_compute/runtime/CL/functions/CLDequantizationLayer.h
index 2f7af01a84..ade589d79e 100644
--- a/arm_compute/runtime/CL/functions/CLDequantizationLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDequantizationLayer.h
@@ -40,13 +40,13 @@ public:
     /** Set the input and output tensors.
      *
      * @param[in]  input  Source tensor with at least 3 dimensions. The dimensions over the third will be interpreted as batches.
-     *                    Data types supported: QASYMM8/QSYMM8.
+     *                    Data types supported: QASYMM8/QSYMM8/QSYMM16.
      * @param[out] output Destination tensor with the same dimensions of input. Data type supported: F16/F32.
      */
    void configure(const ICLTensor *input, ICLTensor *output);
    /** Static function to check if given info will lead to a valid configuration of @ref CLDequantizationLayer
     *
-    * @param[in] input  Input tensor info. Data types supported: QASYMM8/QSYMM8.
+    * @param[in] input  Input tensor info. Data types supported: QASYMM8/QSYMM8/QSYMM16.
     * @param[in] output Output tensor info. Data type supported: F16/F32.
     *
     * @return a status
--
cgit v1.2.1
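
Usage note: a minimal sketch of dequantizing a QSYMM16 input with CLDequantizationLayer after this change, assuming the standard CLTensor/CLScheduler workflow from the library; the tensor shape and quantization scale are illustrative assumptions, not values taken from the patch.

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLDequantizationLayer.h"

using namespace arm_compute;

int main()
{
    // Initialise the default OpenCL context and command queue.
    CLScheduler::get().default_init();

    // 3D QSYMM16 source tensor (shape and scale chosen for illustration) and an F32 destination.
    CLTensor input;
    CLTensor output;
    input.allocator()->init(TensorInfo(TensorShape(4U, 4U, 2U), 1, DataType::QSYMM16, QuantizationInfo(0.5f)));
    output.allocator()->init(TensorInfo(TensorShape(4U, 4U, 2U), 1, DataType::F32));

    // Optionally check the configuration up front, as advertised by the validate() entry point above.
    const Status status = CLDequantizationLayer::validate(input.info(), output.info());
    if(!status)
    {
        return 1;
    }

    // Configure the function with the source and destination tensors.
    CLDequantizationLayer dequant;
    dequant.configure(&input, &output);

    // Allocate the backing OpenCL buffers, run the kernel and wait for completion.
    input.allocator()->allocate();
    output.allocator()->allocate();
    dequant.run();
    CLScheduler::get().sync();

    return 0;
}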