From ba27e4467dfc04e23ce9483330be062e9aaebdc5 Mon Sep 17 00:00:00 2001
From: Michalis Spyrou
Date: Tue, 28 May 2019 10:04:57 +0100
Subject: COMPMID-2236: QUANTIZED_16BIT_LSTM operator for NEON

Change-Id: I554023508e09b790ecc1bbdada529697d6c7b616
Signed-off-by: giuros01
Reviewed-on: https://review.mlplatform.org/c/1551
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Michalis Spyrou
---
 arm_compute/runtime/NEON/functions/NEQuantizationLayer.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arm_compute/runtime/NEON/functions/NEQuantizationLayer.h')

diff --git a/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h b/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
index 5e4b4f754c..46a62bd903 100644
--- a/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
@@ -49,13 +49,13 @@ public:
     /** Set the input and output tensors.
      *
      * @param[in]  input  Source tensor. The dimensions over the third will be interpreted as batches. Data types supported: F32/F16.
-     * @param[out] output Destination tensor with the same dimensions of input. Data types supported: QASYMM8
+     * @param[out] output Destination tensor with the same dimensions of input. Data types supported: QASYMM8/QSYMM16
      */
     void configure(const ITensor *input, ITensor *output);
     /** Static function to check if given info will lead to a valid configuration of @ref NEQuantizationLayer
      *
      * @param[in] input  Input tensor info. The dimensions over the third will be interpreted as batches. Data types supported: F32/F16.
-     * @param[in] output Output tensor info. Data types supported: QASYMM8
+     * @param[in] output Output tensor info. Data types supported: QASYMM8/QSYMM16
     *
     * @return a status
     */
--
cgit v1.2.1
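
For context, below is a minimal usage sketch of NEQuantizationLayer exercising the QSYMM16 output path this patch documents. It uses only the configure() and validate() signatures shown in the diff plus the standard Arm Compute Library runtime API (Tensor, TensorInfo, QuantizationInfo); the tensor shape and the scale value are illustrative assumptions, not taken from the patch.

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEQuantizationLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // F32 source tensor; dimensions over the third are treated as batches.
    Tensor src;
    src.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));

    // QSYMM16 destination with the same shape. QSYMM16 holds signed 16-bit
    // values with symmetric, scale-only quantization; the scale chosen here
    // is an illustrative assumption.
    Tensor dst;
    dst.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::QSYMM16,
                                     QuantizationInfo(1.f / 32768.f)));

    // Check that the F32 -> QSYMM16 configuration is valid before allocating.
    Status status = NEQuantizationLayer::validate(src.info(), dst.info());
    if(status.error_code() != ErrorCode::OK)
    {
        return 1;
    }

    NEQuantizationLayer quant;
    quant.configure(&src, &dst);

    src.allocator()->allocate();
    dst.allocator()->allocate();
    // ... fill src with data ...

    quant.run();
    return 0;
}

The same call sequence covers the pre-existing QASYMM8 output path: swap the destination data type to DataType::QASYMM8 and supply an asymmetric QuantizationInfo(scale, offset) instead.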