author    Michele Di Giorgio <michele.digiorgio@arm.com>  2019-09-10 10:42:27 +0100
committer Georgios Pinitas <georgios.pinitas@arm.com>     2019-09-16 14:43:54 +0000
commit    d87a7b297a5bfc2bad3ba78ea97754d7894e82ef (patch)
tree      c527ede747d386fe8dd1b39a877ea4e9a19a5fd3 /arm_compute/runtime/CL/functions/CLQuantizationLayer.h
parent    e874ef9b845424dceeac4211ca9dfec24949f03c (diff)
download  ComputeLibrary-d87a7b297a5bfc2bad3ba78ea97754d7894e82ef.tar.gz
COMPMID-2650: Add support for QASYMM16 in CLQuantizationLayer
Change-Id: I51dda621975f522a65d770304bed0ff0f30d1235
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1902
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'arm_compute/runtime/CL/functions/CLQuantizationLayer.h')
-rw-r--r--  arm_compute/runtime/CL/functions/CLQuantizationLayer.h | 6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/arm_compute/runtime/CL/functions/CLQuantizationLayer.h b/arm_compute/runtime/CL/functions/CLQuantizationLayer.h
index 104f8e2eb2..41a03fdc08 100644
--- a/arm_compute/runtime/CL/functions/CLQuantizationLayer.h
+++ b/arm_compute/runtime/CL/functions/CLQuantizationLayer.h
@@ -43,13 +43,15 @@ public:
/** Set the input and output tensors.
*
* @param[in] input Source tensor. The dimensions over the third will be interpreted as batches. Data types supported: F16/32.
- * @param[out] output Destination tensor with the same dimensions of input. Output data type must be QASYMM8.
+ * @param[out] output Destination tensor with the same dimensions of input. Data types supported: QASYMM8/QASYMM16.
+ *
+ * @note Output auto initialization is not supported by this function
*/
void configure(const ICLTensor *input, ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLQuantizationLayer
*
* @param[in] input Input tensor info. The dimensions over the third will be interpreted as batches. Data types supported: F16/32.
- * @param[in] output Output tensor info. Output data type must be QASYMM8.
+ * @param[in] output Output tensor info. Data types supported: QASYMM8/QASYMM16.
*
* @return a status
*/