From 9e631c204444e7b095510c54819e944f9be8d342 Mon Sep 17 00:00:00 2001
From: Usama Arif
Date: Tue, 14 May 2019 17:10:40 +0100
Subject: COMPMID-2252 NECast.

Change-Id: I7532aea6827a325eb8457132d4787ac527e93cd4
Signed-off-by: Usama Arif
Reviewed-on: https://review.mlplatform.org/c/1149
Reviewed-by: Pablo Marquez
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Georgios Pinitas
---
 arm_compute/core/NEON/kernels/NEDepthConvertLayerKernel.h | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

(limited to 'arm_compute/core/NEON')

diff --git a/arm_compute/core/NEON/kernels/NEDepthConvertLayerKernel.h b/arm_compute/core/NEON/kernels/NEDepthConvertLayerKernel.h
index 16b8e4276f..c900e08424 100644
--- a/arm_compute/core/NEON/kernels/NEDepthConvertLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEDepthConvertLayerKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -30,7 +30,9 @@ namespace arm_compute
 {
 class ITensor;
 
-/** Depth conversion kernel */
+/** Depth conversion kernel
+ *  This function ignores the scale and zeroPoint of quantized tensors, i.e. QASYMM8 input is treated as uint8 values.
+ */
 class NEDepthConvertLayerKernel : public INEKernel
 {
 public:
@@ -52,12 +54,13 @@ public:
  *
  * Valid conversions Input -> Output :
  *
- * - QASYMM8 -> F16, F32
- * - U8 -> U16, S16, S32
+ * - QASYMM8 -> U16, S16, S32, F32, F16
+ * - U8 -> U16, S16, S32, F32, F16
  * - U16 -> U8, U32
  * - S16 -> U8, S32
- * - F16 -> QASYMM8, F32
- * - F32 -> QASYMM8, F16
+ * - F16 -> QASYMM8, F32, S32, U8
+ * - S32 -> QASYMM8, F16, F32, U8
+ * - F32 -> QASYMM8, F16, S32, U8
  *
  * @param[in]  input  The input tensor to convert. Data types supported: QASYMM8/U8/U16/S16/F16/F32.
  * @param[out] output The output tensor. Data types supported: QASYMM8/U8/U16/S16/U32/S32/F16/F32.
-- 
cgit v1.2.1
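
Illustrative sketch (not part of the commit): the kernel patched above is normally driven through its NEON runtime wrapper rather than configured directly. The code below shows how one of the newly documented conversions (QASYMM8 -> F32) might be set up with NEDepthConvertLayer; the configure(input, output, policy, shift) signature and the header paths are assumed from the library of this period, and, per the new doxygen note, the QASYMM8 data is read as raw uint8 with scale and zero point ignored.

// Illustrative sketch, not taken from this commit: casting a QASYMM8 tensor to F32
// through the NEON runtime wrapper around NEDepthConvertLayerKernel. The
// configure(input, output, policy, shift) signature is an assumption based on this
// era of the library; no dequantization is applied to the QASYMM8 values.
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Describe a 16x16 QASYMM8 source and an F32 destination of the same shape.
    Tensor src;
    Tensor dst;
    src.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::QASYMM8));
    dst.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));

    // Configure the conversion; shift is left at 0 here.
    NEDepthConvertLayer convert;
    convert.configure(&src, &dst, ConvertPolicy::SATURATE, 0);

    // Allocate backing memory, fill the source, then run the kernel on the NEON backend.
    src.allocator()->allocate();
    dst.allocator()->allocate();
    // ... fill src with uint8 data ...
    convert.run();

    return 0;
}

The NECast function that COMPMID-2252 introduces wraps this same kernel; if its configure() takes (input, output, policy) without a shift argument, the call above maps over directly, but that signature is an assumption and is not shown in this diff.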