diff options
author | Usama Arif <usama.arif@arm.com> | 2019-05-14 17:10:40 +0100 |
---|---|---|
committer | Usama Arif <usama.arif@arm.com> | 2019-05-23 13:26:03 +0000 |
commit | 9e631c204444e7b095510c54819e944f9be8d342 (patch) | |
tree | a6ebd7168d206cf18c46e1ceef29365024751767 /arm_compute/core/NEON/kernels/NEDepthConvertLayerKernel.h | |
parent | dd0bf484a3a34dff17757b5e7a4b6be3b1682a29 (diff) | |
download | ComputeLibrary-9e631c204444e7b095510c54819e944f9be8d342.tar.gz |
COMPMID-2252 NECast.
Change-Id: I7532aea6827a325eb8457132d4787ac527e93cd4
Signed-off-by: Usama Arif <usama.arif@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1149
Reviewed-by: Pablo Marquez <pablo.tello@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'arm_compute/core/NEON/kernels/NEDepthConvertLayerKernel.h')
-rw-r--r-- | arm_compute/core/NEON/kernels/NEDepthConvertLayerKernel.h | 15 |
1 file changed, 9 insertions, 6 deletions
diff --git a/arm_compute/core/NEON/kernels/NEDepthConvertLayerKernel.h b/arm_compute/core/NEON/kernels/NEDepthConvertLayerKernel.h index 16b8e4276f..c900e08424 100644 --- a/arm_compute/core/NEON/kernels/NEDepthConvertLayerKernel.h +++ b/arm_compute/core/NEON/kernels/NEDepthConvertLayerKernel.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2018 ARM Limited. + * Copyright (c) 2016-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -30,7 +30,9 @@ namespace arm_compute { class ITensor; -/** Depth conversion kernel */ +/** Depth conversion kernel + * This function ignores the scale and zeroPoint of quantized tensors, i.e. QASYMM8 input is treated as uint8 values. + */ class NEDepthConvertLayerKernel : public INEKernel { public: @@ -52,12 +54,13 @@ public: * * Valid conversions Input -> Output : * - * - QASYMM8 -> F16, F32 - * - U8 -> U16, S16, S32 + * - QASYMM8 -> U16, S16, S32, F32, F16 + * - U8 -> U16, S16, S32, F32, F16 * - U16 -> U8, U32 * - S16 -> U8, S32 - * - F16 -> QASYMM8, F32 - * - F32 -> QASYMM8, F16 + * - F16 -> QASYMM8, F32, S32, U8 + * - S32 -> QASYMM8, F16, F32, U8 + * - F32 -> QASYMM8, F16, S32, U8 * * @param[in] input The input tensor to convert. Data types supported: QASYMM8/U8/U16/S16/F16/F32. * @param[out] output The output tensor. Data types supported: QASYMM8/U8/U16/S16/U32/S32/F16/F32. |