From 29a01c90fc372d31188ab7157b45b32ce24fa9b3 Mon Sep 17 00:00:00 2001 From: Michalis Spyrou Date: Thu, 22 Aug 2019 11:44:04 +0100 Subject: COMPMID-2417: NEDequantizationLayer support for QASYMM8_PER_CHANNEL Change-Id: I1ef4ce8610e11e81702b0b7f0f7c437fed49833e Signed-off-by: Michalis Spyrou Reviewed-on: https://review.mlplatform.org/c/1795 Tested-by: Arm Jenkins Reviewed-by: Michele Di Giorgio Comments-Addressed: Arm Jenkins --- arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h | 4 ++-- arm_compute/core/QuantizationInfo.h | 11 +++++++++++ 2 files changed, 13 insertions(+), 2 deletions(-) (limited to 'arm_compute/core') diff --git a/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h b/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h index f0a2a57d1a..3e7feda650 100644 --- a/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h +++ b/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h @@ -52,13 +52,13 @@ public: ~NEDequantizationLayerKernel() = default; /** Set input, output tensors. * - * @param[in] input Source tensor. Data type supported: QASYMM8/QSYMM8/QSYMM16. + * @param[in] input Source tensor. Data type supported: QASYMM8/QASYMM8_PER_CHANNEL/QSYMM8/QSYMM16. * @param[out] output Destination tensor with the same dimensions of input. Data type supported: F16/F32. */ void configure(const ITensor *input, ITensor *output); /** Static function to check if given info will lead to a valid configuration of @ref NEDequantizationLayerKernel * - * @param[in] input Input tensor info. Data types supported: QASYMM8/QSYMM8/QSYMM16. + * @param[in] input Input tensor info. Data types supported: QASYMM8/QASYMM8_PER_CHANNEL/QSYMM8/QSYMM16. * @param[in] output Output tensor info. Data types supported: F16/F32. 
* * @return a status diff --git a/arm_compute/core/QuantizationInfo.h b/arm_compute/core/QuantizationInfo.h index 79afca0714..1517d48381 100644 --- a/arm_compute/core/QuantizationInfo.h +++ b/arm_compute/core/QuantizationInfo.h @@ -103,6 +103,17 @@ public: : _scale(scale), _offset() { } + /** Construct quantization info. + * + * @note Used for asymmetric per channel quantization + * + * @param[in] scale Scale. + * @param[in] offset Offset. + */ + QuantizationInfo(std::vector<float> scale, std::vector<int32_t> offset) + : _scale(scale), _offset(offset) + { + } /** Scale vector accessor * * @return A reference to quantization scale metadata -- cgit v1.2.1