From 3f632f3f16e29ebeb7065b30008060fd4bfd09f1 Mon Sep 17 00:00:00 2001 From: Michalis Spyrou Date: Thu, 22 Aug 2019 16:52:00 +0100 Subject: COMPMID-2418: CLDequantizationLayer support for QASYMM8_PER_CHANNEL Add support for QASYMM8_PER_CHANNEL in CLDequantizationLayer. Added tests for NHWC and also updated NEON code to work with NHWC data layout. Cleaned up the reference implementation. Change-Id: Ic1d51f16f7f625503fffdbbb66f6487aa588f08c Signed-off-by: Michalis Spyrou Reviewed-on: https://review.mlplatform.org/c/1828 Tested-by: Arm Jenkins Comments-Addressed: Arm Jenkins Reviewed-by: Georgios Pinitas --- arm_compute/runtime/CL/CLTensorAllocator.h | 4 ++-- arm_compute/runtime/CL/functions/CLDequantizationLayer.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arm_compute/runtime') diff --git a/arm_compute/runtime/CL/CLTensorAllocator.h b/arm_compute/runtime/CL/CLTensorAllocator.h index 982cc51274..f7800d39f8 100644 --- a/arm_compute/runtime/CL/CLTensorAllocator.h +++ b/arm_compute/runtime/CL/CLTensorAllocator.h @@ -146,8 +146,8 @@ private: CLMemory _memory; /**< OpenCL memory */ uint8_t *_mapping; /**< Pointer to the CPU mapping of the OpenCL buffer. */ CLTensor *_owner; /**< Owner of the allocator */ - CLFloatArray _scale; - CLInt32Array _offset; + CLFloatArray _scale; /**< Scales array in case of quantized per channel data type */ + CLInt32Array _offset; /**< Offsets array in case of quantized per channel data type */ }; } // namespace arm_compute #endif /* __ARM_COMPUTE_CLTENSORALLOCATOR_H__ */ diff --git a/arm_compute/runtime/CL/functions/CLDequantizationLayer.h b/arm_compute/runtime/CL/functions/CLDequantizationLayer.h index ade589d79e..c519311fb1 100644 --- a/arm_compute/runtime/CL/functions/CLDequantizationLayer.h +++ b/arm_compute/runtime/CL/functions/CLDequantizationLayer.h @@ -40,13 +40,13 @@ public: /** Set the input and output tensors. * * @param[in] input Source tensor with at least 3 dimensions. 
The dimensions over the third will be interpreted as batches. - * Data types supported: QASYMM8/QSYMM8/QSYMM16. + * Data types supported: QASYMM8/QASYMM8_PER_CHANNEL/QSYMM8/QSYMM16. * @param[out] output Destination tensor with the same dimensions of input. Data type supported: F16/F32. */ void configure(const ICLTensor *input, ICLTensor *output); /** Static function to check if given info will lead to a valid configuration of @ref CLDequantizationLayer * - * @param[in] input Input tensor info. Data types supported: QASYMM8/QSYMM8/QSYMM16. + * @param[in] input Input tensor info. Data types supported: QASYMM8/QASYMM8_PER_CHANNEL/QSYMM8/QSYMM16. * @param[in] output Output tensor info. Data type supported: F16/F32. * * @return a status -- cgit v1.2.1