diff options
Diffstat (limited to 'arm_compute')
-rw-r--r-- | arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h | 4 | ||||
-rw-r--r-- | arm_compute/core/QuantizationInfo.h | 11 | ||||
-rw-r--r-- | arm_compute/runtime/NEON/functions/NEDequantizationLayer.h | 4 |
3 files changed, 15 insertions, 4 deletions
diff --git a/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h b/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h index f0a2a57d1a..3e7feda650 100644 --- a/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h +++ b/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h @@ -52,13 +52,13 @@ public: ~NEDequantizationLayerKernel() = default; /** Set input, output tensors. * - * @param[in] input Source tensor. Data type supported: QASYMM8/QSYMM8/QSYMM16. + * @param[in] input Source tensor. Data type supported: QASYMM8/QASYMM8_PER_CHANNEL/QSYMM8/QSYMM16. * @param[out] output Destination tensor with the same dimensions of input. Data type supported: F16/F32. */ void configure(const ITensor *input, ITensor *output); /** Static function to check if given info will lead to a valid configuration of @ref NEDequantizationLayerKernel * - * @param[in] input Input tensor info. Data types supported: QASYMM8/QSYMM8/QSYMM16. + * @param[in] input Input tensor info. Data types supported: QASYMM8/QASYMM8_PER_CHANNEL/QSYMM8/QSYMM16. * @param[in] output Output tensor info. Data types supported: F16/F32. * * @return a status diff --git a/arm_compute/core/QuantizationInfo.h b/arm_compute/core/QuantizationInfo.h index 79afca0714..1517d48381 100644 --- a/arm_compute/core/QuantizationInfo.h +++ b/arm_compute/core/QuantizationInfo.h @@ -103,6 +103,17 @@ public: : _scale(scale), _offset() { } + /** Construct quantization info. + * + * @note Used for asymmetric per channel quantization + * + * @param[in] scale Scale. + * @param[in] offset Offset.
+ */ + QuantizationInfo(std::vector<float> scale, std::vector<int32_t> offset) + : _scale(scale), _offset(offset) + { + } /** Scale vector accessor * * @return A reference to quantization scale metadata diff --git a/arm_compute/runtime/NEON/functions/NEDequantizationLayer.h b/arm_compute/runtime/NEON/functions/NEDequantizationLayer.h index c08366e5a7..88c8777a68 100644 --- a/arm_compute/runtime/NEON/functions/NEDequantizationLayer.h +++ b/arm_compute/runtime/NEON/functions/NEDequantizationLayer.h @@ -39,13 +39,13 @@ class NEDequantizationLayer : public INESimpleFunctionNoBorder public: /** Configure the kernel. * - * @param[in] input Source tensor. Data types supported: QASYMM8/QSYMM8/QSYMM16. + * @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_PER_CHANNEL/QSYMM8/QSYMM16. * @param[out] output Destination tensor with the same dimensions of input. Data type supported: F16/F32. */ void configure(const ITensor *input, ITensor *output); /** Static function to check if given info will lead to a valid configuration of @ref NEDequantizationLayer * - * @param[in] input Input tensor info. Data types supported: QASYMM8/QSYMM8/QSYMM16. + * @param[in] input Input tensor info. Data types supported: QASYMM8/QASYMM8_PER_CHANNEL/QSYMM8/QSYMM16. * @param[in] output Output tensor info. Data type supported: F16/F32. * * @return a status |