diff options
author | Michele Di Giorgio <michele.digiorgio@arm.com> | 2019-10-01 12:25:49 +0100 |
---|---|---|
committer | Michele Di Giorgio <michele.digiorgio@arm.com> | 2019-10-01 17:26:16 +0000 |
commit | d64a46c6dfa81ce4607fc3de57bc9d9ac7e01e4a (patch) | |
tree | e4b2a1e670a6002cd70e920ad7043c090b5d25f1 /arm_compute/core/NEON/kernels/NEQuantizationLayerKernel.h | |
parent | 79f88e6d825402388bb79fc123ee2dfe01985bda (diff) | |
download | ComputeLibrary-d64a46c6dfa81ce4607fc3de57bc9d9ac7e01e4a.tar.gz |
COMPMID-2699: Add support for QASYMM16 in NEQuantizationLayer
Change-Id: Icb968e37551a9048040e9aaff5329e874c53a2ee
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2016
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'arm_compute/core/NEON/kernels/NEQuantizationLayerKernel.h')
-rw-r--r-- | arm_compute/core/NEON/kernels/NEQuantizationLayerKernel.h | 25 |
1 file changed, 22 insertions, 3 deletions
diff --git a/arm_compute/core/NEON/kernels/NEQuantizationLayerKernel.h b/arm_compute/core/NEON/kernels/NEQuantizationLayerKernel.h index 391a72c6db..e1aaad5094 100644 --- a/arm_compute/core/NEON/kernels/NEQuantizationLayerKernel.h +++ b/arm_compute/core/NEON/kernels/NEQuantizationLayerKernel.h @@ -57,13 +57,15 @@ public: /** Set the input, output. * * @param[in] input Source tensor. The dimensions over the third will be interpreted as batches. Data types supported: F32/F16. - * @param[out] output Destination tensor with the same dimensions of input. Data types supported: QASYMM8. + * @param[out] output Destination tensor with the same dimensions of input. Data types supported: QASYMM8/QASYMM16. + * + * @note Output auto initialization is not supported by this kernel */ void configure(const ITensor *input, ITensor *output); /** Static function to check if given info will lead to a valid configuration of @ref NEQuantizationLayerKernel * * @param[in] input Input tensor info. Data types supported: F32/F16. - * @param[in] output Output tensor info. Data types supported: QASYMM8. + * @param[in] output Output tensor info. Data types supported: QASYMM8/QASYMM16. * * @return a status */ @@ -73,11 +75,28 @@ public: void run(const Window &window, const ThreadInfo &info) override; private: + /** Common signature for all the specialised @ref NEQuantizationLayerKernel functions * + * @param[in] window Region on which to execute the kernel. + */ + using QuantizationFunctionExecutorPtr = void (NEQuantizationLayerKernel::*)(const Window &window); + /** Function to apply QASYMM8 quantization on a tensor. * + * @param[in] window Region on which to execute the kernel. + */ template <typename T> - void quantize(const Window &window, const QuantizationInfo &qinfo); + void run_quantize_qasymm8(const Window &window); + /** Function to apply QASYMM16 quantization on a tensor. * + * @param[in] window Region on which to execute the kernel. + */ + template <typename T> + void run_quantize_qasymm16(const Window &window); const ITensor *_input; ITensor *_output; + + QuantizationFunctionExecutorPtr _func; }; } // namespace arm_compute #endif /*__ARM_COMPUTE_NEQUANTIZATIONLAYERKERNEL_H__ */ |