author    Manuel Bottini <manuel.bottini@arm.com>  2019-12-18 18:01:27 +0000
committer Georgios Pinitas <georgios.pinitas@arm.com>  2020-01-23 15:25:52 +0000
commit    b4bb827c67563d2e76f0c0c472556b895b74cee2 (patch)
tree      d41090f8b529effb5078bb68b728f7b0ca58c2ad /arm_compute/core/NEON/kernels/NEPoolingLayerKernel.h
parent    71ac9037abce1c6c4af42c485d5395dd6fd79a5a (diff)
download  ComputeLibrary-b4bb827c67563d2e76f0c0c472556b895b74cee2.tar.gz
COMPMID-2772: Add support for QASYMM8_SIGNED in NEPoolingLayer
Change-Id: Ia8ef8f83eb8625a6a609e06dca89d674b07c59cd
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2628
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
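
For context, a minimal usage sketch of the feature this patch enables: configuring NEPoolingLayer on a QASYMM8_SIGNED tensor. The tensor shape, quantization parameters and pooling configuration below are illustrative assumptions, not taken from the patch, and the exact PoolingLayerInfo constructor arguments can differ between library versions.

    #include "arm_compute/runtime/NEON/functions/NEPoolingLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        Tensor src;
        Tensor dst;

        // Illustrative 8x8x16 source tensor with signed 8-bit asymmetric quantization.
        src.allocator()->init(TensorInfo(TensorShape(8U, 8U, 16U), 1, DataType::QASYMM8_SIGNED,
                                         QuantizationInfo(0.5f, 10)));

        // 2x2 max pooling with stride 2; configure() infers the destination tensor info.
        NEPoolingLayer pool;
        pool.configure(&src, &dst, PoolingLayerInfo(PoolingType::MAX, 2, PadStrideInfo(2, 2, 0, 0)));

        src.allocator()->allocate();
        dst.allocator()->allocate();

        // Fill src with quantized data here, then:
        pool.run();
        return 0;
    }
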
Diffstat (limited to 'arm_compute/core/NEON/kernels/NEPoolingLayerKernel.h')
-rw-r--r-- arm_compute/core/NEON/kernels/NEPoolingLayerKernel.h | 26
1 file changed, 15 insertions(+), 11 deletions(-)
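
The diff below replaces the QASYMM8-specific NCHW/NHWC pooling helpers with versions templated on the element type, so one routine can serve both QASYMM8 (uint8_t) and QASYMM8_SIGNED (int8_t). A self-contained sketch of that idea follows, using a deliberately simplified 2x2 average pool that is not the library's implementation:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // T is expected to be uint8_t (QASYMM8) or int8_t (QASYMM8_SIGNED).
    template <typename T>
    std::vector<T> pool2x2_avg(const std::vector<T> &src, int width, int height)
    {
        std::vector<T> dst((width / 2) * (height / 2));
        for(int y = 0; y < height / 2; ++y)
        {
            for(int x = 0; x < width / 2; ++x)
            {
                // Accumulate in int32_t so the sum cannot overflow for either signedness.
                const int32_t sum = src[(2 * y) * width + (2 * x)] + src[(2 * y) * width + (2 * x + 1)] +
                                    src[(2 * y + 1) * width + (2 * x)] + src[(2 * y + 1) * width + (2 * x + 1)];
                dst[y * (width / 2) + x] = static_cast<T>(sum / 4);
            }
        }
        return dst;
    }

    int main()
    {
        const std::vector<uint8_t> u{ 10, 20, 30, 40 };   // QASYMM8-style values, 2x2 plane
        const std::vector<int8_t>  s{ -10, 20, -30, 40 }; // QASYMM8_SIGNED-style values, 2x2 plane
        std::printf("unsigned avg: %d\n", pool2x2_avg(u, 2, 2)[0]); // 25
        std::printf("signed avg: %d\n", pool2x2_avg(s, 2, 2)[0]);   // 5
        return 0;
    }

In the kernel itself, the appropriate instantiation (uint8_t or int8_t) is chosen at configure time from the input tensor's data type.
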
diff --git a/arm_compute/core/NEON/kernels/NEPoolingLayerKernel.h b/arm_compute/core/NEON/kernels/NEPoolingLayerKernel.h
index b36e10cd40..654dfad701 100644
--- a/arm_compute/core/NEON/kernels/NEPoolingLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEPoolingLayerKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,7 +54,7 @@ public:
*
* @note F16 are supported for pool sizes 2 and 3 only
*
- * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32.
+ * @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
* @param[out] output Destination tensor. Data types supported: Same as @p input.
* @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
*/
@@ -63,7 +63,7 @@ public:
*
* @note F16 are supported for pool sizes 2 and 3 only
*
- * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32.
+ * @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
* @param[in] output Destination tensor. Data types supported: Same as @p input.
* @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
*
@@ -148,38 +148,42 @@ private:
* @param[in] exclude_padding Flag to specify exclusion of padding from the operation.
*/
void poolingMxN_f16_nhwc(const Window &window_input, const Window &window, PoolingType pooling_type, bool exclude_padding = false);
- /** Function to perform 2x2 pooling for 8bit asymmetric fixed point.
+ /** Template function to perform 2x2 pooling for 8bit quantized fixed point. (NCHW)
*
* @param[in] window_input Input region on which to execute the kernel.
* @param[in] window Output region on which to execute the kernel.
* @param[in] pooling_type Pooling operation to be computed.
* @param[in] exclude_padding Flag to specify exclusion of padding from the operation.
*/
- void pooling2_qasymm8_nchw(const Window &window_input, const Window &window, PoolingType pooling_type, bool exclude_padding = false);
- /** Function to perform 3x3 pooling for 8bit quantized fixed point.
+ template <typename T>
+ void pooling2_q8_nchw(const Window &window_input, const Window &window, PoolingType pooling_type, bool exclude_padding = false);
+ /** Template function to perform 3x3 pooling for 8bit quantized fixed point. (NCHW)
*
* @param[in] window_input Input region on which to execute the kernel.
* @param[in] window Output region on which to execute the kernel.
* @param[in] pooling_type Pooling operation to be computed.
* @param[in] exclude_padding Flag to specify exclusion of padding from the operation.
*/
- void pooling3_qasymm8_nchw(const Window &window_input, const Window &window, PoolingType pooling_type, bool exclude_padding = false);
- /** Function to perform MxN pooling for 8-bit quantized.
+ template <typename T>
+ void pooling3_q8_nchw(const Window &window_input, const Window &window, PoolingType pooling_type, bool exclude_padding = false);
+ /** Template function to perform MxN pooling for 8-bit quantized. (NCHW)
*
* @param[in] window_input Input region on which to execute the kernel.
* @param[in] window Output region on which to execute the kernel.
* @param[in] pooling_type Pooling operation to be computed.
* @param[in] exclude_padding Flag to specify exclusion of padding from the operation.
*/
- void poolingMxN_qasymm8_nchw(const Window &window_input, const Window &window, PoolingType pooling_type, bool exclude_padding = false);
- /** Function to perform MxN pooling for 8-bit quantized. (NHWC)
+ template <typename T>
+ void poolingMxN_q8_nchw(const Window &window_input, const Window &window, PoolingType pooling_type, bool exclude_padding = false);
+ /** Template function to perform MxN pooling for 8-bit quantized. (NHWC)
*
* @param[in] window_input Input region on which to execute the kernel.
* @param[in] window Output region on which to execute the kernel.
* @param[in] pooling_type Pooling operation to be computed.
* @param[in] exclude_padding Flag to specify exclusion of padding from the operation.
*/
- void poolingMxN_qasymm8_nhwc(const Window &window_input, const Window &window, PoolingType pooling_type, bool exclude_padding = false);
+ template <typename T>
+ void poolingMxN_q8_nhwc(const Window &window_input, const Window &window, PoolingType pooling_type, bool exclude_padding = false);
/** Common signature for all the specialised Pooling functions
*
* @param[in] window_input Input region on which to execute the kernel.