path: root/arm_compute
author     Manuel Bottini <manuel.bottini@arm.com>       2019-12-18 18:01:27 +0000
committer  Georgios Pinitas <georgios.pinitas@arm.com>   2020-01-23 15:25:52 +0000
commit     b4bb827c67563d2e76f0c0c472556b895b74cee2 (patch)
tree       d41090f8b529effb5078bb68b728f7b0ca58c2ad /arm_compute
parent     71ac9037abce1c6c4af42c485d5395dd6fd79a5a (diff)
download   ComputeLibrary-b4bb827c67563d2e76f0c0c472556b895b74cee2.tar.gz
COMPMID-2772: Add support for QASYMM8_SIGNED in NEPoolingLayer
Change-Id: Ia8ef8f83eb8625a6a609e06dca89d674b07c59cd
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2628
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'arm_compute')
-rw-r--r--  arm_compute/core/NEON/kernels/NEPoolingLayerKernel.h    26
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/ext.h           17
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h     1
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/store.h          4
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/tbl.h            45
-rw-r--r--  arm_compute/core/NEON/wrapper/traits.h                     6
-rw-r--r--  arm_compute/runtime/NEON/functions/NEPoolingLayer.h        6
7 files changed, 89 insertions, 16 deletions
diff --git a/arm_compute/core/NEON/kernels/NEPoolingLayerKernel.h b/arm_compute/core/NEON/kernels/NEPoolingLayerKernel.h
index b36e10cd40..654dfad701 100644
--- a/arm_compute/core/NEON/kernels/NEPoolingLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEPoolingLayerKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,7 +54,7 @@ public:
*
* @note F16 are supported for pool sizes 2 and 3 only
*
- * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32.
+ * @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
* @param[out] output Destination tensor. Data types supported: Same as @p input.
* @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
*/
@@ -63,7 +63,7 @@ public:
*
* @note F16 are supported for pool sizes 2 and 3 only
*
- * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32.
+ * @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
* @param[in] output Destination tensor. Data types supported: Same as @p input.
* @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
*
@@ -148,38 +148,42 @@ private:
* @param[in] exclude_padding Flag to specify exclusion of padding from the operation.
*/
void poolingMxN_f16_nhwc(const Window &window_input, const Window &window, PoolingType pooling_type, bool exclude_padding = false);
- /** Function to perform 2x2 pooling for 8bit asymmetric fixed point.
+ /** Template function to perform 2x2 pooling for 8bit quantized fixed point. (NCHW)
*
* @param[in] window_input Input region on which to execute the kernel.
* @param[in] window Output region on which to execute the kernel.
* @param[in] pooling_type Pooling operation to be computed.
* @param[in] exclude_padding Flag to specify exclusion of padding from the operation.
*/
- void pooling2_qasymm8_nchw(const Window &window_input, const Window &window, PoolingType pooling_type, bool exclude_padding = false);
- /** Function to perform 3x3 pooling for 8bit quantized fixed point.
+ template <typename T>
+ void pooling2_q8_nchw(const Window &window_input, const Window &window, PoolingType pooling_type, bool exclude_padding = false);
+ /** Template function to perform 3x3 pooling for 8bit quantized fixed point. (NCHW)
*
* @param[in] window_input Input region on which to execute the kernel.
* @param[in] window Output region on which to execute the kernel.
* @param[in] pooling_type Pooling operation to be computed.
* @param[in] exclude_padding Flag to specify exclusion of padding from the operation.
*/
- void pooling3_qasymm8_nchw(const Window &window_input, const Window &window, PoolingType pooling_type, bool exclude_padding = false);
- /** Function to perform MxN pooling for 8-bit quantized.
+ template <typename T>
+ void pooling3_q8_nchw(const Window &window_input, const Window &window, PoolingType pooling_type, bool exclude_padding = false);
+ /** Template function to perform MxN pooling for 8-bit quantized. (NCHW)
*
* @param[in] window_input Input region on which to execute the kernel.
* @param[in] window Output region on which to execute the kernel.
* @param[in] pooling_type Pooling operation to be computed.
* @param[in] exclude_padding Flag to specify exclusion of padding from the operation.
*/
- void poolingMxN_qasymm8_nchw(const Window &window_input, const Window &window, PoolingType pooling_type, bool exclude_padding = false);
- /** Function to perform MxN pooling for 8-bit quantized. (NHWC)
+ template <typename T>
+ void poolingMxN_q8_nchw(const Window &window_input, const Window &window, PoolingType pooling_type, bool exclude_padding = false);
+ /** Template function to perform MxN pooling for 8-bit quantized. (NHWC)
*
* @param[in] window_input Input region on which to execute the kernel.
* @param[in] window Output region on which to execute the kernel.
* @param[in] pooling_type Pooling operation to be computed.
* @param[in] exclude_padding Flag to specify exclusion of padding from the operation.
*/
- void poolingMxN_qasymm8_nhwc(const Window &window_input, const Window &window, PoolingType pooling_type, bool exclude_padding = false);
+ template <typename T>
+ void poolingMxN_q8_nhwc(const Window &window_input, const Window &window, PoolingType pooling_type, bool exclude_padding = false);
/** Common signature for all the specialised Pooling functions
*
* @param[in] window_input Input region on which to execute the kernel.
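The hunks above replace the QASYMM8-only NCHW/NHWC routines with templated q8 variants so that one code path serves both QASYMM8 (uint8_t) and QASYMM8_SIGNED (int8_t). A minimal, self-contained sketch of that dispatch idea follows; it is not the kernel's code, and every name in it (PoolingDispatchExample, _func, poolingMxN_q8) is an illustrative assumption.

#include <cstdint>

// Hedged sketch: one templated 8-bit routine, instantiated per quantized data
// type and selected once at configure time, mirroring the templated q8
// functions introduced in this patch.
enum class DataTypeExample { QASYMM8, QASYMM8_SIGNED };

struct PoolingDispatchExample
{
    template <typename T>
    static void poolingMxN_q8(const void *src, void *dst)
    {
        // Identical pooling arithmetic for both 8-bit element types would live here.
        static_cast<T *>(dst)[0] = static_cast<const T *>(src)[0];
    }

    void configure(DataTypeExample dt)
    {
        switch(dt)
        {
            case DataTypeExample::QASYMM8:
                _func = &poolingMxN_q8<uint8_t>; // unsigned quantized path
                break;
            case DataTypeExample::QASYMM8_SIGNED:
                _func = &poolingMxN_q8<int8_t>;  // signed quantized path (new)
                break;
        }
    }

    void (*_func)(const void *, void *) = nullptr;
};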
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/ext.h b/arm_compute/core/NEON/wrapper/intrinsics/ext.h
index 70bc91aaa6..f2c3dcc901 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/ext.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/ext.h
@@ -36,6 +36,23 @@ namespace wrapper
return prefix##_##postfix(value_a, value_b, size); \
}
+VEXT_IMPL(uint8x8_t, vext, u8, 1)
+VEXT_IMPL(uint8x8_t, vext, u8, 2)
+VEXT_IMPL(int8x8_t, vext, s8, 1)
+VEXT_IMPL(int8x8_t, vext, s8, 2)
+VEXT_IMPL(uint16x4_t, vext, u16, 1)
+VEXT_IMPL(uint16x4_t, vext, u16, 2)
+VEXT_IMPL(int16x4_t, vext, s16, 1)
+VEXT_IMPL(int16x4_t, vext, s16, 2)
+
+VEXT_IMPL(uint8x16_t, vextq, u8, 1)
+VEXT_IMPL(uint8x16_t, vextq, u8, 2)
+VEXT_IMPL(int8x16_t, vextq, s8, 1)
+VEXT_IMPL(int8x16_t, vextq, s8, 2)
+VEXT_IMPL(uint16x8_t, vextq, u16, 1)
+VEXT_IMPL(uint16x8_t, vextq, u16, 2)
+VEXT_IMPL(int16x8_t, vextq, s16, 1)
+VEXT_IMPL(int16x8_t, vextq, s16, 2)
VEXT_IMPL(int32x4_t, vextq, s32, 1)
VEXT_IMPL(int32x4_t, vextq, s32, 2)
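The new VEXT_IMPL instantiations expose vext_1/vext_2 for the 8-bit and 16-bit vector types used by the quantized pooling kernels. A minimal usage sketch, assuming the umbrella header arm_compute/core/NEON/wrapper/wrapper.h and a buffer of at least 16 valid elements:

#include <arm_neon.h>
#include "arm_compute/core/NEON/wrapper/wrapper.h"

// Sketch: shift a signed 8-bit window by one and two lanes via the new wrappers.
inline void vext_example(const int8_t *src /* assumed: >= 16 valid elements */)
{
    const int8x8_t lo  = vld1_s8(src);
    const int8x8_t hi  = vld1_s8(src + 8);
    const int8x8_t by1 = arm_compute::wrapper::vext_1(lo, hi); // lanes 1..8 of {lo,hi}
    const int8x8_t by2 = arm_compute::wrapper::vext_2(lo, hi); // lanes 2..9 of {lo,hi}
    (void)by1;
    (void)by2;
}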
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
index 26b4322fa4..a7af352c76 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
@@ -66,5 +66,6 @@
#include "arm_compute/core/NEON/wrapper/intrinsics/store.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/sub.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/tanh.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/tbl.h"
#endif /* ARM_COMPUTE_WRAPPER_INTRINSICS_H */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/store.h b/arm_compute/core/NEON/wrapper/intrinsics/store.h
index 0fdc46b7fa..eb2ae6a5e1 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/store.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/store.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,7 +37,9 @@ namespace wrapper
}
VSTORE_IMPL(uint8_t, uint8x8_t, vst1, u8)
+VSTORE_IMPL(uint8_t, uint8x8x2_t, vst2, u8)
VSTORE_IMPL(int8_t, int8x8_t, vst1, s8)
+VSTORE_IMPL(int8_t, int8x8x2_t, vst2, s8)
VSTORE_IMPL(uint16_t, uint16x4_t, vst1, u16)
VSTORE_IMPL(int16_t, int16x4_t, vst1, s16)
VSTORE_IMPL(uint32_t, uint32x2_t, vst1, u32)
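The two new overloads let wrapper::vstore write an interleaved pair of 8-bit vectors through vst2. A hedged usage sketch (destination size and variable names are assumptions):

#include <arm_neon.h>
#include "arm_compute/core/NEON/wrapper/wrapper.h"

// Sketch: store two int8x8_t vectors interleaved (vst2_s8) through the wrapper.
inline void vstore_pair_example(int8_t *dst /* assumed: >= 16 valid elements */,
                                int8x8_t even, int8x8_t odd)
{
    int8x8x2_t pair;
    pair.val[0] = even;
    pair.val[1] = odd;
    // Writes even[0], odd[0], even[1], odd[1], ..., odd[7].
    arm_compute::wrapper::vstore(dst, pair);
}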
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/tbl.h b/arm_compute/core/NEON/wrapper/intrinsics/tbl.h
new file mode 100644
index 0000000000..d3d6b72e6a
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/tbl.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2020 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_TBL_H
+#define ARM_COMPUTE_WRAPPER_TBL_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VTBL_IMPL(stype, vtype, prefix, postfix) \
+ inline vtype vtbl(const stype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VTBL_IMPL(uint8x8x2_t, uint8x8_t, vtbl2, u8)
+VTBL_IMPL(int8x8x2_t, int8x8_t, vtbl2, s8)
+
+#undef VTBL_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_TBL_H */
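The new vtbl wrapper maps onto vtbl2_u8/vtbl2_s8: an 8-lane lookup into a 16-entry byte table, with out-of-range indices producing 0. A minimal sketch of a lookup (table contents and names are illustrative):

#include <arm_neon.h>
#include "arm_compute/core/NEON/wrapper/intrinsics/tbl.h"

// Sketch: gather 8 bytes from a 16-entry table; indices >= 16 yield 0 (vtbl2_u8).
inline uint8x8_t vtbl_example(const uint8_t table[16], const uint8x8_t &indices)
{
    uint8x8x2_t tbl;
    tbl.val[0] = vld1_u8(table);     // entries 0..7
    tbl.val[1] = vld1_u8(table + 8); // entries 8..15
    return arm_compute::wrapper::vtbl(tbl, indices);
}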
diff --git a/arm_compute/core/NEON/wrapper/traits.h b/arm_compute/core/NEON/wrapper/traits.h
index 0a9015e3e9..ae77d2778c 100644
--- a/arm_compute/core/NEON/wrapper/traits.h
+++ b/arm_compute/core/NEON/wrapper/traits.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,6 +42,7 @@ struct vector_128_tag {};
/** Create the appropriate NEON vector given its type and size in terms of elements */
template <typename T, int S> struct neon_vector;
+
// Specializations
#ifndef DOXYGEN_SKIP_THIS
template <> struct neon_vector<uint8_t, 8>{ using scalar_type = uint8_t; using type = uint8x8_t; using tag_type = vector_64_tag; };
@@ -51,7 +52,9 @@ template <> struct neon_vector<int8_t, 16>{ using scalar_type = int8_t; using ty
template <> struct neon_vector<uint16_t, 4>{ using scalar_type = uint16_t; using type = uint16x4_t; using tag_type = vector_64_tag; };
template <> struct neon_vector<int16_t, 4>{ using scalar_type = int16_t; using type = int16x4_t; using tag_type = vector_64_tag; };
template <> struct neon_vector<uint16_t, 8>{ using scalar_type = uint16_t; using type = uint16x8_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<uint16_t, 16>{ using scalar_type = uint16_t; using type = uint16x8x2_t; };
template <> struct neon_vector<int16_t, 8>{ using scalar_type = int16_t; using type = int16x8_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<int16_t, 16>{ using scalar_type = int16_t; using type = int16x8x2_t; };
template <> struct neon_vector<uint32_t, 2>{ using scalar_type = uint32_t; using type = uint32x2_t; using tag_type = vector_64_tag; };
template <> struct neon_vector<int32_t, 2>{ using scalar_type = int32_t; using type = int32x2_t; using tag_type = vector_64_tag; };
template <> struct neon_vector<uint32_t, 4>{ using scalar_type = uint32_t; using type = uint32x4_t; using tag_type = vector_128_tag; };
@@ -62,6 +65,7 @@ template <> struct neon_vector<uint64_t, 2>{ using scalar_type = uint64_t; using
template <> struct neon_vector<int64_t, 2>{ using scalar_type = int64_t; using type = int64x2_t; using tag_type = vector_128_tag; };
template <> struct neon_vector<float_t, 2>{ using scalar_type = float_t; using type = float32x2_t; using tag_type = vector_64_tag; };
template <> struct neon_vector<float_t, 4>{ using scalar_type = float_t; using type = float32x4_t; using tag_type = vector_128_tag; };
+
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
template <> struct neon_vector<float16_t, 4>{ using scalar_type = float16_t; using type = float16x4_t; using tag_type = vector_64_tag; };
template <> struct neon_vector<float16_t, 8>{ using scalar_type = float16_t; using type = float16x8_t; using tag_type = vector_128_tag; };
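The two added neon_vector specializations map 16-element 16-bit requests onto the NEON pair types, which templated q8 code can use to hold 16 widened accumulators at once. A small sketch of resolving those traits, assuming the header path and namespace shown (arm_compute::wrapper::traits):

#include <arm_neon.h>
#include <type_traits>
#include "arm_compute/core/NEON/wrapper/traits.h"

// Sketch: the new specializations resolve to the "x2" NEON pair types.
using u16x16 = arm_compute::wrapper::traits::neon_vector<uint16_t, 16>::type;
using s16x16 = arm_compute::wrapper::traits::neon_vector<int16_t, 16>::type;
static_assert(std::is_same<u16x16, uint16x8x2_t>::value, "expected uint16x8x2_t");
static_assert(std::is_same<s16x16, int16x8x2_t>::value, "expected int16x8x2_t");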
diff --git a/arm_compute/runtime/NEON/functions/NEPoolingLayer.h b/arm_compute/runtime/NEON/functions/NEPoolingLayer.h
index ae08ce636a..eb840b52f2 100644
--- a/arm_compute/runtime/NEON/functions/NEPoolingLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEPoolingLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -48,7 +48,7 @@ public:
*
* @note F16 is supported for pool sizes 2 and 3 only
*
- * @param[in, out] input Source tensor. (Written to only when padding != 0) Data types supported: QASYMM8/F16/F32.
+ * @param[in, out] input Source tensor. (Written to only when padding != 0) Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
* @param[out] output Destination tensor. Data types supported: Same as @p input.
* @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
*/
@@ -57,7 +57,7 @@ public:
*
* @note F16 is supported for pool sizes 2 and 3 only
*
- * @param[in] input Source tensor. (Written to only when padding != 0) Data types supported: QASYMM8/F16/F32.
+ * @param[in] input Source tensor. (Written to only when padding != 0) Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
* @param[in] output Destination tensor. Data types supported: Same as @p input.
* @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
*
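With this patch the runtime function accepts signed quantized inputs directly. A hedged end-to-end sketch follows; the tensor shapes, quantization parameters and PoolingLayerInfo arguments are illustrative assumptions for this version of the API.

#include "arm_compute/runtime/NEON/functions/NEPoolingLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    Tensor src, dst;
    // 8x8x16 QASYMM8_SIGNED input, 4x4x16 output after 2x2/stride-2 average pooling.
    src.allocator()->init(TensorInfo(TensorShape(8U, 8U, 16U), 1, DataType::QASYMM8_SIGNED,
                                     QuantizationInfo(1.f / 128.f, 0)));
    dst.allocator()->init(TensorInfo(TensorShape(4U, 4U, 16U), 1, DataType::QASYMM8_SIGNED,
                                     QuantizationInfo(1.f / 128.f, 0)));

    NEPoolingLayer pool;
    pool.configure(&src, &dst, PoolingLayerInfo(PoolingType::AVG, 2, PadStrideInfo(2, 2, 0, 0)));

    src.allocator()->allocate();
    dst.allocator()->allocate();
    // ... fill src with int8 data ...
    pool.run();
    return 0;
}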