aboutsummaryrefslogtreecommitdiff
path: root/src/core/NEON/wrapper
diff options
context:
space:
mode:
Diffstat (limited to 'src/core/NEON/wrapper')
-rw-r--r--src/core/NEON/wrapper/intrinsics/cgtz.h62
-rw-r--r--src/core/NEON/wrapper/intrinsics/cvt.h61
-rw-r--r--src/core/NEON/wrapper/intrinsics/div.h1
-rw-r--r--src/core/NEON/wrapper/intrinsics/erf.h52
-rw-r--r--src/core/NEON/wrapper/intrinsics/exp.h1
-rw-r--r--src/core/NEON/wrapper/intrinsics/getlane.h14
-rw-r--r--src/core/NEON/wrapper/intrinsics/intrinsics.h18
-rw-r--r--src/core/NEON/wrapper/intrinsics/inv.h1
-rw-r--r--src/core/NEON/wrapper/intrinsics/invsqrt.h1
-rw-r--r--src/core/NEON/wrapper/intrinsics/log.h1
-rw-r--r--src/core/NEON/wrapper/intrinsics/max.h41
-rw-r--r--src/core/NEON/wrapper/intrinsics/mla.h16
-rw-r--r--src/core/NEON/wrapper/intrinsics/pow.h1
-rw-r--r--src/core/NEON/wrapper/intrinsics/qmov.h6
-rw-r--r--src/core/NEON/wrapper/intrinsics/reinterpret.h4
-rw-r--r--src/core/NEON/wrapper/intrinsics/round.h1
-rw-r--r--src/core/NEON/wrapper/intrinsics/setlane.h12
-rw-r--r--src/core/NEON/wrapper/intrinsics/shr.h148
-rw-r--r--src/core/NEON/wrapper/intrinsics/sin.h3
-rw-r--r--src/core/NEON/wrapper/intrinsics/store.h6
-rw-r--r--src/core/NEON/wrapper/intrinsics/sub.h17
-rw-r--r--src/core/NEON/wrapper/intrinsics/svcnt.h68
-rw-r--r--src/core/NEON/wrapper/intrinsics/svcvt.h77
-rw-r--r--src/core/NEON/wrapper/intrinsics/svdup_n.h59
-rw-r--r--src/core/NEON/wrapper/intrinsics/svexp.h50
-rw-r--r--src/core/NEON/wrapper/intrinsics/svlog.h48
-rw-r--r--src/core/NEON/wrapper/intrinsics/svpow.h55
-rw-r--r--src/core/NEON/wrapper/intrinsics/svptrue.h68
-rw-r--r--src/core/NEON/wrapper/intrinsics/svqadd.h60
-rw-r--r--src/core/NEON/wrapper/intrinsics/svreinterpret.h57
-rw-r--r--src/core/NEON/wrapper/intrinsics/svsin.h47
-rw-r--r--src/core/NEON/wrapper/intrinsics/svwhilelt.h73
-rw-r--r--src/core/NEON/wrapper/intrinsics/tanh.h1
-rw-r--r--src/core/NEON/wrapper/scalar/add.h12
-rw-r--r--src/core/NEON/wrapper/scalar/sub.h12
-rw-r--r--src/core/NEON/wrapper/svtraits.h74
-rw-r--r--src/core/NEON/wrapper/traits.h35
-rw-r--r--src/core/NEON/wrapper/wrapper.h10
38 files changed, 1195 insertions, 78 deletions
diff --git a/src/core/NEON/wrapper/intrinsics/cgtz.h b/src/core/NEON/wrapper/intrinsics/cgtz.h
new file mode 100644
index 0000000000..025a7ba976
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/cgtz.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_CGTZ_H
+#define ARM_COMPUTE_WRAPPER_CGTZ_H
+
+#ifdef __aarch64__
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VCGTZ_IMPL(vtype, rtype, prefix, postfix) \
+ inline rtype vcgtz(const vtype &a) \
+ { \
+ return prefix##_##postfix(a); \
+ }
+
+VCGTZ_IMPL(int8x8_t, uint8x8_t, vcgtz, s8)
+VCGTZ_IMPL(int16x4_t, uint16x4_t, vcgtz, s16)
+VCGTZ_IMPL(int32x2_t, uint32x2_t, vcgtz, s32)
+VCGTZ_IMPL(float32x2_t, uint32x2_t, vcgtz, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCGTZ_IMPL(float16x4_t, uint16x4_t, vcgtz, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VCGTZ_IMPL(int8x16_t, uint8x16_t, vcgtzq, s8)
+VCGTZ_IMPL(int16x8_t, uint16x8_t, vcgtzq, s16)
+VCGTZ_IMPL(int32x4_t, uint32x4_t, vcgtzq, s32)
+VCGTZ_IMPL(float32x4_t, uint32x4_t, vcgtzq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCGTZ_IMPL(float16x8_t, uint16x8_t, vcgtzq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VCGTZ_IMPL
+
+} // namespace wrapper
+} // namespace arm_compute
+
+#endif // __aarch64__
+#endif /* ARM_COMPUTE_WRAPPER_CGTZ_H */
diff --git a/src/core/NEON/wrapper/intrinsics/cvt.h b/src/core/NEON/wrapper/intrinsics/cvt.h
index 6e79a92bc2..381de2284a 100644
--- a/src/core/NEON/wrapper/intrinsics/cvt.h
+++ b/src/core/NEON/wrapper/intrinsics/cvt.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Arm Limited.
+ * Copyright (c) 2020, 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,12 +30,11 @@ namespace arm_compute
{
namespace wrapper
{
-#define VCVT_TO_F32_IMPL(ptype, vtype, prefix, postfix1, postfix2) \
- template <typename T> \
- inline typename std::enable_if<std::is_same<T, float>::value, float32x4_t>::type \
- vcvt(const vtype &a) \
- { \
- return prefix##_##postfix1##_##postfix2(a); \
+#define VCVT_TO_F32_IMPL(ptype, vtype, prefix, postfix1, postfix2) \
+ template <typename T> \
+ inline typename std::enable_if<std::is_same<T, float>::value, float32x4_t>::type vcvt(const vtype &a) \
+ { \
+ return prefix##_##postfix1##_##postfix2(a); \
}
VCVT_TO_F32_IMPL(float32x4_t, uint32x4_t, vcvtq, f32, u32)
@@ -46,12 +45,11 @@ VCVT_TO_F32_IMPL(float32x4_t, float16x4_t, vcvt, f32, f16)
#undef VCVT_TO_F32_IMPL
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-#define VCVT_TO_F16_IMPL(ptype, vtype, prefix, postfix1, postfix2) \
- template <typename T> \
- inline typename std::enable_if<std::is_same<T, float16_t>::value, float16x4_t>::type \
- vcvt(const vtype &a) \
- { \
- return prefix##_##postfix1##_##postfix2(a); \
+#define VCVT_TO_F16_IMPL(ptype, vtype, prefix, postfix1, postfix2) \
+ template <typename T> \
+ inline typename std::enable_if<std::is_same<T, float16_t>::value, float16x4_t>::type vcvt(const vtype &a) \
+ { \
+ return prefix##_##postfix1##_##postfix2(a); \
}
VCVT_TO_F16_IMPL(float16x4_t, float32x4_t, vcvt, f16, f32)
@@ -59,20 +57,34 @@ VCVT_TO_F16_IMPL(float16x4_t, float32x4_t, vcvt, f16, f32)
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
template <typename T>
-inline typename std::enable_if<std::is_same<T, uint8_t>::value, uint32x4_t>::type
+inline typename std::enable_if<std::is_same<T, uint8_t>::value || std::is_same<T, uint32_t>::value, uint32x4_t>::type
vcvt(const float32x4_t &a)
{
return vcvtq_u32_f32(a);
}
template <typename T>
-inline typename std::enable_if<std::is_same<T, int8_t>::value, int32x4_t>::type
+inline typename std::enable_if<std::is_same<T, int8_t>::value || std::is_same<T, int32_t>::value, int32x4_t>::type
vcvt(const float32x4_t &a)
{
return vcvtq_s32_f32(a);
}
-#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16)
+#ifdef __aarch64__
+template <typename T>
+inline typename std::enable_if<std::is_same<T, uint32_t>::value, uint32x4_t>::type vcvta(const float32x4_t &a)
+{
+ return vcvtaq_u32_f32(a);
+}
+
+template <typename T>
+inline typename std::enable_if<std::is_same<T, int32_t>::value, int32x4_t>::type vcvta(const float32x4_t &a)
+{
+ return vcvtaq_s32_f32(a);
+}
+#endif //__aarch64__
+
+#if defined(ARM_COMPUTE_ENABLE_BF16)
/** Convert 2x128-bit floating point vectors into 1x128-bit bfloat16 vector
*
* @param[in] inptr Pointer to the input memory to load values from
@@ -80,16 +92,15 @@ vcvt(const float32x4_t &a)
*/
inline void vcvt_bf16_f32(const float *inptr, uint16_t *outptr)
{
- __asm __volatile(
- "ldp q0, q1, [%[inptr]]\n"
- ".inst 0xea16800\n" // BFCVTN v0, v0
- ".inst 0x4ea16820\n" // BFCVTN2 v0, v1
- "str q0, [%[outptr]]\n"
- : [inptr] "+r"(inptr)
- : [outptr] "r"(outptr)
- : "v0", "v1", "memory");
+ __asm __volatile("ldp q0, q1, [%[inptr]]\n"
                     ".inst 0x0ea16800\n" // BFCVTN v0, v0
+ ".inst 0x4ea16820\n" // BFCVTN2 v0, v1
+ "str q0, [%[outptr]]\n"
+ : [inptr] "+r"(inptr)
+ : [outptr] "r"(outptr)
+ : "v0", "v1", "memory");
}
-#endif /* defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) */
+#endif /* defined(ARM_COMPUTE_ENABLE_BF16) */
} // namespace wrapper
} // namespace arm_compute
diff --git a/src/core/NEON/wrapper/intrinsics/div.h b/src/core/NEON/wrapper/intrinsics/div.h
index 265f30d33b..ece991a5b0 100644
--- a/src/core/NEON/wrapper/intrinsics/div.h
+++ b/src/core/NEON/wrapper/intrinsics/div.h
@@ -25,6 +25,7 @@
#define ARM_COMPUTE_WRAPPER_DIV_H
#include "src/core/NEON/NEMath.h"
+
#include <arm_neon.h>
namespace arm_compute
diff --git a/src/core/NEON/wrapper/intrinsics/erf.h b/src/core/NEON/wrapper/intrinsics/erf.h
new file mode 100644
index 0000000000..0e34462b96
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/erf.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef ARM_COMPUTE_WRAPPER_ERF_H
+#define ARM_COMPUTE_WRAPPER_ERF_H
+
+#include "src/core/NEON/NEMath.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VERF_IMPL(vtype, prefix, postfix) \
+ inline vtype verf(const vtype &a) \
+ { \
+ return prefix##_##postfix(a); \
+ }
+
+VERF_IMPL(float32x4_t, verfq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VERF_IMPL(float16x8_t, verfq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VERF_IMPL
+
+} // namespace wrapper
+} // namespace arm_compute
+
+#endif /* ARM_COMPUTE_WRAPPER_ERF_H */
diff --git a/src/core/NEON/wrapper/intrinsics/exp.h b/src/core/NEON/wrapper/intrinsics/exp.h
index c2a6970967..f44577b926 100644
--- a/src/core/NEON/wrapper/intrinsics/exp.h
+++ b/src/core/NEON/wrapper/intrinsics/exp.h
@@ -25,6 +25,7 @@
#define ARM_COMPUTE_WRAPPER_EXP_H
#include "src/core/NEON/NEMath.h"
+
#include <arm_neon.h>
namespace arm_compute
diff --git a/src/core/NEON/wrapper/intrinsics/getlane.h b/src/core/NEON/wrapper/intrinsics/getlane.h
index 2052751612..ae813bb2fa 100644
--- a/src/core/NEON/wrapper/intrinsics/getlane.h
+++ b/src/core/NEON/wrapper/intrinsics/getlane.h
@@ -33,7 +33,7 @@ namespace wrapper
#define VGETLANE_IMPL_8(stype, vtype, postfix) \
inline stype vgetlane(const vtype vector, const unsigned int lane) \
{ \
- switch(lane) \
+ switch (lane) \
{ \
case 0: \
return vget_lane_##postfix(vector, 0); \
@@ -59,7 +59,7 @@ namespace wrapper
#define VGETLANE_IMPL_4(stype, vtype, postfix) \
inline stype vgetlane(const vtype vector, const unsigned int lane) \
{ \
- switch(lane) \
+ switch (lane) \
{ \
case 0: \
return vget_lane_##postfix(vector, 0); \
@@ -77,7 +77,7 @@ namespace wrapper
#define VGETLANE_IMPL_2(stype, vtype, postfix) \
inline stype vgetlane(const vtype vector, const unsigned int lane) \
{ \
- switch(lane) \
+ switch (lane) \
{ \
case 0: \
return vget_lane_##postfix(vector, 0); \
@@ -102,7 +102,7 @@ VGETLANE_IMPL_4(float16_t, float16x4_t, f16)
#define VGETQLANE_IMPL_16(stype, vtype, postfix) \
inline stype vgetlane(const vtype vector, const unsigned int lane) \
{ \
- switch(lane) \
+ switch (lane) \
{ \
case 0: \
return vgetq_lane_##postfix(vector, 0); \
@@ -144,7 +144,7 @@ VGETLANE_IMPL_4(float16_t, float16x4_t, f16)
#define VGETQLANE_IMPL_8(stype, vtype, postfix) \
inline stype vgetlane(const vtype vector, const unsigned int lane) \
{ \
- switch(lane) \
+ switch (lane) \
{ \
case 0: \
return vgetq_lane_##postfix(vector, 0); \
@@ -170,7 +170,7 @@ VGETLANE_IMPL_4(float16_t, float16x4_t, f16)
#define VGETQLANE_IMPL_4(stype, vtype, postfix) \
inline stype vgetlane(const vtype vector, const unsigned int lane) \
{ \
- switch(lane) \
+ switch (lane) \
{ \
case 0: \
return vgetq_lane_##postfix(vector, 0); \
@@ -188,7 +188,7 @@ VGETLANE_IMPL_4(float16_t, float16x4_t, f16)
#define VGETQLANE_IMPL_2(stype, vtype, postfix) \
inline stype vgetlane(const vtype vector, const unsigned int lane) \
{ \
- switch(lane) \
+ switch (lane) \
{ \
case 0: \
return vgetq_lane_##postfix(vector, 0); \
diff --git a/src/core/NEON/wrapper/intrinsics/intrinsics.h b/src/core/NEON/wrapper/intrinsics/intrinsics.h
index 070f3c7065..97975ebe7c 100644
--- a/src/core/NEON/wrapper/intrinsics/intrinsics.h
+++ b/src/core/NEON/wrapper/intrinsics/intrinsics.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -31,6 +31,7 @@
#include "src/core/NEON/wrapper/intrinsics/ceq.h"
#include "src/core/NEON/wrapper/intrinsics/cge.h"
#include "src/core/NEON/wrapper/intrinsics/cgt.h"
+#include "src/core/NEON/wrapper/intrinsics/cgtz.h"
#include "src/core/NEON/wrapper/intrinsics/cle.h"
#include "src/core/NEON/wrapper/intrinsics/clt.h"
#include "src/core/NEON/wrapper/intrinsics/combine.h"
@@ -38,6 +39,7 @@
#include "src/core/NEON/wrapper/intrinsics/div.h"
#include "src/core/NEON/wrapper/intrinsics/dup_n.h"
#include "src/core/NEON/wrapper/intrinsics/eor.h"
+#include "src/core/NEON/wrapper/intrinsics/erf.h"
#include "src/core/NEON/wrapper/intrinsics/exp.h"
#include "src/core/NEON/wrapper/intrinsics/ext.h"
#include "src/core/NEON/wrapper/intrinsics/gethigh.h"
@@ -65,6 +67,7 @@
#include "src/core/NEON/wrapper/intrinsics/rev64.h"
#include "src/core/NEON/wrapper/intrinsics/round.h"
#include "src/core/NEON/wrapper/intrinsics/setlane.h"
+#include "src/core/NEON/wrapper/intrinsics/shr.h"
#include "src/core/NEON/wrapper/intrinsics/sin.h"
#include "src/core/NEON/wrapper/intrinsics/sqrt.h"
#include "src/core/NEON/wrapper/intrinsics/store.h"
@@ -72,4 +75,17 @@
#include "src/core/NEON/wrapper/intrinsics/tanh.h"
#include "src/core/NEON/wrapper/intrinsics/tbl.h"
+#if defined(__ARM_FEATURE_SVE)
+#include "src/core/NEON/wrapper/intrinsics/svcnt.h"
+#include "src/core/NEON/wrapper/intrinsics/svcvt.h"
+#include "src/core/NEON/wrapper/intrinsics/svdup_n.h"
+#include "src/core/NEON/wrapper/intrinsics/svexp.h"
+#include "src/core/NEON/wrapper/intrinsics/svlog.h"
+#include "src/core/NEON/wrapper/intrinsics/svpow.h"
+#include "src/core/NEON/wrapper/intrinsics/svptrue.h"
+#include "src/core/NEON/wrapper/intrinsics/svqadd.h"
+#include "src/core/NEON/wrapper/intrinsics/svsin.h"
+#include "src/core/NEON/wrapper/intrinsics/svwhilelt.h"
+#endif /* defined(__ARM_FEATURE_SVE) */
+
#endif /* ARM_COMPUTE_WRAPPER_INTRINSICS_H */
diff --git a/src/core/NEON/wrapper/intrinsics/inv.h b/src/core/NEON/wrapper/intrinsics/inv.h
index de398b0403..e443be679b 100644
--- a/src/core/NEON/wrapper/intrinsics/inv.h
+++ b/src/core/NEON/wrapper/intrinsics/inv.h
@@ -25,6 +25,7 @@
#define ARM_COMPUTE_WRAPPER_INV_H
#include "src/core/NEON/NEMath.h"
+
#include <arm_neon.h>
namespace arm_compute
diff --git a/src/core/NEON/wrapper/intrinsics/invsqrt.h b/src/core/NEON/wrapper/intrinsics/invsqrt.h
index 2343efa8f8..257b445cc7 100644
--- a/src/core/NEON/wrapper/intrinsics/invsqrt.h
+++ b/src/core/NEON/wrapper/intrinsics/invsqrt.h
@@ -25,6 +25,7 @@
#define ARM_COMPUTE_WRAPPER_INVSQRT_H
#include "src/core/NEON/NEMath.h"
+
#include <arm_neon.h>
namespace arm_compute
diff --git a/src/core/NEON/wrapper/intrinsics/log.h b/src/core/NEON/wrapper/intrinsics/log.h
index 357a77ca78..d091407edb 100644
--- a/src/core/NEON/wrapper/intrinsics/log.h
+++ b/src/core/NEON/wrapper/intrinsics/log.h
@@ -25,6 +25,7 @@
#define ARM_COMPUTE_WRAPPER_LOG_H
#include "src/core/NEON/NEMath.h"
+
#include <arm_neon.h>
namespace arm_compute
diff --git a/src/core/NEON/wrapper/intrinsics/max.h b/src/core/NEON/wrapper/intrinsics/max.h
index cec437d171..32d38a856c 100644
--- a/src/core/NEON/wrapper/intrinsics/max.h
+++ b/src/core/NEON/wrapper/intrinsics/max.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2020, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_WRAPPER_MAX_H
-#define ARM_COMPUTE_WRAPPER_MAX_H
+#ifndef ACL_SRC_CORE_NEON_WRAPPER_INTRINSICS_MAX_H
+#define ACL_SRC_CORE_NEON_WRAPPER_INTRINSICS_MAX_H
#include <arm_neon.h>
@@ -59,6 +59,39 @@ VMAX_IMPL(float16_t, float16x8_t, vmaxq, f16)
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
#undef VMAX_IMPL
+
+#if defined(__aarch64__)
+// VMAXV: Across vector max
+#define VMAXV_IMPL(stype, vtype, prefix, postfix) \
+ inline stype vmaxv(const vtype &a) \
+ { \
+ return prefix##_##postfix(a); \
+ }
+
+VMAXV_IMPL(uint8_t, uint8x8_t, vmaxv, u8)
+VMAXV_IMPL(int8_t, int8x8_t, vmaxv, s8)
+VMAXV_IMPL(uint16_t, uint16x4_t, vmaxv, u16)
+VMAXV_IMPL(int16_t, int16x4_t, vmaxv, s16)
+VMAXV_IMPL(uint32_t, uint32x2_t, vmaxv, u32)
+VMAXV_IMPL(int32_t, int32x2_t, vmaxv, s32)
+VMAXV_IMPL(float, float32x2_t, vmaxv, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VMAXV_IMPL(float16_t, float16x4_t, vmaxv, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VMAXV_IMPL(uint8_t, uint8x16_t, vmaxvq, u8)
+VMAXV_IMPL(int8_t, int8x16_t, vmaxvq, s8)
+VMAXV_IMPL(uint16_t, uint16x8_t, vmaxvq, u16)
+VMAXV_IMPL(int16_t, int16x8_t, vmaxvq, s16)
+VMAXV_IMPL(uint32_t, uint32x4_t, vmaxvq, u32)
+VMAXV_IMPL(int32_t, int32x4_t, vmaxvq, s32)
+VMAXV_IMPL(float, float32x4_t, vmaxvq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VMAXV_IMPL(float16_t, float16x8_t, vmaxvq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VMAXV_IMPL
+#endif // defined(__aarch64__)
} // namespace wrapper
} // namespace arm_compute
-#endif /* ARM_COMPUTE_WRAPPER_MAX_H */
+#endif // ACL_SRC_CORE_NEON_WRAPPER_INTRINSICS_MAX_H
diff --git a/src/core/NEON/wrapper/intrinsics/mla.h b/src/core/NEON/wrapper/intrinsics/mla.h
index 2b38b34137..9fb5a08f9b 100644
--- a/src/core/NEON/wrapper/intrinsics/mla.h
+++ b/src/core/NEON/wrapper/intrinsics/mla.h
@@ -66,6 +66,22 @@ VMLA_IMPL2(float16x8_t, float16x8_t, vaddq, vmulq, f16)
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
#undef VMLA_IMPL
+
+#define VMLAL_IMPL(vtype_in, vtype_out, postfix) \
+ inline vtype_out vmlal(const vtype_out &a, const vtype_in &b, const vtype_in &c) \
+ { \
+ return vmlal_##postfix(a, b, c); \
+ }
+
+VMLAL_IMPL(uint8x8_t, uint16x8_t, u8)
+VMLAL_IMPL(int8x8_t, int16x8_t, s8)
+VMLAL_IMPL(uint16x4_t, uint32x4_t, u16)
+VMLAL_IMPL(int16x4_t, int32x4_t, s16)
+VMLAL_IMPL(uint32x2_t, uint64x2_t, u32)
+VMLAL_IMPL(int32x2_t, int64x2_t, s32)
+
+#undef VMLAL_IMPL
+
} // namespace wrapper
} // namespace arm_compute
#endif /* ARM_COMPUTE_WRAPPER_MLA_H */
diff --git a/src/core/NEON/wrapper/intrinsics/pow.h b/src/core/NEON/wrapper/intrinsics/pow.h
index 61f834ed23..dfd6ccc358 100644
--- a/src/core/NEON/wrapper/intrinsics/pow.h
+++ b/src/core/NEON/wrapper/intrinsics/pow.h
@@ -25,6 +25,7 @@
#define ARM_COMPUTE_WRAPPER_POW_H
#include "src/core/NEON/NEMath.h"
+
#include <arm_neon.h>
namespace arm_compute
diff --git a/src/core/NEON/wrapper/intrinsics/qmov.h b/src/core/NEON/wrapper/intrinsics/qmov.h
index 167f3cf43b..9a0a23a241 100644
--- a/src/core/NEON/wrapper/intrinsics/qmov.h
+++ b/src/core/NEON/wrapper/intrinsics/qmov.h
@@ -31,15 +31,13 @@ namespace arm_compute
namespace wrapper
{
template <typename T>
-inline typename std::enable_if<std::is_same<T, uint8_t>::value, uint8x8_t>::type
-vqmov(const int16x8_t &a)
+inline typename std::enable_if<std::is_same<T, uint8_t>::value, uint8x8_t>::type vqmov(const int16x8_t &a)
{
return vqmovun_s16(a);
}
template <typename T>
-inline typename std::enable_if<std::is_same<T, int8_t>::value, int8x8_t>::type
-vqmov(const int16x8_t &a)
+inline typename std::enable_if<std::is_same<T, int8_t>::value, int8x8_t>::type vqmov(const int16x8_t &a)
{
return vqmovn_s16(a);
}
diff --git a/src/core/NEON/wrapper/intrinsics/reinterpret.h b/src/core/NEON/wrapper/intrinsics/reinterpret.h
index 0c26cd9008..c2c4f720d2 100644
--- a/src/core/NEON/wrapper/intrinsics/reinterpret.h
+++ b/src/core/NEON/wrapper/intrinsics/reinterpret.h
@@ -35,14 +35,14 @@ namespace wrapper
{ \
return prefix##_##postfix1##_##postfix2(a); \
} \
- \
+ \
inline ptype vreinterpret(const ptype &a) \
{ \
return a; \
}
VREINTERPRET_IMPL(int16x4_t, uint16x4_t, vreinterpret, s16, u16)
-
+VREINTERPRET_IMPL(int16x8_t, uint16x8_t, vreinterpretq, s16, u16)
VREINTERPRET_IMPL(int32x4_t, uint32x4_t, vreinterpretq, s32, u32)
} // namespace wrapper
} // namespace arm_compute
diff --git a/src/core/NEON/wrapper/intrinsics/round.h b/src/core/NEON/wrapper/intrinsics/round.h
index d23feb6b42..7789aab770 100644
--- a/src/core/NEON/wrapper/intrinsics/round.h
+++ b/src/core/NEON/wrapper/intrinsics/round.h
@@ -25,6 +25,7 @@
#define ARM_COMPUTE_WRAPPER_ROUND_H
#include "src/core/NEON/NEMath.h"
+
#include <arm_neon.h>
namespace arm_compute
diff --git a/src/core/NEON/wrapper/intrinsics/setlane.h b/src/core/NEON/wrapper/intrinsics/setlane.h
index 197eedacb5..259b8eaf90 100644
--- a/src/core/NEON/wrapper/intrinsics/setlane.h
+++ b/src/core/NEON/wrapper/intrinsics/setlane.h
@@ -33,7 +33,7 @@ namespace wrapper
#define VSETLANE_IMPL_8(stype, atype, vtype, postfix) \
inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
{ \
- switch(lane) \
+ switch (lane) \
{ \
case 0: \
return vset_lane_##postfix(value, vector, 0); \
@@ -59,7 +59,7 @@ namespace wrapper
#define VSETLANE_IMPL_4(stype, atype, vtype, postfix) \
inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
{ \
- switch(lane) \
+ switch (lane) \
{ \
case 0: \
return vset_lane_##postfix(value, vector, 0); \
@@ -77,7 +77,7 @@ namespace wrapper
#define VSETLANE_IMPL_2(stype, atype, vtype, postfix) \
inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
{ \
- switch(lane) \
+ switch (lane) \
{ \
case 0: \
return vset_lane_##postfix(value, vector, 0); \
@@ -102,7 +102,7 @@ VSETLANE_IMPL_4(float16x4_t, float16_t, float16x4_t, f16)
#define VSETQLANE_IMPL_16(stype, atype, vtype, postfix) \
inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
{ \
- switch(lane) \
+ switch (lane) \
{ \
case 0: \
return vsetq_lane_##postfix(value, vector, 0); \
@@ -144,7 +144,7 @@ VSETLANE_IMPL_4(float16x4_t, float16_t, float16x4_t, f16)
#define VSETQLANE_IMPL_8(stype, atype, vtype, postfix) \
inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
{ \
- switch(lane) \
+ switch (lane) \
{ \
case 0: \
return vsetq_lane_##postfix(value, vector, 0); \
@@ -170,7 +170,7 @@ VSETLANE_IMPL_4(float16x4_t, float16_t, float16x4_t, f16)
#define VSETQLANE_IMPL_4(stype, atype, vtype, postfix) \
inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
{ \
- switch(lane) \
+ switch (lane) \
{ \
case 0: \
return vsetq_lane_##postfix(value, vector, 0); \
diff --git a/src/core/NEON/wrapper/intrinsics/shr.h b/src/core/NEON/wrapper/intrinsics/shr.h
new file mode 100644
index 0000000000..6ccb9cdf92
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/shr.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef ARM_COMPUTE_WRAPPER_SHR_H
+#define ARM_COMPUTE_WRAPPER_SHR_H
+
+#include <arm_neon.h>
+#include <type_traits>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VQRSHRN_IMPL(half_vtype, vtype, prefix, postfix) \
+ template <int b> \
+ inline half_vtype vqrshrn(const vtype &a) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+VQRSHRN_IMPL(int8x8_t, int16x8_t, vqrshrn_n, s16)
+VQRSHRN_IMPL(uint8x8_t, uint16x8_t, vqrshrn_n, u16)
+VQRSHRN_IMPL(int16x4_t, int32x4_t, vqrshrn_n, s32)
+VQRSHRN_IMPL(uint16x4_t, uint32x4_t, vqrshrn_n, u32)
+VQRSHRN_IMPL(int32x2_t, int64x2_t, vqrshrn_n, s64)
+VQRSHRN_IMPL(uint32x2_t, uint64x2_t, vqrshrn_n, u64)
+
+#undef VQRSHRN_IMPL
+
+#ifdef __aarch64__
+#define VQRSHRN_SCALAR_IMPL(half_vtype, vtype, prefix, postfix) \
+ template <int b> \
+ inline half_vtype vqrshrn(const vtype &a) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VQRSHRN_SCALAR_IMPL(int8_t, int16_t, vqrshrnh_n, s16)
+VQRSHRN_SCALAR_IMPL(uint8_t, uint16_t, vqrshrnh_n, u16)
+VQRSHRN_SCALAR_IMPL(int16_t, int32_t, vqrshrns_n, s32)
+VQRSHRN_SCALAR_IMPL(uint16_t, uint32_t, vqrshrns_n, u32)
+VQRSHRN_SCALAR_IMPL(int32_t, int64_t, vqrshrnd_n, s64)
+VQRSHRN_SCALAR_IMPL(uint32_t, uint64_t, vqrshrnd_n, u64)
+
+#undef VQRSHRN_SCALAR_IMPL
+#endif // __aarch64__
+
+// This function is the mixed version of VQRSHRN and VQRSHRUN.
+// The input vector is always signed integer, while the returned vector
+// can be either signed or unsigned depending on the signedness of scalar type T.
+#define VQRSHRN_EX_IMPL(half_vtype, vtype, prefix_signed, prefix_unsigned, postfix) \
+ template <int b, typename T> \
+ inline typename std::enable_if<std::is_integral<T>::value && std::is_signed<T>::value, half_vtype>::type \
+ vqrshrn_ex(const vtype &a) \
+ { \
+ return prefix_signed##_##postfix(a, b); \
+ } \
+ \
+ template <int b, typename T> \
+ inline typename std::enable_if<std::is_integral<T>::value && !std::is_signed<T>::value, u##half_vtype>::type \
+ vqrshrn_ex(const vtype &a) \
+ { \
+ return prefix_unsigned##_##postfix(a, b); \
+ }
+VQRSHRN_EX_IMPL(int8x8_t, int16x8_t, vqrshrn_n, vqrshrun_n, s16)
+VQRSHRN_EX_IMPL(int16x4_t, int32x4_t, vqrshrn_n, vqrshrun_n, s32)
+VQRSHRN_EX_IMPL(int32x2_t, int64x2_t, vqrshrn_n, vqrshrun_n, s64)
+#undef VQRSHRN_EX_IMPL
+
+#define VSHR_IMPL(vtype, prefix, postfix) \
+ template <int b> \
+ inline vtype vshr_n(const vtype &a) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+VSHR_IMPL(uint8x8_t, vshr_n, u8)
+VSHR_IMPL(int8x8_t, vshr_n, s8)
+#undef VSHR_IMPL
+
+#define VSHRQ_IMPL(vtype, prefix, postfix) \
+ template <int b> \
+ inline vtype vshrq_n(const vtype &a) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+VSHRQ_IMPL(uint32x4_t, vshrq_n, u32)
+VSHRQ_IMPL(int32x4_t, vshrq_n, s32)
+#undef VSHRQ_IMPL
+
+#ifdef __aarch64__
+#define VSHRQ_SCALAR_IMPL(vtype, prefix, postfix) \
+ template <int b> \
+ inline vtype vshrq_n(const vtype &a) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+VSHRQ_SCALAR_IMPL(uint32_t, vshrd_n, u64)
+VSHRQ_SCALAR_IMPL(int32_t, vshrd_n, s64)
+
+#undef VSHRQ_SCALAR_IMPL
+#endif // __aarch64__
+
+#ifdef __aarch64__
+#define VQRSHRN_EX_SCALAR_IMPL(half_vtype, vtype, prefix_signed, prefix_unsigned, postfix) \
+ template <int b, typename T> \
+ inline typename std::enable_if<std::is_integral<T>::value && std::is_signed<T>::value, half_vtype>::type \
+ vqrshrn_ex(const vtype &a) \
+ { \
+ return prefix_signed##_##postfix(a, b); \
+ } \
+ \
+ template <int b, typename T> \
+ inline typename std::enable_if<std::is_integral<T>::value && !std::is_signed<T>::value, u##half_vtype>::type \
+ vqrshrn_ex(const vtype &a) \
+ { \
+ return prefix_unsigned##_##postfix(a, b); \
+ }
+
+VQRSHRN_EX_SCALAR_IMPL(int8_t, int16_t, vqrshrnh_n, vqrshrunh_n, s16)
+VQRSHRN_EX_SCALAR_IMPL(int16_t, int32_t, vqrshrns_n, vqrshruns_n, s32)
+VQRSHRN_EX_SCALAR_IMPL(int32_t, int64_t, vqrshrnd_n, vqrshrund_n, s64)
+
+#undef VQRSHRN_EX_SCALAR_IMPL
+#endif // __aarch64__
+
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_SHR_H */
diff --git a/src/core/NEON/wrapper/intrinsics/sin.h b/src/core/NEON/wrapper/intrinsics/sin.h
index 03c2813a32..d24fdfa816 100644
--- a/src/core/NEON/wrapper/intrinsics/sin.h
+++ b/src/core/NEON/wrapper/intrinsics/sin.h
@@ -25,6 +25,7 @@
#define ARM_COMPUTE_WRAPPER_SIN_H
#include "src/core/NEON/NEMath.h"
+
#include <arm_neon.h>
namespace arm_compute
@@ -54,4 +55,4 @@ VSIN_IMPL_INT(int32x4_t, vsinq, s32)
#undef vsub_IMPL
} // namespace wrapper
} // namespace arm_compute
-#endif /* ARM_COMPUTE_WRAPPER_SUB_H */ \ No newline at end of file
+#endif /* ARM_COMPUTE_WRAPPER_SIN_H */
diff --git a/src/core/NEON/wrapper/intrinsics/store.h b/src/core/NEON/wrapper/intrinsics/store.h
index 6dda432ea9..ce1b9a554e 100644
--- a/src/core/NEON/wrapper/intrinsics/store.h
+++ b/src/core/NEON/wrapper/intrinsics/store.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2020, 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,8 +44,6 @@ VSTORE_IMPL(uint16_t, uint16x4_t, vst1, u16)
VSTORE_IMPL(int16_t, int16x4_t, vst1, s16)
VSTORE_IMPL(uint32_t, uint32x2_t, vst1, u32)
VSTORE_IMPL(int32_t, int32x2_t, vst1, s32)
-//VSTORE_IMPL(uint64_t, 1, vst1, u64)
-//VSTORE_IMPL(int64_t, 1, vst1, s64)
VSTORE_IMPL(float, float32x2_t, vst1, f32)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
VSTORE_IMPL(float16_t, float16x4_t, vst1, f16)
@@ -57,8 +55,6 @@ VSTORE_IMPL(uint16_t, uint16x8_t, vst1q, u16)
VSTORE_IMPL(int16_t, int16x8_t, vst1q, s16)
VSTORE_IMPL(uint32_t, uint32x4_t, vst1q, u32)
VSTORE_IMPL(int32_t, int32x4_t, vst1q, s32)
-//VSTORE_IMPL(uint64_t, 2, vst1q, u64)
-//VSTORE_IMPL(int64_t, 2, vst1q, s64)
VSTORE_IMPL(float, float32x4_t, vst1q, f32)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
VSTORE_IMPL(float16_t, float16x8_t, vst1q, f16)
diff --git a/src/core/NEON/wrapper/intrinsics/sub.h b/src/core/NEON/wrapper/intrinsics/sub.h
index 475986d0f6..20436714ef 100644
--- a/src/core/NEON/wrapper/intrinsics/sub.h
+++ b/src/core/NEON/wrapper/intrinsics/sub.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2020, 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -98,6 +98,21 @@ VQSUB_IMPL(float16x8_t, float16x8_t, vsubq, f16)
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
#undef VQSUB_IMPL
+#define VSUBL_IMPL(rtype, vtype, prefix, postfix) \
+ inline rtype vsubl(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VSUBL_IMPL(int16x8_t, int8x8_t, vsubl, s8)
+VSUBL_IMPL(int32x4_t, int16x4_t, vsubl, s16)
+VSUBL_IMPL(int64x2_t, int32x2_t, vsubl, s32)
+VSUBL_IMPL(uint16x8_t, uint8x8_t, vsubl, u8)
+VSUBL_IMPL(uint32x4_t, uint16x4_t, vsubl, u16)
+VSUBL_IMPL(uint64x2_t, uint32x2_t, vsubl, u32)
+
+#undef VSUBL_IMPL
+
} // namespace wrapper
} // namespace arm_compute
#endif /* ARM_COMPUTE_WRAPPER_SUB_H */
diff --git a/src/core/NEON/wrapper/intrinsics/svcnt.h b/src/core/NEON/wrapper/intrinsics/svcnt.h
new file mode 100644
index 0000000000..c4652504b4
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/svcnt.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_NEON_WRAPPER_INTRINSICS_SVCNT_H
+#define SRC_CORE_NEON_WRAPPER_INTRINSICS_SVCNT_H
+#if defined(__ARM_FEATURE_SVE)
+#include <arm_sve.h>
+namespace arm_compute
+{
+namespace wrapper
+{
+template <size_t element_size>
+inline uint64_t svcnt_size();
+
+template <>
+inline uint64_t svcnt_size<64>()
+{
+ return svcntd();
+}
+
+template <>
+inline uint64_t svcnt_size<32>()
+{
+ return svcntw();
+}
+
+template <>
+inline uint64_t svcnt_size<16>()
+{
+ return svcnth();
+}
+
+template <>
+inline uint64_t svcnt_size<8>()
+{
+ return svcntb();
+}
+
+template <typename T>
+inline uint64_t svcnt()
+{
+ return svcnt_size<sizeof(T) * 8>();
+}
+} // namespace wrapper
+} // namespace arm_compute
+
+#endif /* defined(__ARM_FEATURE_SVE) */
+#endif /* SRC_CORE_NEON_WRAPPER_INTRINSICS_SVCNT_H */
diff --git a/src/core/NEON/wrapper/intrinsics/svcvt.h b/src/core/NEON/wrapper/intrinsics/svcvt.h
new file mode 100644
index 0000000000..00ef7b7eb3
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/svcvt.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_NEON_WRAPPER_INTRINSICS_SVCVT_H
+#define SRC_CORE_NEON_WRAPPER_INTRINSICS_SVCVT_H
+#if defined(__ARM_FEATURE_SVE)
+#include <arm_sve.h>
+namespace arm_compute
+{
+namespace wrapper
+{
+#define SVCVT_Z_TO_F32_IMPL(vtype) \
+ template <typename T> \
+ inline typename std::enable_if<std::is_same<T, float>::value, svfloat32_t>::type svcvt_z(svbool_t pg, \
+ const vtype &a) \
+ { \
+ return svcvt_f32_z(pg, a); \
+ }
+
+SVCVT_Z_TO_F32_IMPL(svuint32_t)
+SVCVT_Z_TO_F32_IMPL(svint32_t)
+SVCVT_Z_TO_F32_IMPL(svfloat16_t)
+
+#undef SVCVT_Z_TO_F32_IMPL
+
+#define SVCVT_Z_TO_F16_IMPL(vtype) \
+ template <typename T> \
+ inline typename std::enable_if<std::is_same<T, float16_t>::value, svfloat16_t>::type svcvt_z(svbool_t pg, \
+ const vtype &a) \
+ { \
+ return svcvt_f16_z(pg, a); \
+ }
+
+SVCVT_Z_TO_F16_IMPL(svuint32_t)
+SVCVT_Z_TO_F16_IMPL(svint32_t)
+SVCVT_Z_TO_F16_IMPL(svfloat32_t)
+
+#undef SVCVT_Z_TO_F16_IMPL
+
+#define SVCVT_Z_TO_S32_IMPL(vtype) \
+ template <typename T> \
+ inline typename std::enable_if<std::is_same<T, int32_t>::value, svint32_t>::type svcvt_z(svbool_t pg, \
+ const vtype &a) \
+ { \
+ return svcvt_s32_z(pg, a); \
+ }
+
+SVCVT_Z_TO_S32_IMPL(svfloat16_t)
+SVCVT_Z_TO_S32_IMPL(svfloat32_t)
+
+#undef SVCVT_Z_TO_S32_IMPL
+
+} // namespace wrapper
+} // namespace arm_compute
+
+#endif /* defined(__ARM_FEATURE_SVE) */
+#endif /* SRC_CORE_NEON_WRAPPER_INTRINSICS_SVCVT_H */
diff --git a/src/core/NEON/wrapper/intrinsics/svdup_n.h b/src/core/NEON/wrapper/intrinsics/svdup_n.h
new file mode 100644
index 0000000000..9c42c86db7
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/svdup_n.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2020, 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_NEON_WRAPPER_INTRINSICS_SVDUP_N_H
+#define SRC_CORE_NEON_WRAPPER_INTRINSICS_SVDUP_N_H
+#if defined(__ARM_FEATURE_SVE)
+#include <arm_sve.h>
+namespace arm_compute
+{
+namespace wrapper
+{
+#define SVDUP_N_IMPL(etype, vtype, postfix) \
+ inline vtype svdup_n(etype a) \
+ { \
+ return svdup_n_##postfix(a); \
+ }
+
+SVDUP_N_IMPL(int8_t, svint8_t, s8)
+SVDUP_N_IMPL(int16_t, svint16_t, s16)
+SVDUP_N_IMPL(int32_t, svint32_t, s32)
+SVDUP_N_IMPL(int64_t, svint64_t, s64)
+SVDUP_N_IMPL(uint8_t, svuint8_t, u8)
+SVDUP_N_IMPL(uint16_t, svuint16_t, u16)
+SVDUP_N_IMPL(uint32_t, svuint32_t, u32)
+SVDUP_N_IMPL(uint64_t, svuint64_t, u64)
+SVDUP_N_IMPL(float16_t, svfloat16_t, f16)
+SVDUP_N_IMPL(float, svfloat32_t, f32)
+SVDUP_N_IMPL(float64_t, svfloat64_t, f64)
+#if __ARM_FEATURE_SVE_BF16
+SVDUP_N_IMPL(bfloat16_t, svbfloat16_t, bf16)
+#endif // #if __ARM_FEATURE_SVE_BF16
+
+#undef SVDUP_N_IMPL
+
+} // namespace wrapper
+} // namespace arm_compute
+
+#endif /* defined(__ARM_FEATURE_SVE) */
+#endif /* SRC_CORE_NEON_WRAPPER_INTRINSICS_SVDUP_N_H */
diff --git a/src/core/NEON/wrapper/intrinsics/svexp.h b/src/core/NEON/wrapper/intrinsics/svexp.h
new file mode 100644
index 0000000000..1e8bce3960
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/svexp.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_NEON_WRAPPER_INTRINSICS_SVEXP_H
+#define SRC_CORE_NEON_WRAPPER_INTRINSICS_SVEXP_H
+
+#if defined(__ARM_FEATURE_SVE)
+#include "src/core/NEON/SVEMath.h"
+
+#include <arm_sve.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define SVEXP_IMPL(vtype, postfix) \
+ inline vtype svexp_z(svbool_t pg, const vtype &a) \
+ { \
+ return svexp_##postfix##_z(pg, a); \
+ }
+
+SVEXP_IMPL(svfloat32_t, f32)
+SVEXP_IMPL(svfloat16_t, f16)
+
+#undef SVEXP_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+
+#endif /* defined(__ARM_FEATURE_SVE) */
+#endif /* SRC_CORE_NEON_WRAPPER_INTRINSICS_SVEXP_H */
diff --git a/src/core/NEON/wrapper/intrinsics/svlog.h b/src/core/NEON/wrapper/intrinsics/svlog.h
new file mode 100644
index 0000000000..b4630e20ed
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/svlog.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_NEON_WRAPPER_INTRINSICS_SVLOG_H
+#define SRC_CORE_NEON_WRAPPER_INTRINSICS_SVLOG_H
+#if defined(__ARM_FEATURE_SVE)
+#include "src/core/NEON/SVEMath.h"
+
+#include <arm_sve.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define SVLOG_IMPL(vtype, postfix) \
+ inline vtype svlog_z(svbool_t pg, const vtype &a) \
+ { \
+ return svlog_##postfix##_z(pg, a); \
+ }
+
+SVLOG_IMPL(svfloat32_t, f32)
+SVLOG_IMPL(svfloat16_t, f16)
+
+#undef SVLOG_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* defined(__ARM_FEATURE_SVE) */
+#endif /* SRC_CORE_NEON_WRAPPER_INTRINSICS_SVLOG_H */
diff --git a/src/core/NEON/wrapper/intrinsics/svpow.h b/src/core/NEON/wrapper/intrinsics/svpow.h
new file mode 100644
index 0000000000..0f58d758cb
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/svpow.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2020-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_NEON_WRAPPER_INTRINSICS_SVPOW_H
+#define SRC_CORE_NEON_WRAPPER_INTRINSICS_SVPOW_H
+#if defined(__ARM_FEATURE_SVE)
+#include "src/core/NEON/SVEMath.h"
+namespace arm_compute
+{
+namespace wrapper
+{
+#define SVPOW_Z_IMPL(type, postfix) \
+ inline type svpow_z(svbool_t pg, const type &a, const type &b) \
+ { \
+ return svpow_##postfix##_z(pg, a, b); \
+ }
+
+#define SVPOW_Z_IMPL_INT(type, postfix) \
+ inline type svpow_z(svbool_t pg, const type &a, const type &b) \
+ { \
+ ARM_COMPUTE_UNUSED(pg, a, b); \
+ ARM_COMPUTE_ERROR("Not supported"); \
+ }
+
+SVPOW_Z_IMPL(svfloat32_t, f32)
+SVPOW_Z_IMPL(svfloat16_t, f16)
+SVPOW_Z_IMPL_INT(svint16_t, s16)
+
+#undef SVPOW_Z_IMPL
+#undef SVPOW_Z_IMPL_INT
+} // namespace wrapper
+} // namespace arm_compute
+
+#endif /* defined(__ARM_FEATURE_SVE) */
+#endif /* SRC_CORE_NEON_WRAPPER_INTRINSICS_SVPOW_H */ \ No newline at end of file
diff --git a/src/core/NEON/wrapper/intrinsics/svptrue.h b/src/core/NEON/wrapper/intrinsics/svptrue.h
new file mode 100644
index 0000000000..6ed00bccbf
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/svptrue.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_NEON_WRAPPER_INTRINSICS_SVPTRUE_H
+#define SRC_CORE_NEON_WRAPPER_INTRINSICS_SVPTRUE_H
+#if defined(__ARM_FEATURE_SVE)
+#include <arm_sve.h>
+namespace arm_compute
+{
+namespace wrapper
+{
+template <size_t element_size>
+inline svbool_t svptrue_size();
+
+template <>
+inline svbool_t svptrue_size<64>()
+{
+ return svptrue_b64();
+}
+
+template <>
+inline svbool_t svptrue_size<32>()
+{
+ return svptrue_b32();
+}
+
+template <>
+inline svbool_t svptrue_size<16>()
+{
+ return svptrue_b16();
+}
+
+template <>
+inline svbool_t svptrue_size<8>()
+{
+ return svptrue_b8();
+}
+
+template <typename T>
+svbool_t svptrue()
+{
+ return svptrue_size<sizeof(T) * 8>();
+}
+} // namespace wrapper
+} // namespace arm_compute
+
+#endif /* defined(__ARM_FEATURE_SVE) */
+#endif /* SRC_CORE_NEON_WRAPPER_INTRINSICS_SVPTRUE_H */
diff --git a/src/core/NEON/wrapper/intrinsics/svqadd.h b/src/core/NEON/wrapper/intrinsics/svqadd.h
new file mode 100644
index 0000000000..fd45d82104
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/svqadd.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2020-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_NEON_WRAPPER_INTRINSICS_SVQADD_H
+#define SRC_CORE_NEON_WRAPPER_INTRINSICS_SVQADD_H
+#if defined(__ARM_FEATURE_SVE)
+#include <arm_sve.h>
+namespace arm_compute
+{
+namespace wrapper
+{
+#define SVQADD_IMPL_F(type, postfix, svppostfix) \
+ inline type svqadd(const type &val1, const type &val2) \
+ { \
+ return svadd_##postfix##_z(svptrue_##svppostfix(), val1, val2); \
+ }
+
+SVQADD_IMPL_F(svfloat32_t, f32, b32)
+SVQADD_IMPL_F(svfloat16_t, f16, b16)
+#undef SVQADD_IMPL_F
+
+#define SVQADD_IMPL(type, postfix) \
+ inline type svqadd(const type &val1, const type &val2) \
+ { \
+ return svqadd_##postfix(val1, val2); \
+ }
+
+SVQADD_IMPL(svint32_t, s32)
+SVQADD_IMPL(svint16_t, s16)
+SVQADD_IMPL(svint8_t, s8)
+SVQADD_IMPL(svuint32_t, u32)
+SVQADD_IMPL(svuint16_t, u16)
+SVQADD_IMPL(svuint8_t, u8)
+
+#undef SVQADD_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+
+#endif /* defined(__ARM_FEATURE_SVE) */
+#endif /* SRC_CORE_NEON_WRAPPER_INTRINSICS_SVQADD_H */ \ No newline at end of file
diff --git a/src/core/NEON/wrapper/intrinsics/svreinterpret.h b/src/core/NEON/wrapper/intrinsics/svreinterpret.h
new file mode 100644
index 0000000000..e98742676d
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/svreinterpret.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_NEON_WRAPPER_INTRINSICS_SVREINTERPRET_H
+#define SRC_CORE_NEON_WRAPPER_INTRINSICS_SVREINTERPRET_H
+#if defined(__ARM_FEATURE_SVE)
+#include <arm_sve.h>
+namespace arm_compute
+{
+namespace wrapper
+{
+#define SVREINTERPRET_TO_F32_IMPL(vtype) \
+ template <typename T> \
+ inline typename std::enable_if<std::is_same<T, float>::value, svfloat32_t>::type svreinterpret(const vtype &a) \
+ { \
+ return svreinterpret_f32(a); \
+ }
+
+SVREINTERPRET_TO_F32_IMPL(svuint32_t)
+#undef SVREINTERPRET_TO_F32_IMPL
+
+#define SVREINTERPRET_TO_U32_IMPL(vtype) \
+ template <typename T> \
+ inline typename std::enable_if<std::is_same<T, uint32_t>::value, svuint32_t>::type svreinterpret(const vtype &a) \
+ { \
+ return svreinterpret_u32(a); \
+ }
+
+SVREINTERPRET_TO_U32_IMPL(svint32_t)
+SVREINTERPRET_TO_U32_IMPL(svfloat32_t)
+#undef SVREINTERPRET_TO_U32_IMPL
+
+} // namespace wrapper
+} // namespace arm_compute
+
+#endif /* defined(__ARM_FEATURE_SVE) */
+#endif /* SRC_CORE_NEON_WRAPPER_INTRINSICS_SVREINTERPRET_H */ \ No newline at end of file
diff --git a/src/core/NEON/wrapper/intrinsics/svsin.h b/src/core/NEON/wrapper/intrinsics/svsin.h
new file mode 100644
index 0000000000..05d88d0250
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/svsin.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_NEON_WRAPPER_INTRINSICS_SVSIN_H
+#define SRC_CORE_NEON_WRAPPER_INTRINSICS_SVSIN_H
+#if defined(__ARM_FEATURE_SVE)
+#include "src/core/NEON/SVEMath.h"
+namespace arm_compute
+{
+namespace wrapper
+{
+#define SVSIN_Z_IMPL(type, postfix) \
+ inline type svsin_z(svbool_t pg, const type &val) \
+ { \
+ return svsin_##postfix##_z(pg, val); \
+ }
+
+SVSIN_Z_IMPL(svfloat32_t, f32)
+SVSIN_Z_IMPL(svfloat16_t, f16)
+
+#undef SVSIN_Z_IMPL
+
+} // namespace wrapper
+} // namespace arm_compute
+
+#endif /* defined(__ARM_FEATURE_SVE) */
+#endif /* SRC_CORE_NEON_WRAPPER_INTRINSICS_SVSIN_H */ \ No newline at end of file
diff --git a/src/core/NEON/wrapper/intrinsics/svwhilelt.h b/src/core/NEON/wrapper/intrinsics/svwhilelt.h
new file mode 100644
index 0000000000..f0f84a9508
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/svwhilelt.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_NEON_WRAPPER_INTRINSICS_SVWHILELT_H
+#define SRC_CORE_NEON_WRAPPER_INTRINSICS_SVWHILELT_H
+#if defined(__ARM_FEATURE_SVE)
+#include <arm_sve.h>
+namespace arm_compute
+{
+namespace wrapper
+{
+#define SVWHILELT_IMPL(type) \
+ template <size_t element_size> \
+ inline svbool_t svwhilelt_size(type a, type b); \
+ \
+ template <> \
+ inline svbool_t svwhilelt_size<64>(type a, type b) \
+ { \
+ return svwhilelt_b64(a, b); \
+ } \
+ template <> \
+ inline svbool_t svwhilelt_size<32>(type a, type b) \
+ { \
+ return svwhilelt_b32(a, b); \
+ } \
+ template <> \
+ inline svbool_t svwhilelt_size<16>(type a, type b) \
+ { \
+ return svwhilelt_b16(a, b); \
+ } \
+ template <> \
+ inline svbool_t svwhilelt_size<8>(type a, type b) \
+ { \
+ return svwhilelt_b8(a, b); \
+ }
+
+SVWHILELT_IMPL(int32_t)
+SVWHILELT_IMPL(uint32_t)
+SVWHILELT_IMPL(int64_t)
+SVWHILELT_IMPL(uint64_t)
+
+#undef SVWHILELT_IMPL
+
+template <typename ScalarType, typename IndexType>
+inline svbool_t svwhilelt(IndexType a, IndexType b)
+{
+ return svwhilelt_size<sizeof(ScalarType) * 8>(a, b);
+}
+} // namespace wrapper
+} // namespace arm_compute
+
+#endif /* defined(__ARM_FEATURE_SVE) */
+#endif /* SRC_CORE_NEON_WRAPPER_INTRINSICS_SVWHILELT_H */
diff --git a/src/core/NEON/wrapper/intrinsics/tanh.h b/src/core/NEON/wrapper/intrinsics/tanh.h
index daeaf19997..e74f0e86fe 100644
--- a/src/core/NEON/wrapper/intrinsics/tanh.h
+++ b/src/core/NEON/wrapper/intrinsics/tanh.h
@@ -25,6 +25,7 @@
#define ARM_COMPUTE_WRAPPER_TANH_H
#include "src/core/NEON/NEMath.h"
+
#include <arm_neon.h>
namespace arm_compute
diff --git a/src/core/NEON/wrapper/scalar/add.h b/src/core/NEON/wrapper/scalar/add.h
index 642d9261f3..2ec88869e3 100644
--- a/src/core/NEON/wrapper/scalar/add.h
+++ b/src/core/NEON/wrapper/scalar/add.h
@@ -32,22 +32,22 @@ namespace wrapper
{
inline uint8_t add_sat(const uint8_t &a, const uint8_t &b)
{
- const uint8x8_t va = { a, 0, 0, 0, 0, 0, 0, 0 };
- const uint8x8_t vb = { b, 0, 0, 0, 0, 0, 0, 0 };
+ const uint8x8_t va = {a, 0, 0, 0, 0, 0, 0, 0};
+ const uint8x8_t vb = {b, 0, 0, 0, 0, 0, 0, 0};
return vget_lane_u8(vqadd_u8(va, vb), 0);
}
inline int16_t add_sat(const int16_t &a, const int16_t &b)
{
- const int16x4_t va = { a, 0, 0, 0 };
- const int16x4_t vb = { b, 0, 0, 0 };
+ const int16x4_t va = {a, 0, 0, 0};
+ const int16x4_t vb = {b, 0, 0, 0};
return vget_lane_s16(vqadd_s16(va, vb), 0);
}
inline int32_t add_sat(const int32_t &a, const int32_t &b)
{
- const int32x2_t va = { a, 0 };
- const int32x2_t vb = { b, 0 };
+ const int32x2_t va = {a, 0};
+ const int32x2_t vb = {b, 0};
return vget_lane_s32(vqadd_s32(va, vb), 0);
}
diff --git a/src/core/NEON/wrapper/scalar/sub.h b/src/core/NEON/wrapper/scalar/sub.h
index 1fe51d75fc..00de7d867f 100644
--- a/src/core/NEON/wrapper/scalar/sub.h
+++ b/src/core/NEON/wrapper/scalar/sub.h
@@ -32,22 +32,22 @@ namespace wrapper
{
inline uint8_t sub_sat(const uint8_t &a, const uint8_t &b)
{
- const uint8x8_t va = { a, 0, 0, 0, 0, 0, 0, 0 };
- const uint8x8_t vb = { b, 0, 0, 0, 0, 0, 0, 0 };
+ const uint8x8_t va = {a, 0, 0, 0, 0, 0, 0, 0};
+ const uint8x8_t vb = {b, 0, 0, 0, 0, 0, 0, 0};
return vget_lane_u8(vqsub_u8(va, vb), 0);
}
inline int16_t sub_sat(const int16_t &a, const int16_t &b)
{
- const int16x4_t va = { a, 0, 0, 0 };
- const int16x4_t vb = { b, 0, 0, 0 };
+ const int16x4_t va = {a, 0, 0, 0};
+ const int16x4_t vb = {b, 0, 0, 0};
return vget_lane_s16(vqsub_s16(va, vb), 0);
}
inline int32_t sub_sat(const int32_t &a, const int32_t &b)
{
- const int32x2_t va = { a, 0 };
- const int32x2_t vb = { b, 0 };
+ const int32x2_t va = {a, 0};
+ const int32x2_t vb = {b, 0};
return vget_lane_s32(vqsub_s32(va, vb), 0);
}
diff --git a/src/core/NEON/wrapper/svtraits.h b/src/core/NEON/wrapper/svtraits.h
new file mode 100644
index 0000000000..330d272752
--- /dev/null
+++ b/src/core/NEON/wrapper/svtraits.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2021-2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_NEON_WRAPPER_SVTRAITS_H
+#define SRC_CORE_NEON_WRAPPER_SVTRAITS_H
+#if defined(ARM_COMPUTE_ENABLE_SVE)
+#include "src/core/NEON/SVEMath.h"
+
+#include <arm_sve.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+template <typename T>
+struct sve_scalar;
+template <typename T>
+struct sve_vector;
+
+#define DEFINE_TYPES(stype) \
+ template <> \
+ struct sve_scalar<sv##stype> \
+ { \
+ using type = stype; \
+ }; \
+ template <> \
+ struct sve_vector<stype> \
+ { \
+ using type = sv##stype; \
+ };
+
+DEFINE_TYPES(int8_t)
+DEFINE_TYPES(uint8_t)
+DEFINE_TYPES(int16_t)
+DEFINE_TYPES(uint16_t)
+DEFINE_TYPES(int32_t)
+DEFINE_TYPES(uint32_t)
+DEFINE_TYPES(int64_t)
+DEFINE_TYPES(uint64_t)
+DEFINE_TYPES(float16_t)
+DEFINE_TYPES(float32_t)
+DEFINE_TYPES(float64_t)
+
+#if __ARM_FEATURE_SVE_BF16
+DEFINE_TYPES(bfloat16_t)
+#endif // #if __ARM_FEATURE_SVE_BF16
+
+#undef DEFINE_TYPES
+
+} // namespace wrapper
+} // namespace arm_compute
+
+#endif /* defined(ARM_COMPUTE_ENABLE_SVE) */
+#endif /* #ifndef SRC_CORE_NEON_WRAPPER_SVTRAITS_H */
diff --git a/src/core/NEON/wrapper/traits.h b/src/core/NEON/wrapper/traits.h
index eafbeef372..1dac61af74 100644
--- a/src/core/NEON/wrapper/traits.h
+++ b/src/core/NEON/wrapper/traits.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,11 +21,24 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_WRAPPER_TRAITS_H
-#define ARM_COMPUTE_WRAPPER_TRAITS_H
+#ifndef ACL_SRC_CORE_NEON_WRAPPER_TRAITS_H
+#define ACL_SRC_CORE_NEON_WRAPPER_TRAITS_H
+
+#include "arm_compute/core/CoreTypes.h"
+
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#include "src/cpu/CpuTypes.h" // required for float16_t
+#endif // defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
#include <arm_neon.h>
+#if defined(ARM_COMPUTE_ENABLE_SVE) && defined(__ARM_FEATURE_SVE)
+#include <arm_sve.h>
+#endif /* defined(ARM_COMPUTE_ENABLE_SVE) && defined(__ARM_FEATURE_SVE) */
+
+#include <cmath>
+#include <cstdint>
+
namespace arm_compute
{
namespace wrapper
@@ -40,7 +53,7 @@ struct vector_64_tag {};
/** 128-bit vector tag */
struct vector_128_tag {};
-/** Create the appropriate NEON vector given its type and size in terms of elements */
+/** Create the appropriate SIMD vector given its type and size in terms of elements */
template <typename T, int S> struct neon_vector;
// Specializations
@@ -84,7 +97,7 @@ enum class BitWidth
W128, /**< 128-bit width */
};
-/** Create the appropriate NEON vector given its type and size in terms of bits */
+/** Create the appropriate SIMD vector given its type and size in terms of bits */
template <typename T, BitWidth BW> struct neon_bitvector;
// Specializations
#ifndef DOXYGEN_SKIP_THIS
@@ -110,6 +123,16 @@ template <> struct neon_bitvector<float_t, BitWidth::W128>{ using type = float32
template <> struct neon_bitvector<float16_t, BitWidth::W64>{ using type = float16x4_t; using tag_type = vector_64_tag; };
template <> struct neon_bitvector<float16_t, BitWidth::W128>{ using type = float16x8_t; using tag_type = vector_128_tag; };
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+
+#if defined(ARM_COMPUTE_ENABLE_SVE) && defined(__ARM_FEATURE_SVE)
+/** Create the appropriate SVE vector given its type */
+template <typename T> struct sve_vector;
+
+template <> struct sve_vector<uint8_t>{ using scalar_type = uint8_t; using type = svuint8_t; };
+template <> struct sve_vector<int8_t>{ using scalar_type = int8_t; using type = svint8_t; };
+#endif /* defined(ARM_COMPUTE_ENABLE_SVE) && defined(__ARM_FEATURE_SVE) */
+
#endif /* DOXYGEN_SKIP_THIS */
/** Helper type template to get the type of a neon vector */
@@ -137,4 +160,4 @@ using promote_t = typename promote<T>::type;
} // namespace traits
} // namespace wrapper
} // namespace arm_compute
-#endif /* ARM_COMPUTE_WRAPPER_TRAITS_H */
+#endif // ACL_SRC_CORE_NEON_WRAPPER_TRAITS_H
diff --git a/src/core/NEON/wrapper/wrapper.h b/src/core/NEON/wrapper/wrapper.h
index e5467e98ff..f3f3c5d9e6 100644
--- a/src/core/NEON/wrapper/wrapper.h
+++ b/src/core/NEON/wrapper/wrapper.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2020, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,10 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_WRAPPER_H
-#define ARM_COMPUTE_WRAPPER_H
+#ifndef ACL_SRC_CORE_NEON_WRAPPER_WRAPPER_H
+#define ACL_SRC_CORE_NEON_WRAPPER_WRAPPER_H
+
+#include "arm_compute/core/Error.h"
// Traits
#include "src/core/NEON/wrapper/traits.h"
@@ -31,4 +33,4 @@
#include "src/core/NEON/wrapper/intrinsics/intrinsics.h"
#include "src/core/NEON/wrapper/scalar/scalar.h"
-#endif /* ARM_COMPUTE_WRAPPER_H */
+#endif // ACL_SRC_CORE_NEON_WRAPPER_WRAPPER_H