Diffstat (limited to 'src/core/NEON/wrapper/intrinsics')
-rw-r--r--  src/core/NEON/wrapper/intrinsics/abs.h | 75
-rw-r--r--  src/core/NEON/wrapper/intrinsics/add.h | 201
-rw-r--r--  src/core/NEON/wrapper/intrinsics/and.h | 60
-rw-r--r--  src/core/NEON/wrapper/intrinsics/bsl.h | 64
-rw-r--r--  src/core/NEON/wrapper/intrinsics/ceq.h | 64
-rw-r--r--  src/core/NEON/wrapper/intrinsics/cge.h | 64
-rw-r--r--  src/core/NEON/wrapper/intrinsics/cgt.h | 64
-rw-r--r--  src/core/NEON/wrapper/intrinsics/cgtz.h | 62
-rw-r--r--  src/core/NEON/wrapper/intrinsics/cle.h | 64
-rw-r--r--  src/core/NEON/wrapper/intrinsics/clt.h | 64
-rw-r--r--  src/core/NEON/wrapper/intrinsics/combine.h | 53
-rw-r--r--  src/core/NEON/wrapper/intrinsics/cvt.h | 107
-rw-r--r--  src/core/NEON/wrapper/intrinsics/div.h | 74
-rw-r--r--  src/core/NEON/wrapper/intrinsics/dup_n.h | 66
-rw-r--r--  src/core/NEON/wrapper/intrinsics/eor.h | 56
-rw-r--r--  src/core/NEON/wrapper/intrinsics/erf.h | 52
-rw-r--r--  src/core/NEON/wrapper/intrinsics/exp.h | 57
-rw-r--r--  src/core/NEON/wrapper/intrinsics/ext.h | 62
-rw-r--r--  src/core/NEON/wrapper/intrinsics/gethigh.h | 53
-rw-r--r--  src/core/NEON/wrapper/intrinsics/getlane.h | 223
-rw-r--r--  src/core/NEON/wrapper/intrinsics/getlow.h | 53
-rw-r--r--  src/core/NEON/wrapper/intrinsics/intrinsics.h | 91
-rw-r--r--  src/core/NEON/wrapper/intrinsics/inv.h | 63
-rw-r--r--  src/core/NEON/wrapper/intrinsics/invsqrt.h | 62
-rw-r--r--  src/core/NEON/wrapper/intrinsics/load.h | 73
-rw-r--r--  src/core/NEON/wrapper/intrinsics/log.h | 57
-rw-r--r--  src/core/NEON/wrapper/intrinsics/max.h | 97
-rw-r--r--  src/core/NEON/wrapper/intrinsics/min.h | 64
-rw-r--r--  src/core/NEON/wrapper/intrinsics/mla.h | 87
-rw-r--r--  src/core/NEON/wrapper/intrinsics/movl.h | 49
-rw-r--r--  src/core/NEON/wrapper/intrinsics/movn.h | 62
-rw-r--r--  src/core/NEON/wrapper/intrinsics/mul.h | 64
-rw-r--r--  src/core/NEON/wrapper/intrinsics/neg.h | 58
-rw-r--r--  src/core/NEON/wrapper/intrinsics/not.h | 64
-rw-r--r--  src/core/NEON/wrapper/intrinsics/orr.h | 60
-rw-r--r--  src/core/NEON/wrapper/intrinsics/pmax.h | 53
-rw-r--r--  src/core/NEON/wrapper/intrinsics/pmin.h | 53
-rw-r--r--  src/core/NEON/wrapper/intrinsics/pow.h | 49
-rw-r--r--  src/core/NEON/wrapper/intrinsics/qmov.h | 47
-rw-r--r--  src/core/NEON/wrapper/intrinsics/qmovun.h | 46
-rw-r--r--  src/core/NEON/wrapper/intrinsics/reinterpret.h | 49
-rw-r--r--  src/core/NEON/wrapper/intrinsics/rev64.h | 64
-rw-r--r--  src/core/NEON/wrapper/intrinsics/round.h | 57
-rw-r--r--  src/core/NEON/wrapper/intrinsics/setlane.h | 208
-rw-r--r--  src/core/NEON/wrapper/intrinsics/shr.h | 148
-rw-r--r--  src/core/NEON/wrapper/intrinsics/sin.h | 58
-rw-r--r--  src/core/NEON/wrapper/intrinsics/sqrt.h | 56
-rw-r--r--  src/core/NEON/wrapper/intrinsics/store.h | 66
-rw-r--r--  src/core/NEON/wrapper/intrinsics/sub.h | 118
-rw-r--r--  src/core/NEON/wrapper/intrinsics/svcnt.h | 68
-rw-r--r--  src/core/NEON/wrapper/intrinsics/svcvt.h | 77
-rw-r--r--  src/core/NEON/wrapper/intrinsics/svdup_n.h | 59
-rw-r--r--  src/core/NEON/wrapper/intrinsics/svexp.h | 50
-rw-r--r--  src/core/NEON/wrapper/intrinsics/svlog.h | 48
-rw-r--r--  src/core/NEON/wrapper/intrinsics/svpow.h | 55
-rw-r--r--  src/core/NEON/wrapper/intrinsics/svptrue.h | 68
-rw-r--r--  src/core/NEON/wrapper/intrinsics/svqadd.h | 60
-rw-r--r--  src/core/NEON/wrapper/intrinsics/svreinterpret.h | 57
-rw-r--r--  src/core/NEON/wrapper/intrinsics/svsin.h | 47
-rw-r--r--  src/core/NEON/wrapper/intrinsics/svwhilelt.h | 73
-rw-r--r--  src/core/NEON/wrapper/intrinsics/tanh.h | 48
-rw-r--r--  src/core/NEON/wrapper/intrinsics/tbl.h | 45
62 files changed, 4416 insertions, 0 deletions
diff --git a/src/core/NEON/wrapper/intrinsics/abs.h b/src/core/NEON/wrapper/intrinsics/abs.h
new file mode 100644
index 0000000000..0d49a9ebf1
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/abs.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_ABS_H
+#define ARM_COMPUTE_WRAPPER_ABS_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VABS_IMPL(stype, vtype, prefix, postfix) \
+ inline vtype vabs(const vtype &a) \
+ { \
+ return prefix##_##postfix(a); \
+ }
+
+#define VQABS_IMPL(stype, vtype, prefix, postfix) \
+ inline vtype vqabs(const vtype &a) \
+ { \
+ return prefix##_##postfix(a); \
+ }
+
+// Absolute: vabs{q}_<type>. Vd[i] = |Va[i]|
+VABS_IMPL(int8x8_t, int8x8_t, vabs, s8)
+VABS_IMPL(int16x4_t, int16x4_t, vabs, s16)
+VABS_IMPL(int32x2_t, int32x2_t, vabs, s32)
+VABS_IMPL(float32x2_t, float32x2_t, vabs, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VABS_IMPL(float16x4_t, float16x4_t, vabs, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VABS_IMPL(int8x16_t, int8x16_t, vabsq, s8)
+VABS_IMPL(int16x8_t, int16x8_t, vabsq, s16)
+VABS_IMPL(int32x4_t, int32x4_t, vabsq, s32)
+VABS_IMPL(float32x4_t, float32x4_t, vabsq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VABS_IMPL(float16x8_t, float16x8_t, vabsq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+// Saturating absolute: vqabs{q}_<type>. Vd[i] = sat(|Va[i]|)
+VQABS_IMPL(int8x8_t, int8x8_t, vqabs, s8)
+VQABS_IMPL(int16x4_t, int16x4_t, vqabs, s16)
+VQABS_IMPL(int32x2_t, int32x2_t, vqabs, s32)
+
+VQABS_IMPL(int8x16_t, int8x16_t, vqabsq, s8)
+VQABS_IMPL(int16x8_t, int16x8_t, vqabsq, s16)
+VQABS_IMPL(int32x4_t, int32x4_t, vqabsq, s32)
+
+#undef VABS_IMPL
+#undef VQABS_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_ABS_H */
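
Usage sketch (not part of this patch): the VABS_IMPL/VQABS_IMPL expansions above give wrapper::vabs and wrapper::vqabs one overload per NEON vector type, so callers select the right intrinsic purely by argument type. Assuming the library's include paths and a NEON-capable target:

#include "src/core/NEON/wrapper/intrinsics/abs.h"
#include <arm_neon.h>

void abs_demo()
{
    const int8x8_t    d  = vdup_n_s8(-5);
    const float32x4_t q  = vdupq_n_f32(-1.5f);
    const int8x8_t    rd = arm_compute::wrapper::vabs(d); // expands to vabs_s8
    const float32x4_t rq = arm_compute::wrapper::vabs(q); // expands to vabsq_f32
    (void)rd;
    (void)rq;
}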
diff --git a/src/core/NEON/wrapper/intrinsics/add.h b/src/core/NEON/wrapper/intrinsics/add.h
new file mode 100644
index 0000000000..6134d75b29
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/add.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_ADD_H
+#define ARM_COMPUTE_WRAPPER_ADD_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VADD_IMPL(stype, vtype, prefix, postfix) \
+ inline vtype vadd(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VADD_IMPL(uint8x8_t, uint8x8_t, vadd, u8)
+VADD_IMPL(int8x8_t, int8x8_t, vadd, s8)
+VADD_IMPL(uint16x4_t, uint16x4_t, vadd, u16)
+VADD_IMPL(int16x4_t, int16x4_t, vadd, s16)
+VADD_IMPL(uint32x2_t, uint32x2_t, vadd, u32)
+VADD_IMPL(int32x2_t, int32x2_t, vadd, s32)
+VADD_IMPL(uint64x1_t, uint64x1_t, vadd, u64)
+VADD_IMPL(int64x1_t, int64x1_t, vadd, s64)
+VADD_IMPL(float32x2_t, float32x2_t, vadd, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VADD_IMPL(float16x4_t, float16x4_t, vadd, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VADD_IMPL(uint8x16_t, uint8x16_t, vaddq, u8)
+VADD_IMPL(int8x16_t, int8x16_t, vaddq, s8)
+VADD_IMPL(uint16x8_t, uint16x8_t, vaddq, u16)
+VADD_IMPL(int16x8_t, int16x8_t, vaddq, s16)
+VADD_IMPL(uint32x4_t, uint32x4_t, vaddq, u32)
+VADD_IMPL(int32x4_t, int32x4_t, vaddq, s32)
+VADD_IMPL(uint64x2_t, uint64x2_t, vaddq, u64)
+VADD_IMPL(int64x2_t, int64x2_t, vaddq, s64)
+VADD_IMPL(float32x4_t, float32x4_t, vaddq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VADD_IMPL(float16x8_t, float16x8_t, vaddq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#undef VADD_IMPL
+
+// VQADD: Vector saturating add (floating point has no notion of saturation, so plain vadd is used for f32/f16)
+#define VQADD_IMPL(stype, vtype, prefix, postfix) \
+ inline vtype vqadd(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VQADD_IMPL(uint8x8_t, uint8x8_t, vqadd, u8)
+VQADD_IMPL(int8x8_t, int8x8_t, vqadd, s8)
+VQADD_IMPL(uint16x4_t, uint16x4_t, vqadd, u16)
+VQADD_IMPL(int16x4_t, int16x4_t, vqadd, s16)
+VQADD_IMPL(uint32x2_t, uint32x2_t, vqadd, u32)
+VQADD_IMPL(int32x2_t, int32x2_t, vqadd, s32)
+VQADD_IMPL(uint64x1_t, uint64x1_t, vqadd, u64)
+VQADD_IMPL(int64x1_t, int64x1_t, vqadd, s64)
+VQADD_IMPL(float32x2_t, float32x2_t, vadd, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VQADD_IMPL(float16x4_t, float16x4_t, vadd, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VQADD_IMPL(uint8x16_t, uint8x16_t, vqaddq, u8)
+VQADD_IMPL(int8x16_t, int8x16_t, vqaddq, s8)
+VQADD_IMPL(uint16x8_t, uint16x8_t, vqaddq, u16)
+VQADD_IMPL(int16x8_t, int16x8_t, vqaddq, s16)
+VQADD_IMPL(uint32x4_t, uint32x4_t, vqaddq, u32)
+VQADD_IMPL(int32x4_t, int32x4_t, vqaddq, s32)
+VQADD_IMPL(uint64x2_t, uint64x2_t, vqaddq, u64)
+VQADD_IMPL(int64x2_t, int64x2_t, vqaddq, s64)
+VQADD_IMPL(float32x4_t, float32x4_t, vaddq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VQADD_IMPL(float16x8_t, float16x8_t, vaddq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#undef VQADD_IMPL
+
+// VADDW: Vector widening add
+#define VADDW_IMPL(wtype, vtype, prefix, postfix) \
+ inline wtype vaddw(const wtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VADDW_IMPL(uint16x8_t, uint8x8_t, vaddw, u8)
+VADDW_IMPL(int16x8_t, int8x8_t, vaddw, s8)
+VADDW_IMPL(uint32x4_t, uint16x4_t, vaddw, u16)
+VADDW_IMPL(int32x4_t, int16x4_t, vaddw, s16)
+VADDW_IMPL(uint64x2_t, uint32x2_t, vaddw, u32)
+VADDW_IMPL(int64x2_t, int32x2_t, vaddw, s32)
+#undef VADDW_IMPL
+
+// VADDL: Vector long add
+#define VADDL_IMPL(wtype, vtype, prefix, postfix) \
+ inline wtype vaddl(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VADDL_IMPL(uint16x8_t, uint8x8_t, vaddl, u8)
+VADDL_IMPL(int16x8_t, int8x8_t, vaddl, s8)
+VADDL_IMPL(uint32x4_t, uint16x4_t, vaddl, u16)
+VADDL_IMPL(int32x4_t, int16x4_t, vaddl, s16)
+VADDL_IMPL(uint64x2_t, uint32x2_t, vaddl, u32)
+VADDL_IMPL(int64x2_t, int32x2_t, vaddl, s32)
+#undef VADDL_IMPL
+
+#if defined(__aarch64__)
+// VADDV: Add across vector (horizontal reduction to a scalar)
+#define VADDV_IMPL(stype, vtype, prefix, postfix) \
+ inline stype vaddv(const vtype &a) \
+ { \
+ return prefix##_##postfix(a); \
+ }
+
+VADDV_IMPL(uint8_t, uint8x8_t, vaddv, u8)
+VADDV_IMPL(int8_t, int8x8_t, vaddv, s8)
+VADDV_IMPL(uint16_t, uint16x4_t, vaddv, u16)
+VADDV_IMPL(int16_t, int16x4_t, vaddv, s16)
+VADDV_IMPL(uint32_t, uint32x2_t, vaddv, u32)
+VADDV_IMPL(int32_t, int32x2_t, vaddv, s32)
+VADDV_IMPL(float, float32x2_t, vaddv, f32)
+
+VADDV_IMPL(uint8_t, uint8x16_t, vaddvq, u8)
+VADDV_IMPL(int8_t, int8x16_t, vaddvq, s8)
+VADDV_IMPL(uint16_t, uint16x8_t, vaddvq, u16)
+VADDV_IMPL(int16_t, int16x8_t, vaddvq, s16)
+VADDV_IMPL(uint32_t, uint32x4_t, vaddvq, u32)
+VADDV_IMPL(int32_t, int32x4_t, vaddvq, s32)
+VADDV_IMPL(uint64_t, uint64x2_t, vaddvq, u64)
+VADDV_IMPL(int64_t, int64x2_t, vaddvq, s64)
+VADDV_IMPL(float, float32x4_t, vaddvq, f32)
+#undef VADDV_IMPL
+#endif // defined(__aarch64__)
+
+// VPADDL: Pairwise add long (widening; signed and unsigned variants)
+#define VPADDL_IMPL(ltype, vtype, prefix, postfix) \
+ inline ltype vpaddl(const vtype &a) \
+ { \
+ return prefix##_##postfix(a); \
+ }
+
+VPADDL_IMPL(uint16x4_t, uint8x8_t, vpaddl, u8)
+VPADDL_IMPL(int16x4_t, int8x8_t, vpaddl, s8)
+VPADDL_IMPL(uint32x2_t, uint16x4_t, vpaddl, u16)
+VPADDL_IMPL(int32x2_t, int16x4_t, vpaddl, s16)
+VPADDL_IMPL(uint64x1_t, uint32x2_t, vpaddl, u32)
+VPADDL_IMPL(int64x1_t, int32x2_t, vpaddl, s32)
+
+VPADDL_IMPL(uint16x8_t, uint8x16_t, vpaddlq, u8)
+VPADDL_IMPL(int16x8_t, int8x16_t, vpaddlq, s8)
+VPADDL_IMPL(uint32x4_t, uint16x8_t, vpaddlq, u16)
+VPADDL_IMPL(int32x4_t, int16x8_t, vpaddlq, s16)
+VPADDL_IMPL(uint64x2_t, uint32x4_t, vpaddlq, u32)
+VPADDL_IMPL(int64x2_t, int32x4_t, vpaddlq, s32)
+#undef VPADDL_IMPL
+
+// VPADD: Add pairwise
+#define VPADD_IMPL(stype, vtype, prefix, postfix) \
+ inline vtype vpadd(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VPADD_IMPL(uint8x8_t, uint8x8_t, vpadd, u8)
+VPADD_IMPL(int8x8_t, int8x8_t, vpadd, s8)
+VPADD_IMPL(uint16x4_t, uint16x4_t, vpadd, u16)
+VPADD_IMPL(int16x4_t, int16x4_t, vpadd, s16)
+VPADD_IMPL(uint32x2_t, uint32x2_t, vpadd, u32)
+VPADD_IMPL(int32x2_t, int32x2_t, vpadd, s32)
+VPADD_IMPL(float32x2_t, float32x2_t, vpadd, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VPADD_IMPL(float16x4_t, float16x4_t, vpadd, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VPADD_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_ADD_H */
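
Usage sketch (not part of this patch): the widening pairwise adds compose into an overflow-safe horizontal sum; each vpaddl step doubles the lane width, so sixteen uint8 lanes reduce to a uint64 without saturating. The helper name sum_u8x16 is illustrative only.

#include "src/core/NEON/wrapper/intrinsics/add.h"
#include <arm_neon.h>

inline uint64_t sum_u8x16(const uint8x16_t &v)
{
    using namespace arm_compute::wrapper;
    const uint16x8_t s16 = vpaddl(v);   // vpaddlq_u8:  16 x u8  -> 8 x u16
    const uint32x4_t s32 = vpaddl(s16); // vpaddlq_u16:  8 x u16 -> 4 x u32
    const uint64x2_t s64 = vpaddl(s32); // vpaddlq_u32:  4 x u32 -> 2 x u64
    return vgetq_lane_u64(s64, 0) + vgetq_lane_u64(s64, 1);
}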
diff --git a/src/core/NEON/wrapper/intrinsics/and.h b/src/core/NEON/wrapper/intrinsics/and.h
new file mode 100644
index 0000000000..6ff7df3f5a
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/and.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_AND_H
+#define ARM_COMPUTE_WRAPPER_AND_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VAND_IMPL(stype, vtype, prefix, postfix) \
+ inline vtype vand(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VAND_IMPL(uint8_t, uint8x8_t, vand, u8)
+VAND_IMPL(int8_t, int8x8_t, vand, s8)
+VAND_IMPL(uint16_t, uint16x4_t, vand, u16)
+VAND_IMPL(int16_t, int16x4_t, vand, s16)
+VAND_IMPL(uint32_t, uint32x2_t, vand, u32)
+VAND_IMPL(int32_t, int32x2_t, vand, s32)
+VAND_IMPL(uint64_t, uint64x1_t, vand, u64)
+VAND_IMPL(int64_t, int64x1_t, vand, s64)
+
+VAND_IMPL(uint8_t, uint8x16_t, vandq, u8)
+VAND_IMPL(int8_t, int8x16_t, vandq, s8)
+VAND_IMPL(uint16_t, uint16x8_t, vandq, u16)
+VAND_IMPL(int16_t, int16x8_t, vandq, s16)
+VAND_IMPL(uint32_t, uint32x4_t, vandq, u32)
+VAND_IMPL(int32_t, int32x4_t, vandq, s32)
+VAND_IMPL(uint64_t, uint64x2_t, vandq, u64)
+VAND_IMPL(int64_t, int64x2_t, vandq, s64)
+
+#undef VAND_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_AND_H */
diff --git a/src/core/NEON/wrapper/intrinsics/bsl.h b/src/core/NEON/wrapper/intrinsics/bsl.h
new file mode 100644
index 0000000000..01c1cce3a6
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/bsl.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_BSL_H
+#define ARM_COMPUTE_WRAPPER_BSL_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VBSL_IMPL(stype, vtype, ctype, prefix, postfix) \
+ inline vtype vbsl(const ctype &a, const vtype &b, const vtype &c) \
+ { \
+ return prefix##_##postfix(a, b, c); \
+ }
+
+VBSL_IMPL(uint8_t, uint8x8_t, uint8x8_t, vbsl, u8)
+VBSL_IMPL(int8_t, int8x8_t, uint8x8_t, vbsl, s8)
+VBSL_IMPL(uint16_t, uint16x4_t, uint16x4_t, vbsl, u16)
+VBSL_IMPL(int16_t, int16x4_t, uint16x4_t, vbsl, s16)
+VBSL_IMPL(uint32_t, uint32x2_t, uint32x2_t, vbsl, u32)
+VBSL_IMPL(int32_t, int32x2_t, uint32x2_t, vbsl, s32)
+VBSL_IMPL(float32x2_t, float32x2_t, uint32x2_t, vbsl, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VBSL_IMPL(float16x4_t, float16x4_t, uint16x4_t, vbsl, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VBSL_IMPL(uint8_t, uint8x16_t, uint8x16_t, vbslq, u8)
+VBSL_IMPL(int8_t, int8x16_t, uint8x16_t, vbslq, s8)
+VBSL_IMPL(uint16_t, uint16x8_t, uint16x8_t, vbslq, u16)
+VBSL_IMPL(int16_t, int16x8_t, uint16x8_t, vbslq, s16)
+VBSL_IMPL(uint32_t, uint32x4_t, uint32x4_t, vbslq, u32)
+VBSL_IMPL(int32_t, int32x4_t, uint32x4_t, vbslq, s32)
+VBSL_IMPL(float32x4_t, float32x4_t, uint32x4_t, vbslq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VBSL_IMPL(float16x8_t, float16x8_t, uint16x8_t, vbslq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VBSL_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_BSL_H */
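
Usage sketch (not part of this patch): vbsl is a per-bit select, so an all-ones/all-zeros lane mask turns it into a per-lane select. The mask here is built with a plain load, since the comparison wrappers that normally produce it are introduced later in this patch; select_even_lanes is an illustrative name.

#include "src/core/NEON/wrapper/intrinsics/bsl.h"
#include <arm_neon.h>

inline int32x4_t select_even_lanes(const int32x4_t &a, const int32x4_t &b)
{
    static const uint32_t bits[4] = {0xFFFFFFFFu, 0u, 0xFFFFFFFFu, 0u};
    const uint32x4_t mask = vld1q_u32(bits); // lanes 0 and 2 come from a
    return arm_compute::wrapper::vbsl(mask, a, b);
}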
diff --git a/src/core/NEON/wrapper/intrinsics/ceq.h b/src/core/NEON/wrapper/intrinsics/ceq.h
new file mode 100644
index 0000000000..b0324e63db
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/ceq.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_CEQ_H
+#define ARM_COMPUTE_WRAPPER_CEQ_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VCEQ_IMPL(votype, vtype, prefix, postfix) \
+ inline votype vceq(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VCEQ_IMPL(uint8x8_t, uint8x8_t, vceq, u8)
+VCEQ_IMPL(uint8x8_t, int8x8_t, vceq, s8)
+VCEQ_IMPL(uint16x4_t, uint16x4_t, vceq, u16)
+VCEQ_IMPL(uint16x4_t, int16x4_t, vceq, s16)
+VCEQ_IMPL(uint32x2_t, uint32x2_t, vceq, u32)
+VCEQ_IMPL(uint32x2_t, int32x2_t, vceq, s32)
+VCEQ_IMPL(uint32x2_t, float32x2_t, vceq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCEQ_IMPL(uint16x4_t, float16x4_t, vceq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VCEQ_IMPL(uint8x16_t, uint8x16_t, vceqq, u8)
+VCEQ_IMPL(uint8x16_t, int8x16_t, vceqq, s8)
+VCEQ_IMPL(uint16x8_t, uint16x8_t, vceqq, u16)
+VCEQ_IMPL(uint16x8_t, int16x8_t, vceqq, s16)
+VCEQ_IMPL(uint32x4_t, uint32x4_t, vceqq, u32)
+VCEQ_IMPL(uint32x4_t, int32x4_t, vceqq, s32)
+VCEQ_IMPL(uint32x4_t, float32x4_t, vceqq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCEQ_IMPL(uint16x8_t, float16x8_t, vceqq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VCEQ_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_CEQ_H */
diff --git a/src/core/NEON/wrapper/intrinsics/cge.h b/src/core/NEON/wrapper/intrinsics/cge.h
new file mode 100644
index 0000000000..e4a7fcd423
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/cge.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2019-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_CGE_H
+#define ARM_COMPUTE_WRAPPER_CGE_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VCGE_IMPL(stype, vtype, rtype, prefix, postfix) \
+ inline rtype vcge(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VCGE_IMPL(uint8_t, uint8x8_t, uint8x8_t, vcge, u8)
+VCGE_IMPL(int8_t, int8x8_t, uint8x8_t, vcge, s8)
+VCGE_IMPL(uint16_t, uint16x4_t, uint16x4_t, vcge, u16)
+VCGE_IMPL(int16_t, int16x4_t, uint16x4_t, vcge, s16)
+VCGE_IMPL(uint32_t, uint32x2_t, uint32x2_t, vcge, u32)
+VCGE_IMPL(int32_t, int32x2_t, uint32x2_t, vcge, s32)
+VCGE_IMPL(float32x2_t, float32x2_t, uint32x2_t, vcge, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCGE_IMPL(float16x4_t, float16x4_t, uint16x4_t, vcge, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VCGE_IMPL(uint8_t, uint8x16_t, uint8x16_t, vcgeq, u8)
+VCGE_IMPL(int8_t, int8x16_t, uint8x16_t, vcgeq, s8)
+VCGE_IMPL(uint16_t, uint16x8_t, uint16x8_t, vcgeq, u16)
+VCGE_IMPL(int16_t, int16x8_t, uint16x8_t, vcgeq, s16)
+VCGE_IMPL(uint32_t, uint32x4_t, uint32x4_t, vcgeq, u32)
+VCGE_IMPL(int32_t, int32x4_t, uint32x4_t, vcgeq, s32)
+VCGE_IMPL(float32x4_t, float32x4_t, uint32x4_t, vcgeq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCGE_IMPL(float16x8_t, float16x8_t, uint16x8_t, vcgeq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VCGE_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_CGE_H */
diff --git a/src/core/NEON/wrapper/intrinsics/cgt.h b/src/core/NEON/wrapper/intrinsics/cgt.h
new file mode 100644
index 0000000000..f34d02fd1b
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/cgt.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_CGT_H
+#define ARM_COMPUTE_WRAPPER_CGT_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VCGT_IMPL(rtype, vtype, prefix, postfix) \
+ inline rtype vcgt(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VCGT_IMPL(uint8x8_t, uint8x8_t, vcgt, u8)
+VCGT_IMPL(uint8x8_t, int8x8_t, vcgt, s8)
+VCGT_IMPL(uint16x4_t, uint16x4_t, vcgt, u16)
+VCGT_IMPL(uint16x4_t, int16x4_t, vcgt, s16)
+VCGT_IMPL(uint32x2_t, uint32x2_t, vcgt, u32)
+VCGT_IMPL(uint32x2_t, int32x2_t, vcgt, s32)
+VCGT_IMPL(uint32x2_t, float32x2_t, vcgt, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCGT_IMPL(uint16x4_t, float16x4_t, vcgt, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VCGT_IMPL(uint8x16_t, uint8x16_t, vcgtq, u8)
+VCGT_IMPL(uint8x16_t, int8x16_t, vcgtq, s8)
+VCGT_IMPL(uint16x8_t, uint16x8_t, vcgtq, u16)
+VCGT_IMPL(uint16x8_t, int16x8_t, vcgtq, s16)
+VCGT_IMPL(uint32x4_t, uint32x4_t, vcgtq, u32)
+VCGT_IMPL(uint32x4_t, int32x4_t, vcgtq, s32)
+VCGT_IMPL(uint32x4_t, float32x4_t, vcgtq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCGT_IMPL(uint16x8_t, float16x8_t, vcgtq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VCGT_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_CGT_H */
diff --git a/src/core/NEON/wrapper/intrinsics/cgtz.h b/src/core/NEON/wrapper/intrinsics/cgtz.h
new file mode 100644
index 0000000000..025a7ba976
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/cgtz.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_CGTZ_H
+#define ARM_COMPUTE_WRAPPER_CGTZ_H
+
+#ifdef __aarch64__
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VCGTZ_IMPL(vtype, rtype, prefix, postfix) \
+ inline rtype vcgtz(const vtype &a) \
+ { \
+ return prefix##_##postfix(a); \
+ }
+
+VCGTZ_IMPL(int8x8_t, uint8x8_t, vcgtz, s8)
+VCGTZ_IMPL(int16x4_t, uint16x4_t, vcgtz, s16)
+VCGTZ_IMPL(int32x2_t, uint32x2_t, vcgtz, s32)
+VCGTZ_IMPL(float32x2_t, uint32x2_t, vcgtz, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCGTZ_IMPL(float16x4_t, uint16x4_t, vcgtz, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VCGTZ_IMPL(int8x16_t, uint8x16_t, vcgtzq, s8)
+VCGTZ_IMPL(int16x8_t, uint16x8_t, vcgtzq, s16)
+VCGTZ_IMPL(int32x4_t, uint32x4_t, vcgtzq, s32)
+VCGTZ_IMPL(float32x4_t, uint32x4_t, vcgtzq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCGTZ_IMPL(float16x8_t, uint16x8_t, vcgtzq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VCGTZ_IMPL
+
+} // namespace wrapper
+} // namespace arm_compute
+
+#endif // __aarch64__
+#endif /* ARM_COMPUTE_WRAPPER_CGTZ_H */
diff --git a/src/core/NEON/wrapper/intrinsics/cle.h b/src/core/NEON/wrapper/intrinsics/cle.h
new file mode 100644
index 0000000000..50c175f0c8
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/cle.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_CLE_H
+#define ARM_COMPUTE_WRAPPER_CLE_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VCLE_IMPL(stype, vtype, rtype, prefix, postfix) \
+ inline rtype vcle(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VCLE_IMPL(uint8_t, uint8x8_t, uint8x8_t, vcle, u8)
+VCLE_IMPL(int8_t, int8x8_t, uint8x8_t, vcle, s8)
+VCLE_IMPL(uint16_t, uint16x4_t, uint16x4_t, vcle, u16)
+VCLE_IMPL(int16_t, int16x4_t, uint16x4_t, vcle, s16)
+VCLE_IMPL(uint32_t, uint32x2_t, uint32x2_t, vcle, u32)
+VCLE_IMPL(int32_t, int32x2_t, uint32x2_t, vcle, s32)
+VCLE_IMPL(float32x2_t, float32x2_t, uint32x2_t, vcle, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCLE_IMPL(float16x4_t, float16x4_t, uint16x4_t, vcle, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VCLE_IMPL(uint8_t, uint8x16_t, uint8x16_t, vcleq, u8)
+VCLE_IMPL(int8_t, int8x16_t, uint8x16_t, vcleq, s8)
+VCLE_IMPL(uint16_t, uint16x8_t, uint16x8_t, vcleq, u16)
+VCLE_IMPL(int16_t, int16x8_t, uint16x8_t, vcleq, s16)
+VCLE_IMPL(uint32_t, uint32x4_t, uint32x4_t, vcleq, u32)
+VCLE_IMPL(int32_t, int32x4_t, uint32x4_t, vcleq, s32)
+VCLE_IMPL(float32x4_t, float32x4_t, uint32x4_t, vcleq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCLE_IMPL(float16x8_t, float16x8_t, uint16x8_t, vcleq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VCLE_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_CLE_H */
diff --git a/src/core/NEON/wrapper/intrinsics/clt.h b/src/core/NEON/wrapper/intrinsics/clt.h
new file mode 100644
index 0000000000..10fd320e4c
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/clt.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_CLT_H
+#define ARM_COMPUTE_WRAPPER_CLT_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VCLT_IMPL(votype, vtype, prefix, postfix) \
+ inline votype vclt(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VCLT_IMPL(uint8x8_t, uint8x8_t, vclt, u8)
+VCLT_IMPL(uint8x8_t, int8x8_t, vclt, s8)
+VCLT_IMPL(uint16x4_t, uint16x4_t, vclt, u16)
+VCLT_IMPL(uint16x4_t, int16x4_t, vclt, s16)
+VCLT_IMPL(uint32x2_t, uint32x2_t, vclt, u32)
+VCLT_IMPL(uint32x2_t, int32x2_t, vclt, s32)
+VCLT_IMPL(uint32x2_t, float32x2_t, vclt, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCLT_IMPL(uint16x4_t, float16x4_t, vclt, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VCLT_IMPL(uint8x16_t, uint8x16_t, vcltq, u8)
+VCLT_IMPL(uint8x16_t, int8x16_t, vcltq, s8)
+VCLT_IMPL(uint16x8_t, uint16x8_t, vcltq, u16)
+VCLT_IMPL(uint16x8_t, int16x8_t, vcltq, s16)
+VCLT_IMPL(uint32x4_t, uint32x4_t, vcltq, u32)
+VCLT_IMPL(uint32x4_t, int32x4_t, vcltq, s32)
+VCLT_IMPL(uint32x4_t, float32x4_t, vcltq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCLT_IMPL(uint16x8_t, float16x8_t, vcltq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VCLT_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_CLT_H */
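
Usage sketch (not part of this patch): the comparison wrappers (vceq, vcge, vcgt, vcgtz, vcle, vclt) all return an unsigned mask of matching lane width, which feeds straight into vbsl from bsl.h; together they give branchless per-lane selection. max_f32 is an illustrative name.

#include "src/core/NEON/wrapper/intrinsics/bsl.h"
#include "src/core/NEON/wrapper/intrinsics/cgt.h"
#include <arm_neon.h>

inline float32x4_t max_f32(const float32x4_t &a, const float32x4_t &b)
{
    using namespace arm_compute::wrapper;
    const uint32x4_t mask = vcgt(a, b); // all-ones in lanes where a > b
    return vbsl(mask, a, b);            // pick a where the mask is set, else b
}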
diff --git a/src/core/NEON/wrapper/intrinsics/combine.h b/src/core/NEON/wrapper/intrinsics/combine.h
new file mode 100644
index 0000000000..8b6a588f51
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/combine.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_COMBINE_H
+#define ARM_COMPUTE_WRAPPER_COMBINE_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VCOMBINE_IMPL(rtype, vtype, prefix, postfix) \
+ inline rtype vcombine(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VCOMBINE_IMPL(uint8x16_t, uint8x8_t, vcombine, u8)
+VCOMBINE_IMPL(int8x16_t, int8x8_t, vcombine, s8)
+VCOMBINE_IMPL(uint16x8_t, uint16x4_t, vcombine, u16)
+VCOMBINE_IMPL(int16x8_t, int16x4_t, vcombine, s16)
+VCOMBINE_IMPL(uint32x4_t, uint32x2_t, vcombine, u32)
+VCOMBINE_IMPL(int32x4_t, int32x2_t, vcombine, s32)
+VCOMBINE_IMPL(float32x4_t, float32x2_t, vcombine, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCOMBINE_IMPL(float16x8_t, float16x4_t, vcombine, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VCOMBINE_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_COMBINE_H */
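
Usage sketch (not part of this patch): vcombine stitches two 64-bit halves back into one 128-bit vector, the inverse of vget_low/vget_high; the round-trip below is a no-op by construction. roundtrip_u8 is an illustrative name.

#include "src/core/NEON/wrapper/intrinsics/combine.h"
#include <arm_neon.h>

inline uint8x16_t roundtrip_u8(const uint8x16_t &v)
{
    const uint8x8_t lo = vget_low_u8(v);
    const uint8x8_t hi = vget_high_u8(v);
    return arm_compute::wrapper::vcombine(lo, hi); // expands to vcombine_u8
}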
diff --git a/src/core/NEON/wrapper/intrinsics/cvt.h b/src/core/NEON/wrapper/intrinsics/cvt.h
new file mode 100644
index 0000000000..381de2284a
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/cvt.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2020, 2022-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_CVT_H
+#define ARM_COMPUTE_WRAPPER_CVT_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VCVT_TO_F32_IMPL(ptype, vtype, prefix, postfix1, postfix2) \
+ template <typename T> \
+ inline typename std::enable_if<std::is_same<T, float>::value, float32x4_t>::type vcvt(const vtype &a) \
+ { \
+ return prefix##_##postfix1##_##postfix2(a); \
+ }
+
+VCVT_TO_F32_IMPL(float32x4_t, uint32x4_t, vcvtq, f32, u32)
+VCVT_TO_F32_IMPL(float32x4_t, int32x4_t, vcvtq, f32, s32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCVT_TO_F32_IMPL(float32x4_t, float16x4_t, vcvt, f32, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#undef VCVT_TO_F32_IMPL
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#define VCVT_TO_F16_IMPL(ptype, vtype, prefix, postfix1, postfix2) \
+ template <typename T> \
+ inline typename std::enable_if<std::is_same<T, float16_t>::value, float16x4_t>::type vcvt(const vtype &a) \
+ { \
+ return prefix##_##postfix1##_##postfix2(a); \
+ }
+
+VCVT_TO_F16_IMPL(float16x4_t, float32x4_t, vcvt, f16, f32)
+#undef VCVT_TO_F16_IMPL
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+template <typename T>
+inline typename std::enable_if<std::is_same<T, uint8_t>::value || std::is_same<T, uint32_t>::value, uint32x4_t>::type
+vcvt(const float32x4_t &a)
+{
+ return vcvtq_u32_f32(a);
+}
+
+template <typename T>
+inline typename std::enable_if<std::is_same<T, int8_t>::value || std::is_same<T, int32_t>::value, int32x4_t>::type
+vcvt(const float32x4_t &a)
+{
+ return vcvtq_s32_f32(a);
+}
+
+#ifdef __aarch64__
+template <typename T>
+inline typename std::enable_if<std::is_same<T, uint32_t>::value, uint32x4_t>::type vcvta(const float32x4_t &a)
+{
+ return vcvtaq_u32_f32(a);
+}
+
+template <typename T>
+inline typename std::enable_if<std::is_same<T, int32_t>::value, int32x4_t>::type vcvta(const float32x4_t &a)
+{
+ return vcvtaq_s32_f32(a);
+}
+#endif //__aarch64__
+
+#if defined(ARM_COMPUTE_ENABLE_BF16)
+/** Convert 2x128-bit floating point vectors into 1x128-bit bfloat16 vector
+ *
+ * @param[in] inptr Pointer to the input memory to load values from
+ * @param[out] outptr Pointer to the output memory to store values to
+ */
+inline void vcvt_bf16_f32(const float *inptr, uint16_t *outptr)
+{
+ __asm __volatile("ldp q0, q1, [%[inptr]]\n"
+ ".inst 0xea16800\n" // BFCVTN v0, v0
+ ".inst 0x4ea16820\n" // BFCVTN2 v0, v1
+ "str q0, [%[outptr]]\n"
+ : [inptr] "+r"(inptr)
+ : [outptr] "r"(outptr)
+ : "v0", "v1", "memory");
+}
+#endif /* defined(ARM_COMPUTE_ENABLE_BF16) */
+
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_CVT_H */
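
Usage sketch (not part of this patch): because the conversion target cannot be deduced from the argument, vcvt takes the destination element type as an explicit template argument; the enable_if overloads above then dispatch to the matching intrinsic. cvt_demo is an illustrative name.

#include "src/core/NEON/wrapper/intrinsics/cvt.h"
#include <arm_neon.h>

void cvt_demo(const int32x4_t &qi, const float32x4_t &qf)
{
    using namespace arm_compute::wrapper;
    const float32x4_t f = vcvt<float>(qi);   // vcvtq_f32_s32
    const int32x4_t   i = vcvt<int32_t>(qf); // vcvtq_s32_f32 (truncates toward zero)
#ifdef __aarch64__
    const int32x4_t   r = vcvta<int32_t>(qf); // rounds to nearest, ties away from zero
    (void)r;
#endif
    (void)f;
    (void)i;
}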
diff --git a/src/core/NEON/wrapper/intrinsics/div.h b/src/core/NEON/wrapper/intrinsics/div.h
new file mode 100644
index 0000000000..ece991a5b0
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/div.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2019-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_DIV_H
+#define ARM_COMPUTE_WRAPPER_DIV_H
+
+#include "src/core/NEON/NEMath.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#ifdef __aarch64__
+
+#define VDIV_IMPL(stype, vtype, prefix, postfix) \
+ inline vtype vdiv(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+VDIV_IMPL(float32x2_t, float32x2_t, vdiv, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VDIV_IMPL(float16x4_t, float16x4_t, vdiv, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VDIV_IMPL(float32x4_t, float32x4_t, vdivq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VDIV_IMPL(float16x8_t, float16x8_t, vdivq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#else // __aarch64__
+
+#define VDIV_IMPL(stype, vtype, mul_prefix, inv_prefix, postfix) \
+ inline vtype vdiv(const vtype &a, const vtype &b) \
+ { \
+ return mul_prefix##_##postfix(a, inv_prefix##_##postfix(b)); \
+ }
+VDIV_IMPL(float32x2_t, float32x2_t, vmul, vinv, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VDIV_IMPL(float16x4_t, float16x4_t, vmul, vinv, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VDIV_IMPL(float32x4_t, float32x4_t, vmulq, vinvq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VDIV_IMPL(float16x8_t, float16x8_t, vmulq, vinvq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#endif // __aarch64__
+
+#undef VDIV_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_DIV_H */
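
Worth noting (not part of this patch): on AArch64 vdiv lowers to the hardware divide, while the 32-bit fallback computes a * (1/b) with the reciprocal helper vinv from NEMath, so the two paths can differ in the last ULPs. A sketch of typical use; normalize is an illustrative name:

#include "src/core/NEON/wrapper/intrinsics/div.h"
#include <arm_neon.h>

inline float32x4_t normalize(const float32x4_t &x, const float32x4_t &scale)
{
    // Exact vdivq_f32 on AArch64; reciprocal-multiply approximation elsewhere.
    return arm_compute::wrapper::vdiv(x, scale);
}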
diff --git a/src/core/NEON/wrapper/intrinsics/dup_n.h b/src/core/NEON/wrapper/intrinsics/dup_n.h
new file mode 100644
index 0000000000..e745aa4a8c
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/dup_n.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_DUP_N_H
+#define ARM_COMPUTE_WRAPPER_DUP_N_H
+
+#include "src/core/NEON/wrapper/traits.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VDUP_N_IMPL(stype, vtype, prefix, postfix, tag) \
+ inline vtype vdup_n(stype value, tag) \
+ { \
+ return prefix##_##postfix(value); \
+ }
+
+VDUP_N_IMPL(uint8_t, uint8x8_t, vdup_n, u8, traits::vector_64_tag)
+VDUP_N_IMPL(int8_t, int8x8_t, vdup_n, s8, traits::vector_64_tag)
+VDUP_N_IMPL(uint16_t, uint16x4_t, vdup_n, u16, traits::vector_64_tag)
+VDUP_N_IMPL(int16_t, int16x4_t, vdup_n, s16, traits::vector_64_tag)
+VDUP_N_IMPL(uint32_t, uint32x2_t, vdup_n, u32, traits::vector_64_tag)
+VDUP_N_IMPL(int32_t, int32x2_t, vdup_n, s32, traits::vector_64_tag)
+VDUP_N_IMPL(float, float32x2_t, vdup_n, f32, traits::vector_64_tag)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VDUP_N_IMPL(float16_t, float16x4_t, vdup_n, f16, traits::vector_64_tag)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VDUP_N_IMPL(uint8_t, uint8x16_t, vdupq_n, u8, traits::vector_128_tag)
+VDUP_N_IMPL(int8_t, int8x16_t, vdupq_n, s8, traits::vector_128_tag)
+VDUP_N_IMPL(uint16_t, uint16x8_t, vdupq_n, u16, traits::vector_128_tag)
+VDUP_N_IMPL(int16_t, int16x8_t, vdupq_n, s16, traits::vector_128_tag)
+VDUP_N_IMPL(uint32_t, uint32x4_t, vdupq_n, u32, traits::vector_128_tag)
+VDUP_N_IMPL(int32_t, int32x4_t, vdupq_n, s32, traits::vector_128_tag)
+VDUP_N_IMPL(float, float32x4_t, vdupq_n, f32, traits::vector_128_tag)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VDUP_N_IMPL(float16_t, float16x8_t, vdupq_n, f16, traits::vector_128_tag)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VDUP_N_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_DUP_N_H */
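
Usage sketch (not part of this patch): a scalar argument alone cannot tell a 64-bit result apart from a 128-bit one, so vdup_n disambiguates via the tag types from wrapper/traits.h; dup_demo is an illustrative name.

#include "src/core/NEON/wrapper/intrinsics/dup_n.h"
#include <arm_neon.h>

void dup_demo()
{
    using namespace arm_compute::wrapper;
    const float32x2_t d = vdup_n(1.f, traits::vector_64_tag{});  // vdup_n_f32
    const float32x4_t q = vdup_n(1.f, traits::vector_128_tag{}); // vdupq_n_f32
    (void)d;
    (void)q;
}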
diff --git a/src/core/NEON/wrapper/intrinsics/eor.h b/src/core/NEON/wrapper/intrinsics/eor.h
new file mode 100644
index 0000000000..ce88cf59e7
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/eor.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2019-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_EOR_H
+#define ARM_COMPUTE_WRAPPER_EOR_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VEOR_IMPL(vtype, prefix, postfix) \
+ inline vtype veor(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VEOR_IMPL(uint8x8_t, veor, u8)
+VEOR_IMPL(int8x8_t, veor, s8)
+VEOR_IMPL(uint16x4_t, veor, u16)
+VEOR_IMPL(int16x4_t, veor, s16)
+VEOR_IMPL(uint32x2_t, veor, u32)
+VEOR_IMPL(int32x2_t, veor, s32)
+
+VEOR_IMPL(uint8x16_t, veorq, u8)
+VEOR_IMPL(int8x16_t, veorq, s8)
+VEOR_IMPL(uint16x8_t, veorq, u16)
+VEOR_IMPL(int16x8_t, veorq, s16)
+VEOR_IMPL(uint32x4_t, veorq, u32)
+VEOR_IMPL(int32x4_t, veorq, s32)
+
+#undef VEOR_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_EOR_H */
diff --git a/src/core/NEON/wrapper/intrinsics/erf.h b/src/core/NEON/wrapper/intrinsics/erf.h
new file mode 100644
index 0000000000..0e34462b96
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/erf.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef ARM_COMPUTE_WRAPPER_ERF_H
+#define ARM_COMPUTE_WRAPPER_ERF_H
+
+#include "src/core/NEON/NEMath.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VERF_IMPL(vtype, prefix, postfix) \
+ inline vtype verf(const vtype &a) \
+ { \
+ return prefix##_##postfix(a); \
+ }
+
+VERF_IMPL(float32x4_t, verfq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VERF_IMPL(float16x8_t, verfq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VERF_IMPL
+
+} // namespace wrapper
+} // namespace arm_compute
+
+#endif /* ARM_COMPUTE_WRAPPER_ERF_H */
diff --git a/src/core/NEON/wrapper/intrinsics/exp.h b/src/core/NEON/wrapper/intrinsics/exp.h
new file mode 100644
index 0000000000..f44577b926
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/exp.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_EXP_H
+#define ARM_COMPUTE_WRAPPER_EXP_H
+
+#include "src/core/NEON/NEMath.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VEXPQ_IMPL(vtype, postfix) \
+ inline vtype vexpq(const vtype &a) \
+ { \
+ return vexpq_##postfix(a); \
+ }
+
+#define VEXPQ_IMPL_INT(vtype, postfix) \
+ inline vtype vexpq(const vtype &a) \
+ { \
+ ARM_COMPUTE_UNUSED(a); \
+ ARM_COMPUTE_ERROR("Not supported"); \
+ }
+
+VEXPQ_IMPL(float32x4_t, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VEXPQ_IMPL(float16x8_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VEXPQ_IMPL_INT(int32x4_t, s32)
+#undef VEXPQ_IMPL
+
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_EXP_H */
diff --git a/src/core/NEON/wrapper/intrinsics/ext.h b/src/core/NEON/wrapper/intrinsics/ext.h
new file mode 100644
index 0000000000..d44b231bb2
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/ext.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_EXT_H
+#define ARM_COMPUTE_WRAPPER_EXT_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VEXT_IMPL(vtype, prefix, postfix, size) \
+ inline vtype vext_##size(vtype value_a, vtype value_b) \
+ { \
+ return prefix##_##postfix(value_a, value_b, size); \
+ }
+
+VEXT_IMPL(uint8x8_t, vext, u8, 1)
+VEXT_IMPL(uint8x8_t, vext, u8, 2)
+VEXT_IMPL(int8x8_t, vext, s8, 1)
+VEXT_IMPL(int8x8_t, vext, s8, 2)
+VEXT_IMPL(uint16x4_t, vext, u16, 1)
+VEXT_IMPL(uint16x4_t, vext, u16, 2)
+VEXT_IMPL(int16x4_t, vext, s16, 1)
+VEXT_IMPL(int16x4_t, vext, s16, 2)
+
+VEXT_IMPL(uint8x16_t, vextq, u8, 1)
+VEXT_IMPL(uint8x16_t, vextq, u8, 2)
+VEXT_IMPL(int8x16_t, vextq, s8, 1)
+VEXT_IMPL(int8x16_t, vextq, s8, 2)
+VEXT_IMPL(uint16x8_t, vextq, u16, 1)
+VEXT_IMPL(uint16x8_t, vextq, u16, 2)
+VEXT_IMPL(int16x8_t, vextq, s16, 1)
+VEXT_IMPL(int16x8_t, vextq, s16, 2)
+VEXT_IMPL(int32x4_t, vextq, s32, 1)
+VEXT_IMPL(int32x4_t, vextq, s32, 2)
+
+#undef VEXT_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_EXT_H */
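
Illustrative only (not part of the patch): vext_<n> concatenates its two
operands and extracts a full vector starting at lane n, the usual "shift left
by n lanes, fill from the second operand" idiom.

    #include "src/core/NEON/wrapper/intrinsics/ext.h"

    #include <arm_neon.h>

    // Returns {a[1], ..., a[7], b[0]}; expands to vext_u8(a, b, 1).
    uint8x8_t shift_in_one(uint8x8_t a, uint8x8_t b)
    {
        return arm_compute::wrapper::vext_1(a, b);
    }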
diff --git a/src/core/NEON/wrapper/intrinsics/gethigh.h b/src/core/NEON/wrapper/intrinsics/gethigh.h
new file mode 100644
index 0000000000..d098a27335
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/gethigh.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_GET_HIGH_H
+#define ARM_COMPUTE_WRAPPER_GET_HIGH_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VGETHIGH_IMPL(half_vtype, vtype, postfix) \
+ inline half_vtype vgethigh(const vtype val) \
+ { \
+ return vget_high_##postfix(val); \
+ }
+
+VGETHIGH_IMPL(uint8x8_t, uint8x16_t, u8)
+VGETHIGH_IMPL(int8x8_t, int8x16_t, s8)
+VGETHIGH_IMPL(uint16x4_t, uint16x8_t, u16)
+VGETHIGH_IMPL(int16x4_t, int16x8_t, s16)
+VGETHIGH_IMPL(uint32x2_t, uint32x4_t, u32)
+VGETHIGH_IMPL(int32x2_t, int32x4_t, s32)
+VGETHIGH_IMPL(float32x2_t, float32x4_t, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VGETHIGH_IMPL(float16x4_t, float16x8_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VGETHIGH_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_GET_HIGH_H */
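
A minimal sketch pairing vgethigh with vgetlow (defined in getlow.h further
down) to fold a 128-bit accumulator in half; not part of the patch, and
vadd_s32 here is the raw NEON intrinsic rather than a wrapper.

    #include "src/core/NEON/wrapper/intrinsics/gethigh.h"
    #include "src/core/NEON/wrapper/intrinsics/getlow.h"

    #include <arm_neon.h>

    int32x2_t fold(int32x4_t acc)
    {
        // Split into 64-bit halves and add them lane-wise.
        return vadd_s32(arm_compute::wrapper::vgethigh(acc),
                        arm_compute::wrapper::vgetlow(acc));
    }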
diff --git a/src/core/NEON/wrapper/intrinsics/getlane.h b/src/core/NEON/wrapper/intrinsics/getlane.h
new file mode 100644
index 0000000000..ae813bb2fa
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/getlane.h
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_GET_LANE_H
+#define ARM_COMPUTE_WRAPPER_GET_LANE_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VGETLANE_IMPL_8(stype, vtype, postfix) \
+ inline stype vgetlane(const vtype vector, const unsigned int lane) \
+ { \
+ switch (lane) \
+ { \
+ case 0: \
+ return vget_lane_##postfix(vector, 0); \
+ case 1: \
+ return vget_lane_##postfix(vector, 1); \
+ case 2: \
+ return vget_lane_##postfix(vector, 2); \
+ case 3: \
+ return vget_lane_##postfix(vector, 3); \
+ case 4: \
+ return vget_lane_##postfix(vector, 4); \
+ case 5: \
+ return vget_lane_##postfix(vector, 5); \
+ case 6: \
+ return vget_lane_##postfix(vector, 6); \
+ case 7: \
+ return vget_lane_##postfix(vector, 7); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
+ }
+
+#define VGETLANE_IMPL_4(stype, vtype, postfix) \
+ inline stype vgetlane(const vtype vector, const unsigned int lane) \
+ { \
+ switch (lane) \
+ { \
+ case 0: \
+ return vget_lane_##postfix(vector, 0); \
+ case 1: \
+ return vget_lane_##postfix(vector, 1); \
+ case 2: \
+ return vget_lane_##postfix(vector, 2); \
+ case 3: \
+ return vget_lane_##postfix(vector, 3); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
+ }
+
+#define VGETLANE_IMPL_2(stype, vtype, postfix) \
+ inline stype vgetlane(const vtype vector, const unsigned int lane) \
+ { \
+ switch (lane) \
+ { \
+ case 0: \
+ return vget_lane_##postfix(vector, 0); \
+ case 1: \
+ return vget_lane_##postfix(vector, 1); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
+ }
+
+VGETLANE_IMPL_8(uint8_t, uint8x8_t, u8)
+VGETLANE_IMPL_8(int8_t, int8x8_t, s8)
+VGETLANE_IMPL_4(uint16_t, uint16x4_t, u16)
+VGETLANE_IMPL_4(int16_t, int16x4_t, s16)
+VGETLANE_IMPL_2(uint32_t, uint32x2_t, u32)
+VGETLANE_IMPL_2(int32_t, int32x2_t, s32)
+VGETLANE_IMPL_2(float, float32x2_t, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VGETLANE_IMPL_4(float16_t, float16x4_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#define VGETQLANE_IMPL_16(stype, vtype, postfix) \
+ inline stype vgetlane(const vtype vector, const unsigned int lane) \
+ { \
+ switch (lane) \
+ { \
+ case 0: \
+ return vgetq_lane_##postfix(vector, 0); \
+ case 1: \
+ return vgetq_lane_##postfix(vector, 1); \
+ case 2: \
+ return vgetq_lane_##postfix(vector, 2); \
+ case 3: \
+ return vgetq_lane_##postfix(vector, 3); \
+ case 4: \
+ return vgetq_lane_##postfix(vector, 4); \
+ case 5: \
+ return vgetq_lane_##postfix(vector, 5); \
+ case 6: \
+ return vgetq_lane_##postfix(vector, 6); \
+ case 7: \
+ return vgetq_lane_##postfix(vector, 7); \
+ case 8: \
+ return vgetq_lane_##postfix(vector, 8); \
+ case 9: \
+ return vgetq_lane_##postfix(vector, 9); \
+ case 10: \
+ return vgetq_lane_##postfix(vector, 10); \
+ case 11: \
+ return vgetq_lane_##postfix(vector, 11); \
+ case 12: \
+ return vgetq_lane_##postfix(vector, 12); \
+ case 13: \
+ return vgetq_lane_##postfix(vector, 13); \
+ case 14: \
+ return vgetq_lane_##postfix(vector, 14); \
+ case 15: \
+ return vgetq_lane_##postfix(vector, 15); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
+ }
+
+#define VGETQLANE_IMPL_8(stype, vtype, postfix) \
+ inline stype vgetlane(const vtype vector, const unsigned int lane) \
+ { \
+ switch (lane) \
+ { \
+ case 0: \
+ return vgetq_lane_##postfix(vector, 0); \
+ case 1: \
+ return vgetq_lane_##postfix(vector, 1); \
+ case 2: \
+ return vgetq_lane_##postfix(vector, 2); \
+ case 3: \
+ return vgetq_lane_##postfix(vector, 3); \
+ case 4: \
+ return vgetq_lane_##postfix(vector, 4); \
+ case 5: \
+ return vgetq_lane_##postfix(vector, 5); \
+ case 6: \
+ return vgetq_lane_##postfix(vector, 6); \
+ case 7: \
+ return vgetq_lane_##postfix(vector, 7); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
+ }
+
+#define VGETQLANE_IMPL_4(stype, vtype, postfix) \
+ inline stype vgetlane(const vtype vector, const unsigned int lane) \
+ { \
+ switch (lane) \
+ { \
+ case 0: \
+ return vgetq_lane_##postfix(vector, 0); \
+ case 1: \
+ return vgetq_lane_##postfix(vector, 1); \
+ case 2: \
+ return vgetq_lane_##postfix(vector, 2); \
+ case 3: \
+ return vgetq_lane_##postfix(vector, 3); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
+ }
+
+#define VGETQLANE_IMPL_2(stype, vtype, postfix) \
+ inline stype vgetlane(const vtype vector, const unsigned int lane) \
+ { \
+ switch (lane) \
+ { \
+ case 0: \
+ return vgetq_lane_##postfix(vector, 0); \
+ case 1: \
+ return vgetq_lane_##postfix(vector, 1); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
+ }
+
+VGETQLANE_IMPL_16(uint8_t, uint8x16_t, u8)
+VGETQLANE_IMPL_16(int8_t, int8x16_t, s8)
+VGETQLANE_IMPL_8(uint16_t, uint16x8_t, u16)
+VGETQLANE_IMPL_8(int16_t, int16x8_t, s16)
+VGETQLANE_IMPL_4(uint32_t, uint32x4_t, u32)
+VGETQLANE_IMPL_4(int32_t, int32x4_t, s32)
+VGETQLANE_IMPL_4(float, float32x4_t, f32)
+VGETQLANE_IMPL_2(int64_t, int64x2_t, s64)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VGETQLANE_IMPL_8(float16_t, float16x8_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VGETLANE_IMPL_8
+#undef VGETLANE_IMPL_4
+#undef VGETLANE_IMPL_2
+
+#undef VGETQLANE_IMPL_16
+#undef VGETQLANE_IMPL_8
+#undef VGETQLANE_IMPL_4
+#undef VGETQLANE_IMPL_2
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_GET_LANE_H */
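
Usage sketch, not part of the patch: the switch-based dispatch exists because
vget_lane_*/vgetq_lane_* require a compile-time-constant lane index, so
vgetlane trades a branch for a runtime index. The caller must keep the lane
in range.

    #include "src/core/NEON/wrapper/intrinsics/getlane.h"

    #include <arm_neon.h>

    float lane_value(float32x4_t v, unsigned int lane)
    {
        // Aborts with ARM_COMPUTE_ERROR("Invalid lane") when lane > 3.
        return arm_compute::wrapper::vgetlane(v, lane);
    }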
diff --git a/src/core/NEON/wrapper/intrinsics/getlow.h b/src/core/NEON/wrapper/intrinsics/getlow.h
new file mode 100644
index 0000000000..b5469f0eab
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/getlow.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_GET_LOW_H
+#define ARM_COMPUTE_WRAPPER_GET_LOW_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VGETLOW_IMPL(half_vtype, vtype, postfix) \
+ inline half_vtype vgetlow(const vtype val) \
+ { \
+ return vget_low_##postfix(val); \
+ }
+
+VGETLOW_IMPL(uint8x8_t, uint8x16_t, u8)
+VGETLOW_IMPL(int8x8_t, int8x16_t, s8)
+VGETLOW_IMPL(uint16x4_t, uint16x8_t, u16)
+VGETLOW_IMPL(int16x4_t, int16x8_t, s16)
+VGETLOW_IMPL(uint32x2_t, uint32x4_t, u32)
+VGETLOW_IMPL(int32x2_t, int32x4_t, s32)
+VGETLOW_IMPL(float32x2_t, float32x4_t, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VGETLOW_IMPL(float16x4_t, float16x8_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VGETLOW_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_GET_LOW_H */
diff --git a/src/core/NEON/wrapper/intrinsics/intrinsics.h b/src/core/NEON/wrapper/intrinsics/intrinsics.h
new file mode 100644
index 0000000000..97975ebe7c
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/intrinsics.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2018-2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_INTRINSICS_H
+#define ARM_COMPUTE_WRAPPER_INTRINSICS_H
+
+#include "src/core/NEON/wrapper/intrinsics/abs.h"
+#include "src/core/NEON/wrapper/intrinsics/add.h"
+#include "src/core/NEON/wrapper/intrinsics/and.h"
+#include "src/core/NEON/wrapper/intrinsics/bsl.h"
+#include "src/core/NEON/wrapper/intrinsics/ceq.h"
+#include "src/core/NEON/wrapper/intrinsics/cge.h"
+#include "src/core/NEON/wrapper/intrinsics/cgt.h"
+#include "src/core/NEON/wrapper/intrinsics/cgtz.h"
+#include "src/core/NEON/wrapper/intrinsics/cle.h"
+#include "src/core/NEON/wrapper/intrinsics/clt.h"
+#include "src/core/NEON/wrapper/intrinsics/combine.h"
+#include "src/core/NEON/wrapper/intrinsics/cvt.h"
+#include "src/core/NEON/wrapper/intrinsics/div.h"
+#include "src/core/NEON/wrapper/intrinsics/dup_n.h"
+#include "src/core/NEON/wrapper/intrinsics/eor.h"
+#include "src/core/NEON/wrapper/intrinsics/erf.h"
+#include "src/core/NEON/wrapper/intrinsics/exp.h"
+#include "src/core/NEON/wrapper/intrinsics/ext.h"
+#include "src/core/NEON/wrapper/intrinsics/gethigh.h"
+#include "src/core/NEON/wrapper/intrinsics/getlane.h"
+#include "src/core/NEON/wrapper/intrinsics/getlow.h"
+#include "src/core/NEON/wrapper/intrinsics/inv.h"
+#include "src/core/NEON/wrapper/intrinsics/invsqrt.h"
+#include "src/core/NEON/wrapper/intrinsics/load.h"
+#include "src/core/NEON/wrapper/intrinsics/log.h"
+#include "src/core/NEON/wrapper/intrinsics/max.h"
+#include "src/core/NEON/wrapper/intrinsics/min.h"
+#include "src/core/NEON/wrapper/intrinsics/mla.h"
+#include "src/core/NEON/wrapper/intrinsics/movl.h"
+#include "src/core/NEON/wrapper/intrinsics/movn.h"
+#include "src/core/NEON/wrapper/intrinsics/mul.h"
+#include "src/core/NEON/wrapper/intrinsics/neg.h"
+#include "src/core/NEON/wrapper/intrinsics/not.h"
+#include "src/core/NEON/wrapper/intrinsics/orr.h"
+#include "src/core/NEON/wrapper/intrinsics/pmax.h"
+#include "src/core/NEON/wrapper/intrinsics/pmin.h"
+#include "src/core/NEON/wrapper/intrinsics/pow.h"
+#include "src/core/NEON/wrapper/intrinsics/qmov.h"
+#include "src/core/NEON/wrapper/intrinsics/qmovun.h"
+#include "src/core/NEON/wrapper/intrinsics/reinterpret.h"
+#include "src/core/NEON/wrapper/intrinsics/rev64.h"
+#include "src/core/NEON/wrapper/intrinsics/round.h"
+#include "src/core/NEON/wrapper/intrinsics/setlane.h"
+#include "src/core/NEON/wrapper/intrinsics/shr.h"
+#include "src/core/NEON/wrapper/intrinsics/sin.h"
+#include "src/core/NEON/wrapper/intrinsics/sqrt.h"
+#include "src/core/NEON/wrapper/intrinsics/store.h"
+#include "src/core/NEON/wrapper/intrinsics/sub.h"
+#include "src/core/NEON/wrapper/intrinsics/tanh.h"
+#include "src/core/NEON/wrapper/intrinsics/tbl.h"
+
+#if defined(__ARM_FEATURE_SVE)
+#include "src/core/NEON/wrapper/intrinsics/svcnt.h"
+#include "src/core/NEON/wrapper/intrinsics/svcvt.h"
+#include "src/core/NEON/wrapper/intrinsics/svdup_n.h"
+#include "src/core/NEON/wrapper/intrinsics/svexp.h"
+#include "src/core/NEON/wrapper/intrinsics/svlog.h"
+#include "src/core/NEON/wrapper/intrinsics/svpow.h"
+#include "src/core/NEON/wrapper/intrinsics/svptrue.h"
+#include "src/core/NEON/wrapper/intrinsics/svqadd.h"
+#include "src/core/NEON/wrapper/intrinsics/svsin.h"
+#include "src/core/NEON/wrapper/intrinsics/svwhilelt.h"
+#endif /* defined(__ARM_FEATURE_SVE) */
+
+#endif /* ARM_COMPUTE_WRAPPER_INTRINSICS_H */
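
A sketch of why the umbrella header matters, not part of the patch: one
include provides the whole type-overloaded wrapper surface, so a kernel can
be written once and instantiated per vector type. Assumes T is a vector type
covered by the vadd/vmul overloads in add.h and mul.h.

    #include "src/core/NEON/wrapper/intrinsics/intrinsics.h"

    template <typename T>
    T multiply_accumulate(T acc, T a, T b)
    {
        // Overload resolution picks e.g. vmulq_f32/vaddq_f32 for float32x4_t.
        return arm_compute::wrapper::vadd(acc, arm_compute::wrapper::vmul(a, b));
    }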
diff --git a/src/core/NEON/wrapper/intrinsics/inv.h b/src/core/NEON/wrapper/intrinsics/inv.h
new file mode 100644
index 0000000000..e443be679b
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/inv.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_INV_H
+#define ARM_COMPUTE_WRAPPER_INV_H
+
+#include "src/core/NEON/NEMath.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VINV_IMPL(vtype, prefix, postfix) \
+ inline vtype vinv(const vtype &a) \
+ { \
+ return prefix##_##postfix(a); \
+ }
+
+#define VINV_IMPL_INT(vtype, prefix, postfix) \
+ inline vtype vinv(const vtype &a) \
+ { \
+ ARM_COMPUTE_UNUSED(a); \
+ ARM_COMPUTE_ERROR("Not supported"); \
+ }
+
+VINV_IMPL(float32x2_t, vinv, f32)
+VINV_IMPL_INT(int32x2_t, vinv, s32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VINV_IMPL(float16x4_t, vinv, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VINV_IMPL(float32x4_t, vinvq, f32)
+VINV_IMPL_INT(int32x4_t, vinvq, s32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VINV_IMPL(float16x8_t, vinvq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VINV_IMPL
+#undef VINV_IMPL_INT
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_INV_H */
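
Illustrative only (not part of the patch): vinv forwards to the NEMath
reciprocal helpers, typically vrecpe plus Newton-Raphson refinement on NEON,
so the result approximates 1/x rather than performing an exact divide.

    #include "src/core/NEON/wrapper/intrinsics/inv.h"

    #include <arm_neon.h>

    float32x4_t reciprocal(float32x4_t x)
    {
        return arm_compute::wrapper::vinv(x); // expands to vinvq_f32 from NEMath.h
    }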
diff --git a/src/core/NEON/wrapper/intrinsics/invsqrt.h b/src/core/NEON/wrapper/intrinsics/invsqrt.h
new file mode 100644
index 0000000000..257b445cc7
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/invsqrt.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_INVSQRT_H
+#define ARM_COMPUTE_WRAPPER_INVSQRT_H
+
+#include "src/core/NEON/NEMath.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VINVSQRT_IMPL(stype, vtype, prefix, postfix) \
+ inline vtype vinvsqrt(const vtype &a) \
+ { \
+ return prefix##_##postfix(a); \
+ }
+
+#define VINVSQRT_IMPL_INT(stype, vtype, prefix, postfix) \
+ inline vtype vinvsqrt(const vtype &a) \
+ { \
+ ARM_COMPUTE_UNUSED(a); \
+ ARM_COMPUTE_ERROR("Not supported"); \
+ }
+
+VINVSQRT_IMPL(float, float32x2_t, vinvsqrt, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VINVSQRT_IMPL(float16_t, float16x4_t, vinvsqrt, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VINVSQRT_IMPL(float, float32x4_t, vinvsqrtq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VINVSQRT_IMPL(float16_t, float16x8_t, vinvsqrtq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VINVSQRT_IMPL_INT(int, int32x4_t, vinvsqrtq, s32)
+
+#undef VINVSQRT_IMPL
+#undef VINVSQRT_IMPL_INT
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_INVSQRT_H */
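
A sketch of the classic use, L2 normalisation without a per-lane sqrt and
divide; not part of the patch, and the small epsilon guard against a zero sum
is invented for the example.

    #include "src/core/NEON/wrapper/intrinsics/invsqrt.h"

    #include <arm_neon.h>

    float32x4_t normalise(float32x4_t v, float32x4_t sum_of_squares)
    {
        // Guard against division by zero, then scale by ~1/sqrt(ss).
        const float32x4_t guarded = vmaxq_f32(sum_of_squares, vdupq_n_f32(1e-12f));
        return vmulq_f32(v, arm_compute::wrapper::vinvsqrt(guarded));
    }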
diff --git a/src/core/NEON/wrapper/intrinsics/load.h b/src/core/NEON/wrapper/intrinsics/load.h
new file mode 100644
index 0000000000..a2116c028b
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/load.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_LOAD_H
+#define ARM_COMPUTE_WRAPPER_LOAD_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VLOAD_IMPL(stype, vtype, postfix) \
+ inline vtype vload(const stype *ptr) \
+ { \
+ return vld1_##postfix(ptr); \
+ }
+
+VLOAD_IMPL(uint8_t, uint8x8_t, u8)
+VLOAD_IMPL(int8_t, int8x8_t, s8)
+VLOAD_IMPL(uint16_t, uint16x4_t, u16)
+VLOAD_IMPL(int16_t, int16x4_t, s16)
+VLOAD_IMPL(uint32_t, uint32x2_t, u32)
+VLOAD_IMPL(int32_t, int32x2_t, s32)
+//VLOAD_IMPL(uint64_t, uint64x1_t, u64)
+//VLOAD_IMPL(int64_t, int64x1_t, s64)
+VLOAD_IMPL(float, float32x2_t, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VLOAD_IMPL(float16_t, float16x4_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#define VLOADQ_IMPL(stype, vtype, postfix) \
+ inline vtype vloadq(const stype *ptr) \
+ { \
+ return vld1q_##postfix(ptr); \
+ }
+
+VLOADQ_IMPL(uint8_t, uint8x16_t, u8)
+VLOADQ_IMPL(int8_t, int8x16_t, s8)
+VLOADQ_IMPL(uint16_t, uint16x8_t, u16)
+VLOADQ_IMPL(int16_t, int16x8_t, s16)
+VLOADQ_IMPL(uint32_t, uint32x4_t, u32)
+VLOADQ_IMPL(int32_t, int32x4_t, s32)
+//VLOADQ_IMPL(uint64_t, uint64x2_t, u64)
+//VLOADQ_IMPL(int64_t, int64x2_t, s64)
+VLOADQ_IMPL(float, float32x4_t, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VLOADQ_IMPL(float16_t, float16x8_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#undef VLOAD_IMPL
+#undef VLOADQ_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_LOAD_H */
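
Usage sketch, not part of the patch: vload/vloadq keep the 64/128-bit
distinction explicit at the call site while the element type is deduced from
the pointer. Assumes `data` points at no fewer than eight floats; vaddq_f32
is the raw intrinsic.

    #include "src/core/NEON/wrapper/intrinsics/load.h"

    #include <arm_neon.h>

    float32x4_t sum_pairs(const float *data)
    {
        // Two q-register loads followed by a lane-wise add.
        return vaddq_f32(arm_compute::wrapper::vloadq(data),
                         arm_compute::wrapper::vloadq(data + 4));
    }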
diff --git a/src/core/NEON/wrapper/intrinsics/log.h b/src/core/NEON/wrapper/intrinsics/log.h
new file mode 100644
index 0000000000..d091407edb
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/log.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_LOG_H
+#define ARM_COMPUTE_WRAPPER_LOG_H
+
+#include "src/core/NEON/NEMath.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VLOG_IMPL(vtype, prefix, postfix) \
+ inline vtype vlog(const vtype &a) \
+ { \
+ return prefix##_##postfix(a); \
+ }
+
+#define VLOG_IMPL_INT(vtype, prefix, postfix) \
+ inline vtype vlog(const vtype &a) \
+ { \
+ ARM_COMPUTE_UNUSED(a); \
+ ARM_COMPUTE_ERROR("Not supported"); \
+ }
+
+VLOG_IMPL(float32x4_t, vlogq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VLOG_IMPL(float16x8_t, vlogq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VLOG_IMPL_INT(int32x4_t, vlogq, s32)
+
+#undef VLOG_IMPL
+#undef VLOG_IMPL_INT
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_LOG_H */
diff --git a/src/core/NEON/wrapper/intrinsics/max.h b/src/core/NEON/wrapper/intrinsics/max.h
new file mode 100644
index 0000000000..32d38a856c
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/max.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2018-2020, 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_SRC_CORE_NEON_WRAPPER_INTRINSICS_MAX_H
+#define ACL_SRC_CORE_NEON_WRAPPER_INTRINSICS_MAX_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VMAX_IMPL(stype, vtype, prefix, postfix) \
+ inline vtype vmax(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VMAX_IMPL(uint8_t, uint8x8_t, vmax, u8)
+VMAX_IMPL(int8_t, int8x8_t, vmax, s8)
+VMAX_IMPL(uint16_t, uint16x4_t, vmax, u16)
+VMAX_IMPL(int16_t, int16x4_t, vmax, s16)
+VMAX_IMPL(uint32_t, uint32x2_t, vmax, u32)
+VMAX_IMPL(int32_t, int32x2_t, vmax, s32)
+VMAX_IMPL(float, float32x2_t, vmax, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VMAX_IMPL(float16_t, float16x4_t, vmax, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VMAX_IMPL(uint8_t, uint8x16_t, vmaxq, u8)
+VMAX_IMPL(int8_t, int8x16_t, vmaxq, s8)
+VMAX_IMPL(uint16_t, uint16x8_t, vmaxq, u16)
+VMAX_IMPL(int16_t, int16x8_t, vmaxq, s16)
+VMAX_IMPL(uint32_t, uint32x4_t, vmaxq, u32)
+VMAX_IMPL(int32_t, int32x4_t, vmaxq, s32)
+VMAX_IMPL(float, float32x4_t, vmaxq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VMAX_IMPL(float16_t, float16x8_t, vmaxq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VMAX_IMPL
+
+#if defined(__aarch64__)
+// VMAXV: Across vector max
+#define VMAXV_IMPL(stype, vtype, prefix, postfix) \
+ inline stype vmaxv(const vtype &a) \
+ { \
+ return prefix##_##postfix(a); \
+ }
+
+VMAXV_IMPL(uint8_t, uint8x8_t, vmaxv, u8)
+VMAXV_IMPL(int8_t, int8x8_t, vmaxv, s8)
+VMAXV_IMPL(uint16_t, uint16x4_t, vmaxv, u16)
+VMAXV_IMPL(int16_t, int16x4_t, vmaxv, s16)
+VMAXV_IMPL(uint32_t, uint32x2_t, vmaxv, u32)
+VMAXV_IMPL(int32_t, int32x2_t, vmaxv, s32)
+VMAXV_IMPL(float, float32x2_t, vmaxv, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VMAXV_IMPL(float16_t, float16x4_t, vmaxv, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VMAXV_IMPL(uint8_t, uint8x16_t, vmaxvq, u8)
+VMAXV_IMPL(int8_t, int8x16_t, vmaxvq, s8)
+VMAXV_IMPL(uint16_t, uint16x8_t, vmaxvq, u16)
+VMAXV_IMPL(int16_t, int16x8_t, vmaxvq, s16)
+VMAXV_IMPL(uint32_t, uint32x4_t, vmaxvq, u32)
+VMAXV_IMPL(int32_t, int32x4_t, vmaxvq, s32)
+VMAXV_IMPL(float, float32x4_t, vmaxvq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VMAXV_IMPL(float16_t, float16x8_t, vmaxvq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VMAXV_IMPL
+#endif // defined(__aarch64__)
+} // namespace wrapper
+} // namespace arm_compute
+#endif // ACL_SRC_CORE_NEON_WRAPPER_INTRINSICS_MAX_H
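
Illustrative only (not part of the patch): on AArch64 the across-vector form
collapses the tail of a reduction in one step; on 32-bit Arm the vmaxv block
above is compiled out and callers fall back to pairwise reductions (see
pmax.h below).

    #include "src/core/NEON/wrapper/intrinsics/max.h"

    #include <arm_neon.h>

    #if defined(__aarch64__)
    float max_of(float32x4_t a, float32x4_t b)
    {
        // Lane-wise max, then horizontal max; expands to vmaxq_f32 + vmaxvq_f32.
        return arm_compute::wrapper::vmaxv(arm_compute::wrapper::vmax(a, b));
    }
    #endif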
diff --git a/src/core/NEON/wrapper/intrinsics/min.h b/src/core/NEON/wrapper/intrinsics/min.h
new file mode 100644
index 0000000000..8afcb3cb10
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/min.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_MIN_H
+#define ARM_COMPUTE_WRAPPER_MIN_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VMIN_IMPL(stype, vtype, prefix, postfix) \
+ inline vtype vmin(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VMIN_IMPL(uint8_t, uint8x8_t, vmin, u8)
+VMIN_IMPL(int8_t, int8x8_t, vmin, s8)
+VMIN_IMPL(uint16_t, uint16x4_t, vmin, u16)
+VMIN_IMPL(int16_t, int16x4_t, vmin, s16)
+VMIN_IMPL(uint32_t, uint32x2_t, vmin, u32)
+VMIN_IMPL(int32_t, int32x2_t, vmin, s32)
+VMIN_IMPL(float, float32x2_t, vmin, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VMIN_IMPL(float16_t, float16x4_t, vmin, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VMIN_IMPL(uint8_t, uint8x16_t, vminq, u8)
+VMIN_IMPL(int8_t, int8x16_t, vminq, s8)
+VMIN_IMPL(uint16_t, uint16x8_t, vminq, u16)
+VMIN_IMPL(int16_t, int16x8_t, vminq, s16)
+VMIN_IMPL(uint32_t, uint32x4_t, vminq, u32)
+VMIN_IMPL(int32_t, int32x4_t, vminq, s32)
+VMIN_IMPL(float, float32x4_t, vminq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VMIN_IMPL(float16_t, float16x8_t, vminq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VMIN_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_MIN_H */
diff --git a/src/core/NEON/wrapper/intrinsics/mla.h b/src/core/NEON/wrapper/intrinsics/mla.h
new file mode 100644
index 0000000000..9fb5a08f9b
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/mla.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_MLA_H
+#define ARM_COMPUTE_WRAPPER_MLA_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VMLA_IMPL(stype, vtype, prefix, postfix) \
+ inline vtype vmla(const vtype &a, const vtype &b, const vtype &c) \
+ { \
+ return prefix##_##postfix(a, b, c); \
+ }
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#define VMLA_IMPL2(stype, vtype, prefix1, prefix2, postfix) \
+ inline vtype vmla(const vtype &a, const vtype &b, const vtype &c) \
+ { \
+ return prefix1##_##postfix(a, prefix2##_##postfix(b, c)); \
+ }
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VMLA_IMPL(uint8x8_t, uint8x8_t, vmla, u8)
+VMLA_IMPL(int8x8_t, int8x8_t, vmla, s8)
+VMLA_IMPL(uint16x4_t, uint16x4_t, vmla, u16)
+VMLA_IMPL(int16x4_t, int16x4_t, vmla, s16)
+VMLA_IMPL(uint32x2_t, uint32x2_t, vmla, u32)
+VMLA_IMPL(int32x2_t, int32x2_t, vmla, s32)
+VMLA_IMPL(float32x2_t, float32x2_t, vmla, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VMLA_IMPL2(float16x4_t, float16x4_t, vadd, vmul, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VMLA_IMPL(uint8x16_t, uint8x16_t, vmlaq, u8)
+VMLA_IMPL(int8x16_t, int8x16_t, vmlaq, s8)
+VMLA_IMPL(uint16x8_t, uint16x8_t, vmlaq, u16)
+VMLA_IMPL(int16x8_t, int16x8_t, vmlaq, s16)
+VMLA_IMPL(uint32x4_t, uint32x4_t, vmlaq, u32)
+VMLA_IMPL(int32x4_t, int32x4_t, vmlaq, s32)
+VMLA_IMPL(float32x4_t, float32x4_t, vmlaq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VMLA_IMPL2(float16x8_t, float16x8_t, vaddq, vmulq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VMLA_IMPL
+#undef VMLA_IMPL2
+
+#define VMLAL_IMPL(vtype_in, vtype_out, postfix) \
+ inline vtype_out vmlal(const vtype_out &a, const vtype_in &b, const vtype_in &c) \
+ { \
+ return vmlal_##postfix(a, b, c); \
+ }
+
+VMLAL_IMPL(uint8x8_t, uint16x8_t, u8)
+VMLAL_IMPL(int8x8_t, int16x8_t, s8)
+VMLAL_IMPL(uint16x4_t, uint32x4_t, u16)
+VMLAL_IMPL(int16x4_t, int32x4_t, s16)
+VMLAL_IMPL(uint32x2_t, uint64x2_t, u32)
+VMLAL_IMPL(int32x2_t, int64x2_t, s32)
+
+#undef VMLAL_IMPL
+
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_MLA_H */
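
A minimal sketch, not part of the patch: the widening vmlal form is what
8-bit kernels use to accumulate products without overflowing the input type.
The function name and operands are invented.

    #include "src/core/NEON/wrapper/intrinsics/mla.h"

    #include <arm_neon.h>

    uint16x8_t accumulate(uint16x8_t acc, uint8x8_t a, uint8x8_t b)
    {
        // acc[i] += a[i] * b[i], with the product widened to 16 bits.
        return arm_compute::wrapper::vmlal(acc, a, b);
    }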
diff --git a/src/core/NEON/wrapper/intrinsics/movl.h b/src/core/NEON/wrapper/intrinsics/movl.h
new file mode 100644
index 0000000000..99f2150eab
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/movl.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_MOVL_H
+#define ARM_COMPUTE_WRAPPER_MOVL_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VMOVL_IMPL(ptype, vtype, prefix, postfix) \
+ inline ptype vmovl(const vtype &a) \
+ { \
+ return prefix##_##postfix(a); \
+ }
+
+VMOVL_IMPL(uint16x8_t, uint8x8_t, vmovl, u8)
+VMOVL_IMPL(int16x8_t, int8x8_t, vmovl, s8)
+VMOVL_IMPL(uint32x4_t, uint16x4_t, vmovl, u16)
+VMOVL_IMPL(int32x4_t, int16x4_t, vmovl, s16)
+VMOVL_IMPL(uint64x2_t, uint32x2_t, vmovl, u32)
+VMOVL_IMPL(int64x2_t, int32x2_t, vmovl, s32)
+
+#undef VMOVL_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_MOVL_H */
diff --git a/src/core/NEON/wrapper/intrinsics/movn.h b/src/core/NEON/wrapper/intrinsics/movn.h
new file mode 100644
index 0000000000..460c277540
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/movn.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_MOVN_H
+#define ARM_COMPUTE_WRAPPER_MOVN_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VMOVN_IMPL(dtype, vtype, prefix, postfix) \
+ inline dtype vmovn(const vtype &a) \
+ { \
+ return prefix##_##postfix(a); \
+ }
+
+VMOVN_IMPL(uint32x2_t, uint64x2_t, vmovn, u64)
+VMOVN_IMPL(int32x2_t, int64x2_t, vmovn, s64)
+VMOVN_IMPL(uint16x4_t, uint32x4_t, vmovn, u32)
+VMOVN_IMPL(int16x4_t, int32x4_t, vmovn, s32)
+VMOVN_IMPL(uint8x8_t, uint16x8_t, vmovn, u16)
+VMOVN_IMPL(int8x8_t, int16x8_t, vmovn, s16)
+
+#define VQMOVN_IMPL(dtype, vtype, prefix, postfix) \
+ inline dtype vqmovn(const vtype &a) \
+ { \
+ return prefix##_##postfix(a); \
+ }
+
+VQMOVN_IMPL(uint32x2_t, uint64x2_t, vqmovn, u64)
+VQMOVN_IMPL(int32x2_t, int64x2_t, vqmovn, s64)
+VQMOVN_IMPL(uint16x4_t, uint32x4_t, vqmovn, u32)
+VQMOVN_IMPL(int16x4_t, int32x4_t, vqmovn, s32)
+VQMOVN_IMPL(uint8x8_t, uint16x8_t, vqmovn, u16)
+VQMOVN_IMPL(int8x8_t, int16x8_t, vqmovn, s16)
+
+#undef VMOVN_IMPL
+#undef VQMOVN_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_MOVN_H */
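
A sketch pairing vqmovn with vmovl from movl.h above: widen, do the
arithmetic at 16 bits, then saturate back down. Not part of the patch; the
bias operand is invented for the example.

    #include "src/core/NEON/wrapper/intrinsics/movl.h"
    #include "src/core/NEON/wrapper/intrinsics/movn.h"

    #include <arm_neon.h>

    int8x8_t add_bias_saturating(int8x8_t x, int16x8_t bias)
    {
        const int16x8_t wide = vaddq_s16(arm_compute::wrapper::vmovl(x), bias);
        return arm_compute::wrapper::vqmovn(wide); // clamps to [-128, 127]
    }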
diff --git a/src/core/NEON/wrapper/intrinsics/mul.h b/src/core/NEON/wrapper/intrinsics/mul.h
new file mode 100644
index 0000000000..6296fff35a
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/mul.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_MUL_H
+#define ARM_COMPUTE_WRAPPER_MUL_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VMUL_IMPL(stype, vtype, prefix, postfix) \
+ inline vtype vmul(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VMUL_IMPL(uint8x8_t, uint8x8_t, vmul, u8)
+VMUL_IMPL(int8x8_t, int8x8_t, vmul, s8)
+VMUL_IMPL(uint16x4_t, uint16x4_t, vmul, u16)
+VMUL_IMPL(int16x4_t, int16x4_t, vmul, s16)
+VMUL_IMPL(uint32x2_t, uint32x2_t, vmul, u32)
+VMUL_IMPL(int32x2_t, int32x2_t, vmul, s32)
+VMUL_IMPL(float32x2_t, float32x2_t, vmul, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VMUL_IMPL(float16_t, float16x4_t, vmul, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VMUL_IMPL(uint8_t, uint8x16_t, vmulq, u8)
+VMUL_IMPL(int8_t, int8x16_t, vmulq, s8)
+VMUL_IMPL(uint16_t, uint16x8_t, vmulq, u16)
+VMUL_IMPL(int16_t, int16x8_t, vmulq, s16)
+VMUL_IMPL(uint32_t, uint32x4_t, vmulq, u32)
+VMUL_IMPL(int32_t, int32x4_t, vmulq, s32)
+VMUL_IMPL(float32x4_t, float32x4_t, vmulq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VMUL_IMPL(float16_t, float16x8_t, vmulq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VMUL_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_MUL_H */
diff --git a/src/core/NEON/wrapper/intrinsics/neg.h b/src/core/NEON/wrapper/intrinsics/neg.h
new file mode 100644
index 0000000000..5e4556664e
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/neg.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_NEG_H
+#define ARM_COMPUTE_WRAPPER_NEG_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VNEG_IMPL(vtype, prefix, postfix) \
+ inline vtype vneg(const vtype &a) \
+ { \
+ return prefix##_##postfix(a); \
+ }
+
+VNEG_IMPL(int8x8_t, vneg, s8)
+VNEG_IMPL(int16x4_t, vneg, s16)
+VNEG_IMPL(int32x2_t, vneg, s32)
+VNEG_IMPL(float32x2_t, vneg, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VNEG_IMPL(float16x4_t, vneg, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VNEG_IMPL(int8x16_t, vnegq, s8)
+VNEG_IMPL(int16x8_t, vnegq, s16)
+VNEG_IMPL(int32x4_t, vnegq, s32)
+VNEG_IMPL(float32x4_t, vnegq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VNEG_IMPL(float16x8_t, vnegq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VNEG_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_NEG_H */
diff --git a/src/core/NEON/wrapper/intrinsics/not.h b/src/core/NEON/wrapper/intrinsics/not.h
new file mode 100644
index 0000000000..5853e849a2
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/not.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2019-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_NOT_H
+#define ARM_COMPUTE_WRAPPER_NOT_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VNOT_IMPL(stype, vtype, prefix, postfix) \
+ inline vtype vnot(const vtype &a) \
+ { \
+ return prefix##_##postfix(a); \
+ }
+
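+// Note: there is no bitwise NOT for floating-point types; the f32/f16 entries
+// below map vnot to the NEMath reciprocal helpers (vinv_f32 and friends).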
+VNOT_IMPL(uint8_t, uint8x8_t, vmvn, u8)
+VNOT_IMPL(int8_t, int8x8_t, vmvn, s8)
+VNOT_IMPL(uint16_t, uint16x4_t, vmvn, u16)
+VNOT_IMPL(int16_t, int16x4_t, vmvn, s16)
+VNOT_IMPL(uint32_t, uint32x2_t, vmvn, u32)
+VNOT_IMPL(int32_t, int32x2_t, vmvn, s32)
+VNOT_IMPL(float32x2_t, float32x2_t, vinv, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VNOT_IMPL(float16x4_t, float16x4_t, vinv, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VNOT_IMPL(uint8_t, uint8x16_t, vmvnq, u8)
+VNOT_IMPL(int8_t, int8x16_t, vmvnq, s8)
+VNOT_IMPL(uint16_t, uint16x8_t, vmvnq, u16)
+VNOT_IMPL(int16_t, int16x8_t, vmvnq, s16)
+VNOT_IMPL(uint32_t, uint32x4_t, vmvnq, u32)
+VNOT_IMPL(int32_t, int32x4_t, vmvnq, s32)
+VNOT_IMPL(float32x4_t, float32x4_t, vinvq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VNOT_IMPL(float16x8_t, float16x8_t, vinvq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VNOT_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_NOT_H */
diff --git a/src/core/NEON/wrapper/intrinsics/orr.h b/src/core/NEON/wrapper/intrinsics/orr.h
new file mode 100644
index 0000000000..cc83e95d15
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/orr.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_ORR_H
+#define ARM_COMPUTE_WRAPPER_ORR_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VORR_IMPL(stype, vtype, prefix, postfix) \
+ inline vtype vorr(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VORR_IMPL(uint8_t, uint8x8_t, vorr, u8)
+VORR_IMPL(int8_t, int8x8_t, vorr, s8)
+VORR_IMPL(uint16_t, uint16x4_t, vorr, u16)
+VORR_IMPL(int16_t, int16x4_t, vorr, s16)
+VORR_IMPL(uint32_t, uint32x2_t, vorr, u32)
+VORR_IMPL(int32_t, int32x2_t, vorr, s32)
+VORR_IMPL(uint64_t, uint64x1_t, vorr, u64)
+VORR_IMPL(int64_t, int64x1_t, vorr, s64)
+
+VORR_IMPL(uint8_t, uint8x16_t, vorrq, u8)
+VORR_IMPL(int8_t, int8x16_t, vorrq, s8)
+VORR_IMPL(uint16_t, uint16x8_t, vorrq, u16)
+VORR_IMPL(int16_t, int16x8_t, vorrq, s16)
+VORR_IMPL(uint32_t, uint32x4_t, vorrq, u32)
+VORR_IMPL(int32_t, int32x4_t, vorrq, s32)
+VORR_IMPL(uint64_t, uint64x2_t, vorrq, u64)
+VORR_IMPL(int64_t, int64x2_t, vorrq, s64)
+
+#undef VORR_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_ORR_H */
diff --git a/src/core/NEON/wrapper/intrinsics/pmax.h b/src/core/NEON/wrapper/intrinsics/pmax.h
new file mode 100644
index 0000000000..cd2b2d1f41
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/pmax.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_PMAX_H
+#define ARM_COMPUTE_WRAPPER_PMAX_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VPMAX_IMPL(stype, vtype, prefix, postfix) \
+ inline vtype vpmax(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VPMAX_IMPL(uint8_t, uint8x8_t, vpmax, u8)
+VPMAX_IMPL(int8_t, int8x8_t, vpmax, s8)
+VPMAX_IMPL(uint16_t, uint16x4_t, vpmax, u16)
+VPMAX_IMPL(int16_t, int16x4_t, vpmax, s16)
+VPMAX_IMPL(uint32_t, uint32x2_t, vpmax, u32)
+VPMAX_IMPL(int32_t, int32x2_t, vpmax, s32)
+VPMAX_IMPL(float, float32x2_t, vpmax, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VPMAX_IMPL(float16_t, float16x4_t, vpmax, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VPMAX_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_PMAX_H */
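
Illustrative only (not part of the patch): pairwise max is the portable
reduction on targets without AArch64's vmaxv; three vpmax steps collapse
eight lanes to one.

    #include "src/core/NEON/wrapper/intrinsics/pmax.h"

    #include <arm_neon.h>

    uint8_t max_of_8(uint8x8_t v)
    {
        v = arm_compute::wrapper::vpmax(v, v); // 8 candidates -> 4
        v = arm_compute::wrapper::vpmax(v, v); // 4 -> 2
        v = arm_compute::wrapper::vpmax(v, v); // 2 -> 1 (lane 0 holds the max)
        return vget_lane_u8(v, 0);
    }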
diff --git a/src/core/NEON/wrapper/intrinsics/pmin.h b/src/core/NEON/wrapper/intrinsics/pmin.h
new file mode 100644
index 0000000000..59b6be69ce
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/pmin.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_PMIN_H
+#define ARM_COMPUTE_WRAPPER_PMIN_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VPMIN_IMPL(stype, vtype, prefix, postfix) \
+ inline vtype vpmin(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VPMIN_IMPL(uint8_t, uint8x8_t, vpmin, u8)
+VPMIN_IMPL(int8_t, int8x8_t, vpmin, s8)
+VPMIN_IMPL(uint16_t, uint16x4_t, vpmin, u16)
+VPMIN_IMPL(int16_t, int16x4_t, vpmin, s16)
+VPMIN_IMPL(uint32_t, uint32x2_t, vpmin, u32)
+VPMIN_IMPL(int32_t, int32x2_t, vpmin, s32)
+VPMIN_IMPL(float, float32x2_t, vpmin, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VPMIN_IMPL(float16_t, float16x4_t, vpmin, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VPMIN_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_PMIN_H */
diff --git a/src/core/NEON/wrapper/intrinsics/pow.h b/src/core/NEON/wrapper/intrinsics/pow.h
new file mode 100644
index 0000000000..dfd6ccc358
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/pow.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_POW_H
+#define ARM_COMPUTE_WRAPPER_POW_H
+
+#include "src/core/NEON/NEMath.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VPOW_IMPL(vtype, prefix, postfix) \
+ inline vtype vpow(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
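+// The vpowq_* implementations come from NEMath.h (included above); e.g. vpow(a, b)
+// on float32x4_t computes an element-wise a^b via vpowq_f32.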
+
+VPOW_IMPL(float32x4_t, vpowq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VPOW_IMPL(float16x8_t, vpowq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VPOW_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_POW_H */
diff --git a/src/core/NEON/wrapper/intrinsics/qmov.h b/src/core/NEON/wrapper/intrinsics/qmov.h
new file mode 100644
index 0000000000..9a0a23a241
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/qmov.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_QMOV_H
+#define ARM_COMPUTE_WRAPPER_QMOV_H
+
+#include <arm_neon.h>
+#include <type_traits>
+
+namespace arm_compute
+{
+namespace wrapper
+{
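+// vqmov<T> is a saturating narrow of int16x8_t where T picks the output signedness:
+// vqmov<uint8_t>(v) lowers to vqmovun_s16, vqmov<int8_t>(v) to vqmovn_s16.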
+template <typename T>
+inline typename std::enable_if<std::is_same<T, uint8_t>::value, uint8x8_t>::type vqmov(const int16x8_t &a)
+{
+ return vqmovun_s16(a);
+}
+
+template <typename T>
+inline typename std::enable_if<std::is_same<T, int8_t>::value, int8x8_t>::type vqmov(const int16x8_t &a)
+{
+ return vqmovn_s16(a);
+}
+
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_QMOV_H */
diff --git a/src/core/NEON/wrapper/intrinsics/qmovun.h b/src/core/NEON/wrapper/intrinsics/qmovun.h
new file mode 100644
index 0000000000..f823ddb513
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/qmovun.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_QMOVUN_H
+#define ARM_COMPUTE_WRAPPER_QMOVUN_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VQMOVUN_IMPL(dtype, vtype, prefix, postfix) \
+ inline dtype vqmovun(const vtype &a) \
+ { \
+ return prefix##_##postfix(a); \
+ }
+
+VQMOVUN_IMPL(uint32x2_t, int64x2_t, vqmovun, s64)
+VQMOVUN_IMPL(uint16x4_t, int32x4_t, vqmovun, s32)
+VQMOVUN_IMPL(uint8x8_t, int16x8_t, vqmovun, s16)
+
+#undef VQMOVUN_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_QMOVUN_H */
diff --git a/src/core/NEON/wrapper/intrinsics/reinterpret.h b/src/core/NEON/wrapper/intrinsics/reinterpret.h
new file mode 100644
index 0000000000..c2c4f720d2
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/reinterpret.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2019-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_REINTERPRET_H
+#define ARM_COMPUTE_WRAPPER_REINTERPRET_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VREINTERPRET_IMPL(ptype, vtype, prefix, postfix1, postfix2) \
+ inline ptype vreinterpret(const vtype &a) \
+ { \
+ return prefix##_##postfix1##_##postfix2(a); \
+ } \
+ \
+ inline ptype vreinterpret(const ptype &a) \
+ { \
+ return a; \
+ }
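+// The identity overload lets generic code call vreinterpret on either type, e.g.
+// vreinterpret of a uint16x4_t lowers to vreinterpret_s16_u16 while an int16x4_t passes through.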
+
+VREINTERPRET_IMPL(int16x4_t, uint16x4_t, vreinterpret, s16, u16)
+VREINTERPRET_IMPL(int16x8_t, uint16x8_t, vreinterpretq, s16, u16)
+VREINTERPRET_IMPL(int32x4_t, uint32x4_t, vreinterpretq, s32, u32)
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_REINTERPRET_H */
diff --git a/src/core/NEON/wrapper/intrinsics/rev64.h b/src/core/NEON/wrapper/intrinsics/rev64.h
new file mode 100644
index 0000000000..0f0139c93b
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/rev64.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_REV64_H
+#define ARM_COMPUTE_WRAPPER_REV64_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VREV64_IMPL(vtype, prefix, postfix) \
+ inline vtype vrev64(const vtype &a) \
+ { \
+ return prefix##_##postfix(a); \
+ }
+
+VREV64_IMPL(uint8x8_t, vrev64, u8)
+VREV64_IMPL(int8x8_t, vrev64, s8)
+VREV64_IMPL(uint16x4_t, vrev64, u16)
+VREV64_IMPL(int16x4_t, vrev64, s16)
+VREV64_IMPL(uint32x2_t, vrev64, u32)
+VREV64_IMPL(int32x2_t, vrev64, s32)
+VREV64_IMPL(float32x2_t, vrev64, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VREV64_IMPL(float16x4_t, vrev64, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VREV64_IMPL(uint8x16_t, vrev64q, u8)
+VREV64_IMPL(int8x16_t, vrev64q, s8)
+VREV64_IMPL(uint16x8_t, vrev64q, u16)
+VREV64_IMPL(int16x8_t, vrev64q, s16)
+VREV64_IMPL(uint32x4_t, vrev64q, u32)
+VREV64_IMPL(int32x4_t, vrev64q, s32)
+VREV64_IMPL(float32x4_t, vrev64q, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VREV64_IMPL(float16x8_t, vrev64q, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VREV64_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_REV64_H */
diff --git a/src/core/NEON/wrapper/intrinsics/round.h b/src/core/NEON/wrapper/intrinsics/round.h
new file mode 100644
index 0000000000..7789aab770
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/round.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2019-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_ROUND_H
+#define ARM_COMPUTE_WRAPPER_ROUND_H
+
+#include "src/core/NEON/NEMath.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VROUNDQ_IMPL(vtype, postfix) \
+ inline vtype vround(const vtype &a) \
+ { \
+ return vroundq_rte_##postfix(a); \
+ }
+
+#define VROUNDQ_IMPL_INT(vtype, postfix) \
+ inline vtype vround(const vtype &a) \
+ { \
+ ARM_COMPUTE_UNUSED(a); \
+ ARM_COMPUTE_ERROR("Not supported"); \
+ }
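+// vround rounds to nearest with ties to even (the "rte" helpers from NEMath.h);
+// the integer overload exists only so generic code compiles, and errors if called.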
+
+VROUNDQ_IMPL(float32x4_t, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VROUNDQ_IMPL(float16x8_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VROUNDQ_IMPL_INT(int32x4_t, s32)
+#undef VROUNDQ_IMPL
+#undef VROUNDQ_IMPL_INT
+
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_ROUND_H */
diff --git a/src/core/NEON/wrapper/intrinsics/setlane.h b/src/core/NEON/wrapper/intrinsics/setlane.h
new file mode 100644
index 0000000000..259b8eaf90
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/setlane.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_SET_LANE_H
+#define ARM_COMPUTE_WRAPPER_SET_LANE_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
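+// The vset_lane_* intrinsics require an immediate lane index, so these wrappers
+// switch over every valid lane to accept an index that is only known at run time.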
+#define VSETLANE_IMPL_8(stype, atype, vtype, postfix) \
+ inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
+ { \
+ switch (lane) \
+ { \
+ case 0: \
+ return vset_lane_##postfix(value, vector, 0); \
+ case 1: \
+ return vset_lane_##postfix(value, vector, 1); \
+ case 2: \
+ return vset_lane_##postfix(value, vector, 2); \
+ case 3: \
+ return vset_lane_##postfix(value, vector, 3); \
+ case 4: \
+ return vset_lane_##postfix(value, vector, 4); \
+ case 5: \
+ return vset_lane_##postfix(value, vector, 5); \
+ case 6: \
+ return vset_lane_##postfix(value, vector, 6); \
+ case 7: \
+ return vset_lane_##postfix(value, vector, 7); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
+ }
+
+#define VSETLANE_IMPL_4(stype, atype, vtype, postfix) \
+ inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
+ { \
+ switch (lane) \
+ { \
+ case 0: \
+ return vset_lane_##postfix(value, vector, 0); \
+ case 1: \
+ return vset_lane_##postfix(value, vector, 1); \
+ case 2: \
+ return vset_lane_##postfix(value, vector, 2); \
+ case 3: \
+ return vset_lane_##postfix(value, vector, 3); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
+ }
+
+#define VSETLANE_IMPL_2(stype, atype, vtype, postfix) \
+ inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
+ { \
+ switch (lane) \
+ { \
+ case 0: \
+ return vset_lane_##postfix(value, vector, 0); \
+ case 1: \
+ return vset_lane_##postfix(value, vector, 1); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
+ }
+
+VSETLANE_IMPL_8(uint8x8_t, uint8_t, uint8x8_t, u8)
+VSETLANE_IMPL_8(int8x8_t, int8_t, int8x8_t, s8)
+VSETLANE_IMPL_4(uint16x4_t, uint16_t, uint16x4_t, u16)
+VSETLANE_IMPL_4(int16x4_t, int16_t, int16x4_t, s16)
+VSETLANE_IMPL_2(uint32x2_t, uint32_t, uint32x2_t, u32)
+VSETLANE_IMPL_2(int32x2_t, int32_t, int32x2_t, s32)
+VSETLANE_IMPL_2(float32x2_t, float, float32x2_t, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VSETLANE_IMPL_4(float16x4_t, float16_t, float16x4_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#define VSETQLANE_IMPL_16(stype, atype, vtype, postfix) \
+ inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
+ { \
+ switch (lane) \
+ { \
+ case 0: \
+ return vsetq_lane_##postfix(value, vector, 0); \
+ case 1: \
+ return vsetq_lane_##postfix(value, vector, 1); \
+ case 2: \
+ return vsetq_lane_##postfix(value, vector, 2); \
+ case 3: \
+ return vsetq_lane_##postfix(value, vector, 3); \
+ case 4: \
+ return vsetq_lane_##postfix(value, vector, 4); \
+ case 5: \
+ return vsetq_lane_##postfix(value, vector, 5); \
+ case 6: \
+ return vsetq_lane_##postfix(value, vector, 6); \
+ case 7: \
+ return vsetq_lane_##postfix(value, vector, 7); \
+ case 8: \
+ return vsetq_lane_##postfix(value, vector, 8); \
+ case 9: \
+ return vsetq_lane_##postfix(value, vector, 9); \
+ case 10: \
+ return vsetq_lane_##postfix(value, vector, 10); \
+ case 11: \
+ return vsetq_lane_##postfix(value, vector, 11); \
+ case 12: \
+ return vsetq_lane_##postfix(value, vector, 12); \
+ case 13: \
+ return vsetq_lane_##postfix(value, vector, 13); \
+ case 14: \
+ return vsetq_lane_##postfix(value, vector, 14); \
+ case 15: \
+ return vsetq_lane_##postfix(value, vector, 15); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
+ }
+
+#define VSETQLANE_IMPL_8(stype, atype, vtype, postfix) \
+ inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
+ { \
+ switch (lane) \
+ { \
+ case 0: \
+ return vsetq_lane_##postfix(value, vector, 0); \
+ case 1: \
+ return vsetq_lane_##postfix(value, vector, 1); \
+ case 2: \
+ return vsetq_lane_##postfix(value, vector, 2); \
+ case 3: \
+ return vsetq_lane_##postfix(value, vector, 3); \
+ case 4: \
+ return vsetq_lane_##postfix(value, vector, 4); \
+ case 5: \
+ return vsetq_lane_##postfix(value, vector, 5); \
+ case 6: \
+ return vsetq_lane_##postfix(value, vector, 6); \
+ case 7: \
+ return vsetq_lane_##postfix(value, vector, 7); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
+ }
+
+#define VSETQLANE_IMPL_4(stype, atype, vtype, postfix) \
+ inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
+ { \
+ switch (lane) \
+ { \
+ case 0: \
+ return vsetq_lane_##postfix(value, vector, 0); \
+ case 1: \
+ return vsetq_lane_##postfix(value, vector, 1); \
+ case 2: \
+ return vsetq_lane_##postfix(value, vector, 2); \
+ case 3: \
+ return vsetq_lane_##postfix(value, vector, 3); \
+ default: \
+ ARM_COMPUTE_ERROR("Invalid lane"); \
+ } \
+ }
+
+VSETQLANE_IMPL_16(uint8x16_t, uint8_t, uint8x16_t, u8)
+VSETQLANE_IMPL_16(int8x16_t, int8_t, int8x16_t, s8)
+VSETQLANE_IMPL_8(uint16x8_t, uint16_t, uint16x8_t, u16)
+VSETQLANE_IMPL_8(int16x8_t, int16_t, int16x8_t, s16)
+VSETQLANE_IMPL_4(uint32x4_t, uint32_t, uint32x4_t, u32)
+VSETQLANE_IMPL_4(int32x4_t, int32_t, int32x4_t, s32)
+VSETQLANE_IMPL_4(float32x4_t, float, float32x4_t, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VSETQLANE_IMPL_8(float16x8_t, float16_t, float16x8_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VSETLANE_IMPL_8
+#undef VSETLANE_IMPL_4
+#undef VSETLANE_IMPL_2
+
+#undef VSETQLANE_IMPL_16
+#undef VSETQLANE_IMPL_8
+#undef VSETQLANE_IMPL_4
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_SET_LANE_H */
diff --git a/src/core/NEON/wrapper/intrinsics/shr.h b/src/core/NEON/wrapper/intrinsics/shr.h
new file mode 100644
index 0000000000..6ccb9cdf92
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/shr.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef ARM_COMPUTE_WRAPPER_SHR_H
+#define ARM_COMPUTE_WRAPPER_SHR_H
+
+#include <arm_neon.h>
+#include <type_traits>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VQRSHRN_IMPL(half_vtype, vtype, prefix, postfix) \
+ template <int b> \
+ inline half_vtype vqrshrn(const vtype &a) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+VQRSHRN_IMPL(int8x8_t, int16x8_t, vqrshrn_n, s16)
+VQRSHRN_IMPL(uint8x8_t, uint16x8_t, vqrshrn_n, u16)
+VQRSHRN_IMPL(int16x4_t, int32x4_t, vqrshrn_n, s32)
+VQRSHRN_IMPL(uint16x4_t, uint32x4_t, vqrshrn_n, u32)
+VQRSHRN_IMPL(int32x2_t, int64x2_t, vqrshrn_n, s64)
+VQRSHRN_IMPL(uint32x2_t, uint64x2_t, vqrshrn_n, u64)
+
+#undef VQRSHRN_IMPL
+
+#ifdef __aarch64__
+#define VQRSHRN_SCALAR_IMPL(half_vtype, vtype, prefix, postfix) \
+ template <int b> \
+ inline half_vtype vqrshrn(const vtype &a) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VQRSHRN_SCALAR_IMPL(int8_t, int16_t, vqrshrnh_n, s16)
+VQRSHRN_SCALAR_IMPL(uint8_t, uint16_t, vqrshrnh_n, u16)
+VQRSHRN_SCALAR_IMPL(int16_t, int32_t, vqrshrns_n, s32)
+VQRSHRN_SCALAR_IMPL(uint16_t, uint32_t, vqrshrns_n, u32)
+VQRSHRN_SCALAR_IMPL(int32_t, int64_t, vqrshrnd_n, s64)
+VQRSHRN_SCALAR_IMPL(uint32_t, uint64_t, vqrshrnd_n, u64)
+
+#undef VQRSHRN_SCALAR_IMPL
+#endif // __aarch64__
+
+// This function is the mixed version of VQRSHRN and VQRSHRUN.
+// The input vector is always a signed integer vector, while the returned vector
+// can be either signed or unsigned depending on the signedness of the scalar type T.
+#define VQRSHRN_EX_IMPL(half_vtype, vtype, prefix_signed, prefix_unsigned, postfix) \
+ template <int b, typename T> \
+ inline typename std::enable_if<std::is_integral<T>::value && std::is_signed<T>::value, half_vtype>::type \
+ vqrshrn_ex(const vtype &a) \
+ { \
+ return prefix_signed##_##postfix(a, b); \
+ } \
+ \
+ template <int b, typename T> \
+ inline typename std::enable_if<std::is_integral<T>::value && !std::is_signed<T>::value, u##half_vtype>::type \
+ vqrshrn_ex(const vtype &a) \
+ { \
+ return prefix_unsigned##_##postfix(a, b); \
+ }
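+// e.g. vqrshrn_ex<8, int16_t>(int32x4_t) lowers to vqrshrn_n_s32 and yields int16x4_t,
+// while vqrshrn_ex<8, uint16_t> lowers to vqrshrun_n_s32 and yields uint16x4_t.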
+VQRSHRN_EX_IMPL(int8x8_t, int16x8_t, vqrshrn_n, vqrshrun_n, s16)
+VQRSHRN_EX_IMPL(int16x4_t, int32x4_t, vqrshrn_n, vqrshrun_n, s32)
+VQRSHRN_EX_IMPL(int32x2_t, int64x2_t, vqrshrn_n, vqrshrun_n, s64)
+#undef VQRSHRN_EX_IMPL
+
+#define VSHR_IMPL(vtype, prefix, postfix) \
+ template <int b> \
+ inline vtype vshr_n(const vtype &a) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+VSHR_IMPL(uint8x8_t, vshr_n, u8)
+VSHR_IMPL(int8x8_t, vshr_n, s8)
+#undef VSHR_IMPL
+
+#define VSHRQ_IMPL(vtype, prefix, postfix) \
+ template <int b> \
+ inline vtype vshrq_n(const vtype &a) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+VSHRQ_IMPL(uint32x4_t, vshrq_n, u32)
+VSHRQ_IMPL(int32x4_t, vshrq_n, s32)
+#undef VSHRQ_IMPL
+
+#ifdef __aarch64__
+#define VSHRQ_SCALAR_IMPL(vtype, prefix, postfix) \
+ template <int b> \
+ inline vtype vshrq_n(const vtype &a) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+VSHRQ_SCALAR_IMPL(uint32_t, vshrd_n, u64)
+VSHRQ_SCALAR_IMPL(int32_t, vshrd_n, s64)
+
+#undef VSHRQ_SCALAR_IMPL
+#endif // __aarch64__
+
+#ifdef __aarch64__
+#define VQRSHRN_EX_SCALAR_IMPL(half_vtype, vtype, prefix_signed, prefix_unsigned, postfix) \
+ template <int b, typename T> \
+ inline typename std::enable_if<std::is_integral<T>::value && std::is_signed<T>::value, half_vtype>::type \
+ vqrshrn_ex(const vtype &a) \
+ { \
+ return prefix_signed##_##postfix(a, b); \
+ } \
+ \
+ template <int b, typename T> \
+ inline typename std::enable_if<std::is_integral<T>::value && !std::is_signed<T>::value, u##half_vtype>::type \
+ vqrshrn_ex(const vtype &a) \
+ { \
+ return prefix_unsigned##_##postfix(a, b); \
+ }
+
+VQRSHRN_EX_SCALAR_IMPL(int8_t, int16_t, vqrshrnh_n, vqrshrunh_n, s16)
+VQRSHRN_EX_SCALAR_IMPL(int16_t, int32_t, vqrshrns_n, vqrshruns_n, s32)
+VQRSHRN_EX_SCALAR_IMPL(int32_t, int64_t, vqrshrnd_n, vqrshrund_n, s64)
+
+#undef VQRSHRN_EX_SCALAR_IMPL
+#endif // __aarch64__
+
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_SHR_H */
diff --git a/src/core/NEON/wrapper/intrinsics/sin.h b/src/core/NEON/wrapper/intrinsics/sin.h
new file mode 100644
index 0000000000..d24fdfa816
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/sin.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2019-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_SIN_H
+#define ARM_COMPUTE_WRAPPER_SIN_H
+
+#include "src/core/NEON/NEMath.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VSIN_IMPL(vtype, prefix, postfix) \
+ inline vtype vsin(const vtype &a) \
+ { \
+ return prefix##_##postfix(a); \
+ }
+
+#define VSIN_IMPL_INT(vtype, prefix, postfix) \
+ inline vtype vsin(const vtype &a) \
+ { \
+ ARM_COMPUTE_UNUSED(a); \
+ ARM_COMPUTE_ERROR("Not supported"); \
+ }
+
+VSIN_IMPL(float32x4_t, vsinq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VSIN_IMPL(float16x8_t, vsinq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VSIN_IMPL_INT(int32x4_t, vsinq, s32)
+
+#undef VSIN_IMPL
+#undef VSIN_IMPL_INT
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_SIN_H */
diff --git a/src/core/NEON/wrapper/intrinsics/sqrt.h b/src/core/NEON/wrapper/intrinsics/sqrt.h
new file mode 100644
index 0000000000..11954cf6c9
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/sqrt.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_SQRT_H
+#define ARM_COMPUTE_WRAPPER_SQRT_H
+
+#ifdef __aarch64__
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VSQRT_IMPL(type, prefix, postfix) \
+ inline type vsqrt(const type &a) \
+ { \
+ return prefix##_##postfix(a); \
+ }
+
+VSQRT_IMPL(float32x2_t, vsqrt, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VSQRT_IMPL(float16x4_t, vsqrt, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VSQRT_IMPL(float32x4_t, vsqrtq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VSQRT_IMPL(float16x8_t, vsqrtq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+} // namespace wrapper
+} // namespace arm_compute
+
+#endif // __aarch64__
+
+#endif /* ARM_COMPUTE_WRAPPER_SQRT_H */
\ No newline at end of file
diff --git a/src/core/NEON/wrapper/intrinsics/store.h b/src/core/NEON/wrapper/intrinsics/store.h
new file mode 100644
index 0000000000..ce1b9a554e
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/store.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2018-2020, 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_STORE_H
+#define ARM_COMPUTE_WRAPPER_STORE_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VSTORE_IMPL(stype, vtype, prefix, postfix) \
+ inline void vstore(stype *ptr, vtype val) \
+ { \
+ prefix##_##postfix(ptr, val); \
+ }
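+// e.g. vstore(uint8_t*, uint8x8_t) lowers to vst1_u8; the x2 overloads lower to
+// vst2, which stores the two registers interleaved.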
+
+VSTORE_IMPL(uint8_t, uint8x8_t, vst1, u8)
+VSTORE_IMPL(uint8_t, uint8x8x2_t, vst2, u8)
+VSTORE_IMPL(int8_t, int8x8_t, vst1, s8)
+VSTORE_IMPL(int8_t, int8x8x2_t, vst2, s8)
+VSTORE_IMPL(uint16_t, uint16x4_t, vst1, u16)
+VSTORE_IMPL(int16_t, int16x4_t, vst1, s16)
+VSTORE_IMPL(uint32_t, uint32x2_t, vst1, u32)
+VSTORE_IMPL(int32_t, int32x2_t, vst1, s32)
+VSTORE_IMPL(float, float32x2_t, vst1, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VSTORE_IMPL(float16_t, float16x4_t, vst1, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VSTORE_IMPL(uint8_t, uint8x16_t, vst1q, u8)
+VSTORE_IMPL(int8_t, int8x16_t, vst1q, s8)
+VSTORE_IMPL(uint16_t, uint16x8_t, vst1q, u16)
+VSTORE_IMPL(int16_t, int16x8_t, vst1q, s16)
+VSTORE_IMPL(uint32_t, uint32x4_t, vst1q, u32)
+VSTORE_IMPL(int32_t, int32x4_t, vst1q, s32)
+VSTORE_IMPL(float, float32x4_t, vst1q, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VSTORE_IMPL(float16_t, float16x8_t, vst1q, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VSTORE_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_STORE_H */
diff --git a/src/core/NEON/wrapper/intrinsics/sub.h b/src/core/NEON/wrapper/intrinsics/sub.h
new file mode 100644
index 0000000000..20436714ef
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/sub.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2018-2020, 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_SUB_H
+#define ARM_COMPUTE_WRAPPER_SUB_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VSUB_IMPL(stype, vtype, prefix, postfix) \
+ inline vtype vsub(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VSUB_IMPL(uint8x8_t, uint8x8_t, vsub, u8)
+VSUB_IMPL(int8x8_t, int8x8_t, vsub, s8)
+VSUB_IMPL(uint16x4_t, uint16x4_t, vsub, u16)
+VSUB_IMPL(int16x4_t, int16x4_t, vsub, s16)
+VSUB_IMPL(uint32x2_t, uint32x2_t, vsub, u32)
+VSUB_IMPL(int32x2_t, int32x2_t, vsub, s32)
+VSUB_IMPL(uint64x1_t, uint64x1_t, vsub, u64)
+VSUB_IMPL(int64x1_t, int64x1_t, vsub, s64)
+VSUB_IMPL(float32x2_t, float32x2_t, vsub, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VSUB_IMPL(float16x4_t, float16x4_t, vsub, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VSUB_IMPL(uint8x16_t, uint8x16_t, vsubq, u8)
+VSUB_IMPL(int8x16_t, int8x16_t, vsubq, s8)
+VSUB_IMPL(uint16x8_t, uint16x8_t, vsubq, u16)
+VSUB_IMPL(int16x8_t, int16x8_t, vsubq, s16)
+VSUB_IMPL(uint32x4_t, uint32x4_t, vsubq, u32)
+VSUB_IMPL(int32x4_t, int32x4_t, vsubq, s32)
+VSUB_IMPL(uint64x2_t, uint64x2_t, vsubq, u64)
+VSUB_IMPL(int64x2_t, int64x2_t, vsubq, s64)
+VSUB_IMPL(float32x4_t, float32x4_t, vsubq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VSUB_IMPL(float16x8_t, float16x8_t, vsubq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VSUB_IMPL
+
+// VQSUB: Vector saturating sub (No notion of saturation for floating point)
+#define VQSUB_IMPL(stype, vtype, prefix, postfix) \
+ inline vtype vqsub(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
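+// e.g. vqsub on int8x8_t lowers to vqsub_s8 and clamps at the type's limits;
+// the floating-point instantiations below fall back to plain vsub.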
+
+VQSUB_IMPL(uint8x8_t, uint8x8_t, vqsub, u8)
+VQSUB_IMPL(int8x8_t, int8x8_t, vqsub, s8)
+VQSUB_IMPL(uint16x4_t, uint16x4_t, vqsub, u16)
+VQSUB_IMPL(int16x4_t, int16x4_t, vqsub, s16)
+VQSUB_IMPL(uint32x2_t, uint32x2_t, vqsub, u32)
+VQSUB_IMPL(int32x2_t, int32x2_t, vqsub, s32)
+VQSUB_IMPL(uint64x1_t, uint64x1_t, vqsub, u64)
+VQSUB_IMPL(int64x1_t, int64x1_t, vqsub, s64)
+VQSUB_IMPL(float32x2_t, float32x2_t, vsub, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VQSUB_IMPL(float16x4_t, float16x4_t, vsub, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VQSUB_IMPL(uint8x16_t, uint8x16_t, vqsubq, u8)
+VQSUB_IMPL(int8x16_t, int8x16_t, vqsubq, s8)
+VQSUB_IMPL(uint16x8_t, uint16x8_t, vqsubq, u16)
+VQSUB_IMPL(int16x8_t, int16x8_t, vqsubq, s16)
+VQSUB_IMPL(uint32x4_t, uint32x4_t, vqsubq, u32)
+VQSUB_IMPL(int32x4_t, int32x4_t, vqsubq, s32)
+VQSUB_IMPL(uint64x2_t, uint64x2_t, vqsubq, u64)
+VQSUB_IMPL(int64x2_t, int64x2_t, vqsubq, s64)
+VQSUB_IMPL(float32x4_t, float32x4_t, vsubq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VQSUB_IMPL(float16x8_t, float16x8_t, vsubq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#undef VQSUB_IMPL
+
+#define VSUBL_IMPL(rtype, vtype, prefix, postfix) \
+ inline rtype vsubl(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VSUBL_IMPL(int16x8_t, int8x8_t, vsubl, s8)
+VSUBL_IMPL(int32x4_t, int16x4_t, vsubl, s16)
+VSUBL_IMPL(int64x2_t, int32x2_t, vsubl, s32)
+VSUBL_IMPL(uint16x8_t, uint8x8_t, vsubl, u8)
+VSUBL_IMPL(uint32x4_t, uint16x4_t, vsubl, u16)
+VSUBL_IMPL(uint64x2_t, uint32x2_t, vsubl, u32)
+
+#undef VSUBL_IMPL
+
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_SUB_H */
diff --git a/src/core/NEON/wrapper/intrinsics/svcnt.h b/src/core/NEON/wrapper/intrinsics/svcnt.h
new file mode 100644
index 0000000000..c4652504b4
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/svcnt.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_NEON_WRAPPER_INTRINSICS_SVCNT_H
+#define SRC_CORE_NEON_WRAPPER_INTRINSICS_SVCNT_H
+#if defined(__ARM_FEATURE_SVE)
+#include <arm_sve.h>
+namespace arm_compute
+{
+namespace wrapper
+{
+template <size_t element_size>
+inline uint64_t svcnt_size();
+
+template <>
+inline uint64_t svcnt_size<64>()
+{
+ return svcntd();
+}
+
+template <>
+inline uint64_t svcnt_size<32>()
+{
+ return svcntw();
+}
+
+template <>
+inline uint64_t svcnt_size<16>()
+{
+ return svcnth();
+}
+
+template <>
+inline uint64_t svcnt_size<8>()
+{
+ return svcntb();
+}
+
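+// e.g. svcnt<float>() resolves to svcnt_size<32>() and returns svcntw(),
+// the number of 32-bit lanes at the current SVE vector length.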
+template <typename T>
+inline uint64_t svcnt()
+{
+ return svcnt_size<sizeof(T) * 8>();
+}
+} // namespace wrapper
+} // namespace arm_compute
+
+#endif /* defined(__ARM_FEATURE_SVE) */
+#endif /* SRC_CORE_NEON_WRAPPER_INTRINSICS_SVCNT_H */
diff --git a/src/core/NEON/wrapper/intrinsics/svcvt.h b/src/core/NEON/wrapper/intrinsics/svcvt.h
new file mode 100644
index 0000000000..00ef7b7eb3
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/svcvt.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_NEON_WRAPPER_INTRINSICS_SVCVT_H
+#define SRC_CORE_NEON_WRAPPER_INTRINSICS_SVCVT_H
+#if defined(__ARM_FEATURE_SVE)
+#include <arm_sve.h>
+#include <type_traits>
+namespace arm_compute
+{
+namespace wrapper
+{
+#define SVCVT_Z_TO_F32_IMPL(vtype) \
+ template <typename T> \
+ inline typename std::enable_if<std::is_same<T, float>::value, svfloat32_t>::type svcvt_z(svbool_t pg, \
+ const vtype &a) \
+ { \
+ return svcvt_f32_z(pg, a); \
+ }
+
+SVCVT_Z_TO_F32_IMPL(svuint32_t)
+SVCVT_Z_TO_F32_IMPL(svint32_t)
+SVCVT_Z_TO_F32_IMPL(svfloat16_t)
+
+#undef SVCVT_Z_TO_F32_IMPL
+
+#define SVCVT_Z_TO_F16_IMPL(vtype) \
+ template <typename T> \
+ inline typename std::enable_if<std::is_same<T, float16_t>::value, svfloat16_t>::type svcvt_z(svbool_t pg, \
+ const vtype &a) \
+ { \
+ return svcvt_f16_z(pg, a); \
+ }
+
+SVCVT_Z_TO_F16_IMPL(svuint32_t)
+SVCVT_Z_TO_F16_IMPL(svint32_t)
+SVCVT_Z_TO_F16_IMPL(svfloat32_t)
+
+#undef SVCVT_Z_TO_F16_IMPL
+
+#define SVCVT_Z_TO_S32_IMPL(vtype) \
+ template <typename T> \
+ inline typename std::enable_if<std::is_same<T, int32_t>::value, svint32_t>::type svcvt_z(svbool_t pg, \
+ const vtype &a) \
+ { \
+ return svcvt_s32_z(pg, a); \
+ }
+
+SVCVT_Z_TO_S32_IMPL(svfloat16_t)
+SVCVT_Z_TO_S32_IMPL(svfloat32_t)
+
+#undef SVCVT_Z_TO_S32_IMPL
+
+} // namespace wrapper
+} // namespace arm_compute
+
+#endif /* defined(__ARM_FEATURE_SVE) */
+#endif /* SRC_CORE_NEON_WRAPPER_INTRINSICS_SVCVT_H */
diff --git a/src/core/NEON/wrapper/intrinsics/svdup_n.h b/src/core/NEON/wrapper/intrinsics/svdup_n.h
new file mode 100644
index 0000000000..9c42c86db7
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/svdup_n.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2020, 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_NEON_WRAPPER_INTRINSICS_SVDUP_N_H
+#define SRC_CORE_NEON_WRAPPER_INTRINSICS_SVDUP_N_H
+#if defined(__ARM_FEATURE_SVE)
+#include <arm_sve.h>
+namespace arm_compute
+{
+namespace wrapper
+{
+#define SVDUP_N_IMPL(etype, vtype, postfix) \
+ inline vtype svdup_n(etype a) \
+ { \
+ return svdup_n_##postfix(a); \
+ }
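+// e.g. svdup_n(int32_t(1)) lowers to svdup_n_s32, broadcasting the scalar to every lane.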
+
+SVDUP_N_IMPL(int8_t, svint8_t, s8)
+SVDUP_N_IMPL(int16_t, svint16_t, s16)
+SVDUP_N_IMPL(int32_t, svint32_t, s32)
+SVDUP_N_IMPL(int64_t, svint64_t, s64)
+SVDUP_N_IMPL(uint8_t, svuint8_t, u8)
+SVDUP_N_IMPL(uint16_t, svuint16_t, u16)
+SVDUP_N_IMPL(uint32_t, svuint32_t, u32)
+SVDUP_N_IMPL(uint64_t, svuint64_t, u64)
+SVDUP_N_IMPL(float16_t, svfloat16_t, f16)
+SVDUP_N_IMPL(float, svfloat32_t, f32)
+SVDUP_N_IMPL(float64_t, svfloat64_t, f64)
+#if __ARM_FEATURE_SVE_BF16
+SVDUP_N_IMPL(bfloat16_t, svbfloat16_t, bf16)
+#endif // #if __ARM_FEATURE_SVE_BF16
+
+#undef SVDUP_N_IMPL
+
+} // namespace wrapper
+} // namespace arm_compute
+
+#endif /* defined(__ARM_FEATURE_SVE) */
+#endif /* SRC_CORE_NEON_WRAPPER_INTRINSICS_SVDUP_N_H */
diff --git a/src/core/NEON/wrapper/intrinsics/svexp.h b/src/core/NEON/wrapper/intrinsics/svexp.h
new file mode 100644
index 0000000000..1e8bce3960
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/svexp.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_NEON_WRAPPER_INTRINSICS_SVEXP_H
+#define SRC_CORE_NEON_WRAPPER_INTRINSICS_SVEXP_H
+
+#if defined(__ARM_FEATURE_SVE)
+#include "src/core/NEON/SVEMath.h"
+
+#include <arm_sve.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define SVEXP_IMPL(vtype, postfix) \
+ inline vtype svexp_z(svbool_t pg, const vtype &a) \
+ { \
+ return svexp_##postfix##_z(pg, a); \
+ }
+
+SVEXP_IMPL(svfloat32_t, f32)
+SVEXP_IMPL(svfloat16_t, f16)
+
+#undef SVEXP_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+
+#endif /* defined(__ARM_FEATURE_SVE) */
+#endif /* SRC_CORE_NEON_WRAPPER_INTRINSICS_SVEXP_H */
diff --git a/src/core/NEON/wrapper/intrinsics/svlog.h b/src/core/NEON/wrapper/intrinsics/svlog.h
new file mode 100644
index 0000000000..b4630e20ed
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/svlog.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_NEON_WRAPPER_INTRINSICS_SVLOG_H
+#define SRC_CORE_NEON_WRAPPER_INTRINSICS_SVLOG_H
+#if defined(__ARM_FEATURE_SVE)
+#include "src/core/NEON/SVEMath.h"
+
+#include <arm_sve.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define SVLOG_IMPL(vtype, postfix) \
+ inline vtype svlog_z(svbool_t pg, const vtype &a) \
+ { \
+ return svlog_##postfix##_z(pg, a); \
+ }
+
+SVLOG_IMPL(svfloat32_t, f32)
+SVLOG_IMPL(svfloat16_t, f16)
+
+#undef SVLOG_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* defined(__ARM_FEATURE_SVE) */
+#endif /* SRC_CORE_NEON_WRAPPER_INTRINSICS_SVLOG_H */
diff --git a/src/core/NEON/wrapper/intrinsics/svpow.h b/src/core/NEON/wrapper/intrinsics/svpow.h
new file mode 100644
index 0000000000..0f58d758cb
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/svpow.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2020-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_NEON_WRAPPER_INTRINSICS_SVPOW_H
+#define SRC_CORE_NEON_WRAPPER_INTRINSICS_SVPOW_H
+#if defined(__ARM_FEATURE_SVE)
+#include "src/core/NEON/SVEMath.h"
+namespace arm_compute
+{
+namespace wrapper
+{
+#define SVPOW_Z_IMPL(type, postfix) \
+ inline type svpow_z(svbool_t pg, const type &a, const type &b) \
+ { \
+ return svpow_##postfix##_z(pg, a, b); \
+ }
+
+#define SVPOW_Z_IMPL_INT(type, postfix) \
+ inline type svpow_z(svbool_t pg, const type &a, const type &b) \
+ { \
+ ARM_COMPUTE_UNUSED(pg, a, b); \
+ ARM_COMPUTE_ERROR("Not supported"); \
+ }
+
+SVPOW_Z_IMPL(svfloat32_t, f32)
+SVPOW_Z_IMPL(svfloat16_t, f16)
+SVPOW_Z_IMPL_INT(svint16_t, s16)
+
+#undef SVPOW_Z_IMPL
+#undef SVPOW_Z_IMPL_INT
+
+} // namespace wrapper
+} // namespace arm_compute
+
+#endif /* defined(__ARM_FEATURE_SVE) */
+#endif /* SRC_CORE_NEON_WRAPPER_INTRINSICS_SVPOW_H */
\ No newline at end of file
diff --git a/src/core/NEON/wrapper/intrinsics/svptrue.h b/src/core/NEON/wrapper/intrinsics/svptrue.h
new file mode 100644
index 0000000000..6ed00bccbf
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/svptrue.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_NEON_WRAPPER_INTRINSICS_SVPTRUE_H
+#define SRC_CORE_NEON_WRAPPER_INTRINSICS_SVPTRUE_H
+#if defined(__ARM_FEATURE_SVE)
+#include <arm_sve.h>
+namespace arm_compute
+{
+namespace wrapper
+{
+template <size_t element_size>
+inline svbool_t svptrue_size();
+
+template <>
+inline svbool_t svptrue_size<64>()
+{
+ return svptrue_b64();
+}
+
+template <>
+inline svbool_t svptrue_size<32>()
+{
+ return svptrue_b32();
+}
+
+template <>
+inline svbool_t svptrue_size<16>()
+{
+ return svptrue_b16();
+}
+
+template <>
+inline svbool_t svptrue_size<8>()
+{
+ return svptrue_b8();
+}
+
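+// e.g. svptrue<uint16_t>() resolves to svptrue_size<16>() and returns svptrue_b16(),
+// an all-true predicate over 16-bit elements.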
+template <typename T>
+svbool_t svptrue()
+{
+ return svptrue_size<sizeof(T) * 8>();
+}
+} // namespace wrapper
+} // namespace arm_compute
+
+#endif /* defined(__ARM_FEATURE_SVE) */
+#endif /* SRC_CORE_NEON_WRAPPER_INTRINSICS_SVPTRUE_H */
diff --git a/src/core/NEON/wrapper/intrinsics/svqadd.h b/src/core/NEON/wrapper/intrinsics/svqadd.h
new file mode 100644
index 0000000000..fd45d82104
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/svqadd.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2020-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_NEON_WRAPPER_INTRINSICS_SVQADD_H
+#define SRC_CORE_NEON_WRAPPER_INTRINSICS_SVQADD_H
+#if defined(__ARM_FEATURE_SVE)
+#include <arm_sve.h>
+namespace arm_compute
+{
+namespace wrapper
+{
+#define SVQADD_IMPL_F(type, postfix, svppostfix) \
+ inline type svqadd(const type &val1, const type &val2) \
+ { \
+ return svadd_##postfix##_z(svptrue_##svppostfix(), val1, val2); \
+ }
+
+SVQADD_IMPL_F(svfloat32_t, f32, b32)
+SVQADD_IMPL_F(svfloat16_t, f16, b16)
+#undef SVQADD_IMPL_F
+
+#define SVQADD_IMPL(type, postfix) \
+ inline type svqadd(const type &val1, const type &val2) \
+ { \
+ return svqadd_##postfix(val1, val2); \
+ }
+
+SVQADD_IMPL(svint32_t, s32)
+SVQADD_IMPL(svint16_t, s16)
+SVQADD_IMPL(svint8_t, s8)
+SVQADD_IMPL(svuint32_t, u32)
+SVQADD_IMPL(svuint16_t, u16)
+SVQADD_IMPL(svuint8_t, u8)
+
+#undef SVQADD_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+
+#endif /* defined(__ARM_FEATURE_SVE) */
+#endif /* SRC_CORE_NEON_WRAPPER_INTRINSICS_SVQADD_H */
\ No newline at end of file
diff --git a/src/core/NEON/wrapper/intrinsics/svreinterpret.h b/src/core/NEON/wrapper/intrinsics/svreinterpret.h
new file mode 100644
index 0000000000..e98742676d
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/svreinterpret.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_NEON_WRAPPER_INTRINSICS_SVREINTERPRET_H
+#define SRC_CORE_NEON_WRAPPER_INTRINSICS_SVREINTERPRET_H
+#if defined(__ARM_FEATURE_SVE)
+#include <arm_sve.h>
+#include <type_traits>
+namespace arm_compute
+{
+namespace wrapper
+{
+#define SVREINTERPRET_TO_F32_IMPL(vtype) \
+ template <typename T> \
+ inline typename std::enable_if<std::is_same<T, float>::value, svfloat32_t>::type svreinterpret(const vtype &a) \
+ { \
+ return svreinterpret_f32(a); \
+ }
+
+SVREINTERPRET_TO_F32_IMPL(svuint32_t)
+#undef SVREINTERPRET_TO_F32_IMPL
+
+#define SVREINTERPRET_TO_U32_IMPL(vtype) \
+ template <typename T> \
+ inline typename std::enable_if<std::is_same<T, uint32_t>::value, svuint32_t>::type svreinterpret(const vtype &a) \
+ { \
+ return svreinterpret_u32(a); \
+ }
+
+SVREINTERPRET_TO_U32_IMPL(svint32_t)
+SVREINTERPRET_TO_U32_IMPL(svfloat32_t)
+#undef SVREINTERPRET_TO_U32_IMPL
+
+} // namespace wrapper
+} // namespace arm_compute
+
+#endif /* defined(__ARM_FEATURE_SVE) */
+#endif /* SRC_CORE_NEON_WRAPPER_INTRINSICS_SVREINTERPRET_H */
\ No newline at end of file
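
Usage sketch (illustrative only, not part of this patch): selecting the
destination type through the template argument allows bit-level float
manipulation, e.g. clearing the sign bit. abs_via_bits is a hypothetical name.

    #include <arm_sve.h>
    #include "src/core/NEON/wrapper/intrinsics/svreinterpret.h"

    // |v| computed by masking off the sign bit through a u32 round-trip;
    // lanes inactive in pg are zeroed by the _z-predicated AND.
    svfloat32_t abs_via_bits(svbool_t pg, svfloat32_t v)
    {
        const svuint32_t bits   = arm_compute::wrapper::svreinterpret<uint32_t>(v);
        const svuint32_t masked = svand_n_u32_z(pg, bits, 0x7FFFFFFFu);
        return arm_compute::wrapper::svreinterpret<float>(masked);
    }
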
diff --git a/src/core/NEON/wrapper/intrinsics/svsin.h b/src/core/NEON/wrapper/intrinsics/svsin.h
new file mode 100644
index 0000000000..05d88d0250
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/svsin.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_NEON_WRAPPER_INTRINSICS_SVSIN_H
+#define SRC_CORE_NEON_WRAPPER_INTRINSICS_SVSIN_H
+#if defined(__ARM_FEATURE_SVE)
+#include "src/core/NEON/SVEMath.h"
+namespace arm_compute
+{
+namespace wrapper
+{
+#define SVSIN_Z_IMPL(type, postfix) \
+ inline type svsin_z(svbool_t pg, const type &val) \
+ { \
+ return svsin_##postfix##_z(pg, val); \
+ }
+
+SVSIN_Z_IMPL(svfloat32_t, f32)
+SVSIN_Z_IMPL(svfloat16_t, f16)
+
+#undef SVSIN_Z_IMPL
+
+} // namespace wrapper
+} // namespace arm_compute
+
+#endif /* defined(__ARM_FEATURE_SVE) */
+#endif /* SRC_CORE_NEON_WRAPPER_INTRINSICS_SVSIN_H */
\ No newline at end of file
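
Usage sketch (illustrative only, not part of this patch): svsin_z forwards to
the SVEMath approximation; with an all-true predicate every lane is computed.

    #include <arm_sve.h>
    #include "src/core/NEON/wrapper/intrinsics/svsin.h"

    // Element-wise sine over a full f32 vector.
    svfloat32_t sin_all_lanes(svfloat32_t v)
    {
        return arm_compute::wrapper::svsin_z(svptrue_b32(), v);
    }
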
diff --git a/src/core/NEON/wrapper/intrinsics/svwhilelt.h b/src/core/NEON/wrapper/intrinsics/svwhilelt.h
new file mode 100644
index 0000000000..f0f84a9508
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/svwhilelt.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_NEON_WRAPPER_INTRINSICS_SVWHILELT_H
+#define SRC_CORE_NEON_WRAPPER_INTRINSICS_SVWHILELT_H
+#if defined(__ARM_FEATURE_SVE)
+#include <arm_sve.h>
+namespace arm_compute
+{
+namespace wrapper
+{
+#define SVWHILELT_IMPL(type) \
+ template <size_t element_size> \
+ inline svbool_t svwhilelt_size(type a, type b); \
+ \
+ template <> \
+ inline svbool_t svwhilelt_size<64>(type a, type b) \
+ { \
+ return svwhilelt_b64(a, b); \
+ } \
+ template <> \
+ inline svbool_t svwhilelt_size<32>(type a, type b) \
+ { \
+ return svwhilelt_b32(a, b); \
+ } \
+ template <> \
+ inline svbool_t svwhilelt_size<16>(type a, type b) \
+ { \
+ return svwhilelt_b16(a, b); \
+ } \
+ template <> \
+ inline svbool_t svwhilelt_size<8>(type a, type b) \
+ { \
+ return svwhilelt_b8(a, b); \
+ }
+
+SVWHILELT_IMPL(int32_t)
+SVWHILELT_IMPL(uint32_t)
+SVWHILELT_IMPL(int64_t)
+SVWHILELT_IMPL(uint64_t)
+
+#undef SVWHILELT_IMPL
+
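+// Dispatches to the svwhilelt_b8/_b16/_b32/_b64 variant whose predicate
+// granularity matches the element size (in bits) of ScalarType.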
+template <typename ScalarType, typename IndexType>
+inline svbool_t svwhilelt(IndexType a, IndexType b)
+{
+ return svwhilelt_size<sizeof(ScalarType) * 8>(a, b);
+}
+} // namespace wrapper
+} // namespace arm_compute
+
+#endif /* defined(__ARM_FEATURE_SVE) */
+#endif /* SRC_CORE_NEON_WRAPPER_INTRINSICS_SVWHILELT_H */
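
Usage sketch (illustrative only, not part of this patch; src, dst and len are
hypothetical names): the vector-length-agnostic loop shape these helpers
target. svwhilelt<float> resolves to svwhilelt_b32 via sizeof(float) * 8.

    #include <arm_sve.h>
    #include "src/core/NEON/wrapper/intrinsics/svsin.h"
    #include "src/core/NEON/wrapper/intrinsics/svwhilelt.h"

    namespace wr = arm_compute::wrapper;

    void sin_f32(const float *src, float *dst, int len)
    {
        int x = 0;
        svbool_t pg = wr::svwhilelt<float>(x, len);
        do
        {
            const svfloat32_t in = svld1(pg, src + x);
            svst1(pg, dst + x, wr::svsin_z(pg, in));
            x += svcntw(); // number of f32 lanes per vector register
            pg = wr::svwhilelt<float>(x, len);
        }
        while(svptest_any(svptrue_b32(), pg));
    }
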
diff --git a/src/core/NEON/wrapper/intrinsics/tanh.h b/src/core/NEON/wrapper/intrinsics/tanh.h
new file mode 100644
index 0000000000..e74f0e86fe
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/tanh.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_TANH_H
+#define ARM_COMPUTE_WRAPPER_TANH_H
+
+#include "src/core/NEON/NEMath.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
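+// NEON has no tanh instruction; vtanhq_f32/vtanhq_f16 are the approximate
+// implementations provided by NEMath.h.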
+#define VTANH_IMPL(vtype, prefix, postfix) \
+ inline vtype vtanh(const vtype &a) \
+ { \
+ return prefix##_##postfix(a); \
+ }
+
+VTANH_IMPL(float32x4_t, vtanhq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VTANH_IMPL(float16x8_t, vtanhq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#undef VTANH_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_TANH_H */
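
Usage sketch (illustrative only, not part of this patch):

    #include <arm_neon.h>
    #include "src/core/NEON/wrapper/intrinsics/tanh.h"

    // tanh applied to four packed floats via the NEMath approximation.
    float32x4_t tanh4(float32x4_t v)
    {
        return arm_compute::wrapper::vtanh(v); // vtanhq_f32(v)
    }
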
diff --git a/src/core/NEON/wrapper/intrinsics/tbl.h b/src/core/NEON/wrapper/intrinsics/tbl.h
new file mode 100644
index 0000000000..05e6c1fc13
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/tbl.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_TBL_H
+#define ARM_COMPUTE_WRAPPER_TBL_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
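+// vtbl2 looks each index byte up in a 16-byte, two-vector table; indices
+// outside [0, 15] produce 0 in the corresponding output lane.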
+#define VTBL_IMPL(stype, vtype, prefix, postfix) \
+ inline vtype vtbl(const stype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VTBL_IMPL(uint8x8x2_t, uint8x8_t, vtbl2, u8)
+VTBL_IMPL(int8x8x2_t, int8x8_t, vtbl2, s8)
+
+#undef VTBL_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_TBL_H */
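
Usage sketch (illustrative only, not part of this patch; the function name is
hypothetical): gather the upper eight bytes of a two-vector table in reverse.

    #include <arm_neon.h>
    #include "src/core/NEON/wrapper/intrinsics/tbl.h"

    uint8x8_t upper_half_reversed(uint8x8x2_t table)
    {
        const uint8_t rev_idx[8] = { 15, 14, 13, 12, 11, 10, 9, 8 };
        return arm_compute::wrapper::vtbl(table, vld1_u8(rev_idx)); // vtbl2_u8
    }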