about summary refs log tree commit diff
path: root/arm_compute/core/NEON/wrapper/intrinsics
diff options
context:
space:
mode:
Diffstat (limited to 'arm_compute/core/NEON/wrapper/intrinsics')
-rw-r--r--arm_compute/core/NEON/wrapper/intrinsics/add.h82
-rw-r--r--arm_compute/core/NEON/wrapper/intrinsics/eor.h56
-rw-r--r--arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h2
-rw-r--r--arm_compute/core/NEON/wrapper/intrinsics/reinterpret.h43
4 files changed, 182 insertions, 1 deletions
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/add.h b/arm_compute/core/NEON/wrapper/intrinsics/add.h
index 4f4d244489..1839170485 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/add.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/add.h
@@ -63,13 +63,13 @@ VADD_IMPL(float16x8_t, float16x8_t, vaddq, f16)
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
#undef VADD_IMPL
+// VQADD: Vector saturating add (No notion of saturation for floating point)
#define VQADD_IMPL(stype, vtype, prefix, postfix) \
inline vtype vqadd(const vtype &a, const vtype &b) \
{ \
return prefix##_##postfix(a, b); \
}
-// VQADD: Vector saturating add (No notion of saturation for floating point)
VQADD_IMPL(uint8x8_t, uint8x8_t, vqadd, u8)
VQADD_IMPL(int8x8_t, int8x8_t, vqadd, s8)
VQADD_IMPL(uint16x4_t, uint16x4_t, vqadd, u16)
@@ -96,6 +96,86 @@ VQADD_IMPL(float32x4_t, float32x4_t, vaddq, f32)
VQADD_IMPL(float16x8_t, float16x8_t, vaddq, f16)
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
#undef VQADD_IMPL
+
+// VADDW: Vector widening add
+#define VADDW_IMPL(wtype, vtype, prefix, postfix) \
+ inline wtype vaddw(const wtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VADDW_IMPL(uint16x8_t, uint8x8_t, vaddw, u8)
+VADDW_IMPL(int16x8_t, int8x8_t, vaddw, s8)
+VADDW_IMPL(uint32x4_t, uint16x4_t, vaddw, u16)
+VADDW_IMPL(int32x4_t, int16x4_t, vaddw, s16)
+VADDW_IMPL(uint64x2_t, uint32x2_t, vaddw, u32)
+VADDW_IMPL(int64x2_t, int32x2_t, vaddw, s32)
+#undef VADDW_IMPL
+
+// VADDL: Vector long add
+#define VADDL_IMPL(wtype, vtype, prefix, postfix) \
+ inline wtype vaddl(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VADDL_IMPL(uint16x8_t, uint8x8_t, vaddl, u8)
+VADDL_IMPL(int16x8_t, int8x8_t, vaddl, s8)
+VADDL_IMPL(uint32x4_t, uint16x4_t, vaddl, u16)
+VADDL_IMPL(int32x4_t, int16x4_t, vaddl, s16)
+VADDL_IMPL(uint64x2_t, uint32x2_t, vaddl, u32)
+VADDL_IMPL(int64x2_t, int32x2_t, vaddl, s32)
+#undef VADDL_IMPL
+
+#if defined(__aarch64__)
+// VADDV: Across vector add
+#define VADDV_IMPL(stype, vtype, prefix, postfix) \
+ inline stype vaddv(const vtype &a) \
+ { \
+ return prefix##_##postfix(a); \
+ }
+
+VADDV_IMPL(uint8_t, uint8x8_t, vaddv, u8)
+VADDV_IMPL(int8_t, int8x8_t, vaddv, s8)
+VADDV_IMPL(uint16_t, uint16x4_t, vaddv, u16)
+VADDV_IMPL(int16_t, int16x4_t, vaddv, s16)
+VADDV_IMPL(uint32_t, uint32x2_t, vaddv, u32)
+VADDV_IMPL(int32_t, int32x2_t, vaddv, s32)
+VADDV_IMPL(float, float32x2_t, vaddv, f32)
+
+VADDV_IMPL(uint8_t, uint8x16_t, vaddvq, u8)
+VADDV_IMPL(int8_t, int8x16_t, vaddvq, s8)
+VADDV_IMPL(uint16_t, uint16x8_t, vaddvq, u16)
+VADDV_IMPL(int16_t, int16x8_t, vaddvq, s16)
+VADDV_IMPL(uint32_t, uint32x4_t, vaddvq, u32)
+VADDV_IMPL(int32_t, int32x4_t, vaddvq, s32)
+VADDV_IMPL(uint64_t, uint64x2_t, vaddvq, u64)
+VADDV_IMPL(int64_t, int64x2_t, vaddvq, s64)
+VADDV_IMPL(float, float32x4_t, vaddvq, f32)
+#undef VADDV_IMPL
+#endif // defined(__aarch64__)
+
+// VPADDL: Signed add long pairwise
+#define VPADDL_IMPL(ltype, vtype, prefix, postfix) \
+ inline ltype vpaddl(const vtype &a) \
+ { \
+ return prefix##_##postfix(a); \
+ }
+
+VPADDL_IMPL(uint16x4_t, uint8x8_t, vpaddl, u8)
+VPADDL_IMPL(int16x4_t, int8x8_t, vpaddl, s8)
+VPADDL_IMPL(uint32x2_t, uint16x4_t, vpaddl, u16)
+VPADDL_IMPL(int32x2_t, int16x4_t, vpaddl, s16)
+VPADDL_IMPL(uint64x1_t, uint32x2_t, vpaddl, u32)
+VPADDL_IMPL(int64x1_t, int32x2_t, vpaddl, s32)
+
+VPADDL_IMPL(uint16x8_t, uint8x16_t, vpaddlq, u8)
+VPADDL_IMPL(int16x8_t, int8x16_t, vpaddlq, s8)
+VPADDL_IMPL(uint32x4_t, uint16x8_t, vpaddlq, u16)
+VPADDL_IMPL(int32x4_t, int16x8_t, vpaddlq, s16)
+VPADDL_IMPL(uint64x2_t, uint32x4_t, vpaddlq, u32)
+VPADDL_IMPL(int64x2_t, int32x4_t, vpaddlq, s32)
+#undef VPADDL_IMPL
} // namespace wrapper
} // namespace arm_compute
#endif /* __ARM_COMPUTE_WRAPPER_ADD_H__ */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/eor.h b/arm_compute/core/NEON/wrapper/intrinsics/eor.h
new file mode 100644
index 0000000000..1002808327
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/eor.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_WRAPPER_EOR_H__
+#define __ARM_COMPUTE_WRAPPER_EOR_H__
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VEOR_IMPL(vtype, prefix, postfix) \
+ inline vtype veor(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VEOR_IMPL(uint8x8_t, veor, u8)
+VEOR_IMPL(int8x8_t, veor, s8)
+VEOR_IMPL(uint16x4_t, veor, u16)
+VEOR_IMPL(int16x4_t, veor, s16)
+VEOR_IMPL(uint32x2_t, veor, u32)
+VEOR_IMPL(int32x2_t, veor, s32)
+
+VEOR_IMPL(uint8x16_t, veorq, u8)
+VEOR_IMPL(int8x16_t, veorq, s8)
+VEOR_IMPL(uint16x8_t, veorq, u16)
+VEOR_IMPL(int16x8_t, veorq, s16)
+VEOR_IMPL(uint32x4_t, veorq, u32)
+VEOR_IMPL(int32x4_t, veorq, s32)
+
+#undef VEOR_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_WRAPPER_EOR_H__ */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
index 0362ca125f..6eae1cf801 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
@@ -35,6 +35,7 @@
#include "arm_compute/core/NEON/wrapper/intrinsics/combine.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/div.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/dup_n.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/eor.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/exp.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/gethigh.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/getlane.h"
@@ -56,6 +57,7 @@
#include "arm_compute/core/NEON/wrapper/intrinsics/pmax.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/pmin.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/pow.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/reinterpret.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/rev64.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/round.h"
#include "arm_compute/core/NEON/wrapper/intrinsics/setlane.h"
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/reinterpret.h b/arm_compute/core/NEON/wrapper/intrinsics/reinterpret.h
new file mode 100644
index 0000000000..0956959e20
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/reinterpret.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_WRAPPER_REINTERPRET_H__
+#define __ARM_COMPUTE_WRAPPER_REINTERPRET_H__
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+inline int32x4_t vreinterpret_s32(const uint32x4_t &val)
+{
+ return vreinterpretq_s32_u32(val);
+}
+inline int32x4_t vreinterpret_s32(const int32x4_t &val)
+{
+ return val;
+}
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_WRAPPER_REINTERPRET_H__ */