From ddb93bbf12fc9d685e7ddbef703a886d67cbda9b Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Fri, 2 Oct 2020 16:38:59 +0100
Subject: COMPMID-3637: Move wrapper to src

Signed-off-by: Georgios Pinitas
Change-Id: I524b0c4b49c7a7035b7d078b9585d77b0d438e10
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4083
Reviewed-by: Michele Di Giorgio
Reviewed-by: Michalis Spyrou
Comments-Addressed: Arm Jenkins
---
 src/core/NEON/wrapper/intrinsics/abs.h         |  75 +++++++++
 src/core/NEON/wrapper/intrinsics/add.h         | 201 ++++++++++++++++++++++
 src/core/NEON/wrapper/intrinsics/and.h         |  60 +++++++
 src/core/NEON/wrapper/intrinsics/bsl.h         |  64 +++++++
 src/core/NEON/wrapper/intrinsics/ceq.h         |  64 +++++++
 src/core/NEON/wrapper/intrinsics/cge.h         |  64 +++++++
 src/core/NEON/wrapper/intrinsics/cgt.h         |  64 +++++++
 src/core/NEON/wrapper/intrinsics/cle.h         |  64 +++++++
 src/core/NEON/wrapper/intrinsics/clt.h         |  64 +++++++
 src/core/NEON/wrapper/intrinsics/combine.h     |  53 ++++++
 src/core/NEON/wrapper/intrinsics/cvt.h         |  96 +++++++++++
 src/core/NEON/wrapper/intrinsics/div.h         |  73 ++++++++
 src/core/NEON/wrapper/intrinsics/dup_n.h       |  66 ++++++++
 src/core/NEON/wrapper/intrinsics/eor.h         |  56 +++++++
 src/core/NEON/wrapper/intrinsics/exp.h         |  56 +++++++
 src/core/NEON/wrapper/intrinsics/ext.h         |  62 +++++++
 src/core/NEON/wrapper/intrinsics/gethigh.h     |  53 ++++++
 src/core/NEON/wrapper/intrinsics/getlane.h     | 223 +++++++++++++++++++++++++
 src/core/NEON/wrapper/intrinsics/getlow.h      |  53 ++++++
 src/core/NEON/wrapper/intrinsics/intrinsics.h  |  74 ++++++++
 src/core/NEON/wrapper/intrinsics/inv.h         |  62 +++++++
 src/core/NEON/wrapper/intrinsics/invsqrt.h     |  61 +++++++
 src/core/NEON/wrapper/intrinsics/load.h        |  73 ++++++++
 src/core/NEON/wrapper/intrinsics/log.h         |  56 +++++++
 src/core/NEON/wrapper/intrinsics/max.h         |  64 +++++++
 src/core/NEON/wrapper/intrinsics/min.h         |  64 +++++++
 src/core/NEON/wrapper/intrinsics/mla.h         |  71 ++++++++
 src/core/NEON/wrapper/intrinsics/movl.h        |  49 ++++++
 src/core/NEON/wrapper/intrinsics/movn.h        |  62 +++++++
 src/core/NEON/wrapper/intrinsics/mul.h         |  64 +++++++
 src/core/NEON/wrapper/intrinsics/neg.h         |  58 +++++++
 src/core/NEON/wrapper/intrinsics/not.h         |  64 +++++++
 src/core/NEON/wrapper/intrinsics/orr.h         |  60 +++++++
 src/core/NEON/wrapper/intrinsics/pmax.h        |  53 ++++++
 src/core/NEON/wrapper/intrinsics/pmin.h        |  53 ++++++
 src/core/NEON/wrapper/intrinsics/pow.h         |  48 ++++++
 src/core/NEON/wrapper/intrinsics/qmov.h        |  49 ++++++
 src/core/NEON/wrapper/intrinsics/qmovun.h      |  46 +++++
 src/core/NEON/wrapper/intrinsics/reinterpret.h |  49 ++++++
 src/core/NEON/wrapper/intrinsics/rev64.h       |  64 +++++++
 src/core/NEON/wrapper/intrinsics/round.h       |  56 +++++++
 src/core/NEON/wrapper/intrinsics/setlane.h     | 208 +++++++++++++++++++++++
 src/core/NEON/wrapper/intrinsics/sin.h         |  57 +++++++
 src/core/NEON/wrapper/intrinsics/store.h       |  70 ++++++++
 src/core/NEON/wrapper/intrinsics/sub.h         | 103 ++++++++++++
 src/core/NEON/wrapper/intrinsics/tanh.h        |  47 ++++++
 src/core/NEON/wrapper/intrinsics/tbl.h         |  45 +++++
 src/core/NEON/wrapper/scalar/add.h             |  69 ++++++++
 src/core/NEON/wrapper/scalar/scalar.h          |  30 ++++
 src/core/NEON/wrapper/scalar/sub.h             |  69 ++++++++
 src/core/NEON/wrapper/traits.h                 | 140 ++++++++++++++++
 src/core/NEON/wrapper/wrapper.h                |  34 ++++
 52 files changed, 3683 insertions(+)
 create mode 100644 src/core/NEON/wrapper/intrinsics/abs.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/add.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/and.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/bsl.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/ceq.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/cge.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/cgt.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/cle.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/clt.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/combine.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/cvt.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/div.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/dup_n.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/eor.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/exp.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/ext.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/gethigh.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/getlane.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/getlow.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/intrinsics.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/inv.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/invsqrt.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/load.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/log.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/max.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/min.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/mla.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/movl.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/movn.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/mul.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/neg.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/not.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/orr.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/pmax.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/pmin.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/pow.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/qmov.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/qmovun.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/reinterpret.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/rev64.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/round.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/setlane.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/sin.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/store.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/sub.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/tanh.h
 create mode 100644 src/core/NEON/wrapper/intrinsics/tbl.h
 create mode 100644 src/core/NEON/wrapper/scalar/add.h
 create mode 100644 src/core/NEON/wrapper/scalar/scalar.h
 create mode 100644 src/core/NEON/wrapper/scalar/sub.h
 create mode 100644 src/core/NEON/wrapper/traits.h
 create mode 100644 src/core/NEON/wrapper/wrapper.h
(limited to 'src/core/NEON/wrapper')

diff --git a/src/core/NEON/wrapper/intrinsics/abs.h b/src/core/NEON/wrapper/intrinsics/abs.h
new file mode 100644
index 0000000000..0d49a9ebf1
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/abs.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_ABS_H
+#define ARM_COMPUTE_WRAPPER_ABS_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VABS_IMPL(stype, vtype, prefix, postfix) \
+    inline vtype vabs(const vtype &a) \
+    { \
+        return prefix##_##postfix(a); \
+    }
+
+#define VQABS_IMPL(stype, vtype, prefix, postfix) \
+    inline vtype vqabs(const vtype &a) \
+    { \
+        return prefix##_##postfix(a); \
+    }
+
+// Absolute: vabs{q}_<type>. Vd[i] = |Va[i]|
+VABS_IMPL(int8x8_t, int8x8_t, vabs, s8)
+VABS_IMPL(int16x4_t, int16x4_t, vabs, s16)
+VABS_IMPL(int32x2_t, int32x2_t, vabs, s32)
+VABS_IMPL(float32x2_t, float32x2_t, vabs, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VABS_IMPL(float16x4_t, float16x4_t, vabs, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VABS_IMPL(int8x16_t, int8x16_t, vabsq, s8)
+VABS_IMPL(int16x8_t, int16x8_t, vabsq, s16)
+VABS_IMPL(int32x4_t, int32x4_t, vabsq, s32)
+VABS_IMPL(float32x4_t, float32x4_t, vabsq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VABS_IMPL(float16x8_t, float16x8_t, vabsq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+// Saturating absolute: vqabs{q}_<type>. Vd[i] = sat(|Va[i]|)
+VQABS_IMPL(int8x8_t, int8x8_t, vqabs, s8)
+VQABS_IMPL(int16x4_t, int16x4_t, vqabs, s16)
+VQABS_IMPL(int32x2_t, int32x2_t, vqabs, s32)
+
+VQABS_IMPL(int8x16_t, int8x16_t, vqabsq, s8)
+VQABS_IMPL(int16x8_t, int16x8_t, vqabsq, s16)
+VQABS_IMPL(int32x4_t, int32x4_t, vqabsq, s32)
+
+#undef VABS_IMPL
+#undef VQABS_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_ABS_H */
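Usage sketch (illustrative, not part of the patch): the wrapper relies on plain C++ overloading, so a single call site works for every vector type registered above. A hypothetical caller inside the library, using the new src/ include path:

    // Hypothetical caller; assumes a translation unit within the library tree.
    #include "src/core/NEON/wrapper/wrapper.h"

    #include <arm_neon.h>

    int8x8_t abs_d(int8x8_t v)
    {
        return arm_compute::wrapper::vabs(v); // resolves to vabs_s8
    }

    float32x4_t abs_q(float32x4_t v)
    {
        return arm_compute::wrapper::vabs(v); // resolves to vabsq_f32
    }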
diff --git a/src/core/NEON/wrapper/intrinsics/add.h b/src/core/NEON/wrapper/intrinsics/add.h
new file mode 100644
index 0000000000..6134d75b29
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/add.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_ADD_H
+#define ARM_COMPUTE_WRAPPER_ADD_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VADD_IMPL(stype, vtype, prefix, postfix) \
+    inline vtype vadd(const vtype &a, const vtype &b) \
+    { \
+        return prefix##_##postfix(a, b); \
+    }
+
+VADD_IMPL(uint8x8_t, uint8x8_t, vadd, u8)
+VADD_IMPL(int8x8_t, int8x8_t, vadd, s8)
+VADD_IMPL(uint16x4_t, uint16x4_t, vadd, u16)
+VADD_IMPL(int16x4_t, int16x4_t, vadd, s16)
+VADD_IMPL(uint32x2_t, uint32x2_t, vadd, u32)
+VADD_IMPL(int32x2_t, int32x2_t, vadd, s32)
+VADD_IMPL(uint64x1_t, uint64x1_t, vadd, u64)
+VADD_IMPL(int64x1_t, int64x1_t, vadd, s64)
+VADD_IMPL(float32x2_t, float32x2_t, vadd, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VADD_IMPL(float16x4_t, float16x4_t, vadd, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VADD_IMPL(uint8x16_t, uint8x16_t, vaddq, u8)
+VADD_IMPL(int8x16_t, int8x16_t, vaddq, s8)
+VADD_IMPL(uint16x8_t, uint16x8_t, vaddq, u16)
+VADD_IMPL(int16x8_t, int16x8_t, vaddq, s16)
+VADD_IMPL(uint32x4_t, uint32x4_t, vaddq, u32)
+VADD_IMPL(int32x4_t, int32x4_t, vaddq, s32)
+VADD_IMPL(uint64x2_t, uint64x2_t, vaddq, u64)
+VADD_IMPL(int64x2_t, int64x2_t, vaddq, s64)
+VADD_IMPL(float32x4_t, float32x4_t, vaddq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VADD_IMPL(float16x8_t, float16x8_t, vaddq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#undef VADD_IMPL
+
+// VQADD: Vector saturating add (No notion of saturation for floating point)
+#define VQADD_IMPL(stype, vtype, prefix, postfix) \
+    inline vtype vqadd(const vtype &a, const vtype &b) \
+    { \
+        return prefix##_##postfix(a, b); \
+    }
+
+VQADD_IMPL(uint8x8_t, uint8x8_t, vqadd, u8)
+VQADD_IMPL(int8x8_t, int8x8_t, vqadd, s8)
+VQADD_IMPL(uint16x4_t, uint16x4_t, vqadd, u16)
+VQADD_IMPL(int16x4_t, int16x4_t, vqadd, s16)
+VQADD_IMPL(uint32x2_t, uint32x2_t, vqadd, u32)
+VQADD_IMPL(int32x2_t, int32x2_t, vqadd, s32)
+VQADD_IMPL(uint64x1_t, uint64x1_t, vqadd, u64)
+VQADD_IMPL(int64x1_t, int64x1_t, vqadd, s64)
+VQADD_IMPL(float32x2_t, float32x2_t, vadd, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VQADD_IMPL(float16x4_t, float16x4_t, vadd, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VQADD_IMPL(uint8x16_t, uint8x16_t, vqaddq, u8)
+VQADD_IMPL(int8x16_t, int8x16_t, vqaddq, s8)
+VQADD_IMPL(uint16x8_t, uint16x8_t, vqaddq, u16)
+VQADD_IMPL(int16x8_t, int16x8_t, vqaddq, s16)
+VQADD_IMPL(uint32x4_t, uint32x4_t, vqaddq, u32)
+VQADD_IMPL(int32x4_t, int32x4_t, vqaddq, s32)
+VQADD_IMPL(uint64x2_t, uint64x2_t, vqaddq, u64)
+VQADD_IMPL(int64x2_t, int64x2_t, vqaddq, s64)
+VQADD_IMPL(float32x4_t, float32x4_t, vaddq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VQADD_IMPL(float16x8_t, float16x8_t, vaddq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#undef VQADD_IMPL
+
+// VADDW: Vector widening add
+#define VADDW_IMPL(wtype, vtype, prefix, postfix) \
+    inline wtype vaddw(const wtype &a, const vtype &b) \
+    { \
+        return prefix##_##postfix(a, b); \
+    }
+
+VADDW_IMPL(uint16x8_t, uint8x8_t, vaddw, u8)
+VADDW_IMPL(int16x8_t, int8x8_t, vaddw, s8)
+VADDW_IMPL(uint32x4_t, uint16x4_t, vaddw, u16)
+VADDW_IMPL(int32x4_t, int16x4_t, vaddw, s16)
+VADDW_IMPL(uint64x2_t, uint32x2_t, vaddw, u32)
+VADDW_IMPL(int64x2_t, int32x2_t, vaddw, s32)
+#undef VADDW_IMPL
+
+// VADDL: Vector long add
+#define VADDL_IMPL(wtype, vtype, prefix, postfix) \
+    inline wtype vaddl(const vtype &a, const vtype &b) \
+    { \
+        return prefix##_##postfix(a, b); \
+    }
+
+VADDL_IMPL(uint16x8_t, uint8x8_t, vaddl, u8)
+VADDL_IMPL(int16x8_t, int8x8_t, vaddl, s8)
+VADDL_IMPL(uint32x4_t, uint16x4_t, vaddl, u16)
+VADDL_IMPL(int32x4_t, int16x4_t, vaddl, s16)
+VADDL_IMPL(uint64x2_t, uint32x2_t, vaddl, u32)
+VADDL_IMPL(int64x2_t, int32x2_t, vaddl, s32)
+#undef VADDL_IMPL
+
+#if defined(__aarch64__)
+// VADDV: Across vector add
+#define VADDV_IMPL(stype, vtype, prefix, postfix) \
+    inline stype vaddv(const vtype &a) \
+    { \
+        return prefix##_##postfix(a); \
+    }
+
+VADDV_IMPL(uint8_t, uint8x8_t, vaddv, u8)
+VADDV_IMPL(int8_t, int8x8_t, vaddv, s8)
+VADDV_IMPL(uint16_t, uint16x4_t, vaddv, u16)
+VADDV_IMPL(int16_t, int16x4_t, vaddv, s16)
+VADDV_IMPL(uint32_t, uint32x2_t, vaddv, u32)
+VADDV_IMPL(int32_t, int32x2_t, vaddv, s32)
+VADDV_IMPL(float, float32x2_t, vaddv, f32)
+
+VADDV_IMPL(uint8_t, uint8x16_t, vaddvq, u8)
+VADDV_IMPL(int8_t, int8x16_t, vaddvq, s8)
+VADDV_IMPL(uint16_t, uint16x8_t, vaddvq, u16)
+VADDV_IMPL(int16_t, int16x8_t, vaddvq, s16)
+VADDV_IMPL(uint32_t, uint32x4_t, vaddvq, u32)
+VADDV_IMPL(int32_t, int32x4_t, vaddvq, s32)
+VADDV_IMPL(uint64_t, uint64x2_t, vaddvq, u64)
+VADDV_IMPL(int64_t, int64x2_t, vaddvq, s64)
+VADDV_IMPL(float, float32x4_t, vaddvq, f32)
+#undef VADDV_IMPL
+#endif // defined(__aarch64__)
+
+// VPADDL: Signed add long pairwise
+#define VPADDL_IMPL(ltype, vtype, prefix, postfix) \
+    inline ltype vpaddl(const vtype &a) \
+    { \
+        return prefix##_##postfix(a); \
+    }
+
+VPADDL_IMPL(uint16x4_t, uint8x8_t, vpaddl, u8)
+VPADDL_IMPL(int16x4_t, int8x8_t, vpaddl, s8)
+VPADDL_IMPL(uint32x2_t, uint16x4_t, vpaddl, u16)
+VPADDL_IMPL(int32x2_t, int16x4_t, vpaddl, s16)
+VPADDL_IMPL(uint64x1_t, uint32x2_t, vpaddl, u32)
+VPADDL_IMPL(int64x1_t, int32x2_t, vpaddl, s32)
+
+VPADDL_IMPL(uint16x8_t, uint8x16_t, vpaddlq, u8)
+VPADDL_IMPL(int16x8_t, int8x16_t, vpaddlq, s8)
+VPADDL_IMPL(uint32x4_t, uint16x8_t, vpaddlq, u16)
+VPADDL_IMPL(int32x4_t, int16x8_t, vpaddlq, s16)
+VPADDL_IMPL(uint64x2_t, uint32x4_t, vpaddlq, u32)
+VPADDL_IMPL(int64x2_t, int32x4_t, vpaddlq, s32)
+#undef VPADDL_IMPL
+
+// VPADD: Add pairwise
+#define VPADD_IMPL(stype, vtype, prefix, postfix) \
+    inline vtype vpadd(const vtype &a, const vtype &b) \
+    { \
+        return prefix##_##postfix(a, b); \
+    }
+
+VPADD_IMPL(uint8x8_t, uint8x8_t, vpadd, u8)
+VPADD_IMPL(int8x8_t, int8x8_t, vpadd, s8)
+VPADD_IMPL(uint16x4_t, uint16x4_t, vpadd, u16)
+VPADD_IMPL(int16x4_t, int16x4_t, vpadd, s16)
+VPADD_IMPL(uint32x2_t, uint32x2_t, vpadd, u32)
+VPADD_IMPL(int32x2_t, int32x2_t, vpadd, s32)
+VPADD_IMPL(float32x2_t, float32x2_t, vpadd, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VPADD_IMPL(float16x4_t, float16x4_t, vpadd, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VPADD_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_ADD_H */
diff --git a/src/core/NEON/wrapper/intrinsics/and.h b/src/core/NEON/wrapper/intrinsics/and.h
new file mode 100644
index 0000000000..6ff7df3f5a
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/and.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_AND_H
+#define ARM_COMPUTE_WRAPPER_AND_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VAND_IMPL(stype, vtype, prefix, postfix) \
+    inline vtype vand(const vtype &a, const vtype &b) \
+    { \
+        return prefix##_##postfix(a, b); \
+    }
+
+VAND_IMPL(uint8_t, uint8x8_t, vand, u8)
+VAND_IMPL(int8_t, int8x8_t, vand, s8)
+VAND_IMPL(uint16_t, uint16x4_t, vand, u16)
+VAND_IMPL(int16_t, int16x4_t, vand, s16)
+VAND_IMPL(uint32_t, uint32x2_t, vand, u32)
+VAND_IMPL(int32_t, int32x2_t, vand, s32)
+VAND_IMPL(uint64_t, uint64x1_t, vand, u64)
+VAND_IMPL(int64_t, int64x1_t, vand, s64)
+
+VAND_IMPL(uint8_t, uint8x16_t, vandq, u8)
+VAND_IMPL(int8_t, int8x16_t, vandq, s8)
+VAND_IMPL(uint16_t, uint16x8_t, vandq, u16)
+VAND_IMPL(int16_t, int16x8_t, vandq, s16)
+VAND_IMPL(uint32_t, uint32x4_t, vandq, u32)
+VAND_IMPL(int32_t, int32x4_t, vandq, s32)
+VAND_IMPL(uint64_t, uint64x2_t, vandq, u64)
+VAND_IMPL(int64_t, int64x2_t, vandq, s64)
+
+#undef VAND_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_AND_H */
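Illustration (not part of the patch): a common use of the pairwise and across-vector adders from add.h is a widening horizontal sum, with the same architecture split the header itself uses. The D-register fold below also leans on vgetlow/vgethigh/vgetlane from headers further down in this patch; a sketch:

    #include "src/core/NEON/wrapper/wrapper.h"

    #include <arm_neon.h>

    uint32_t sum_u8x16(uint8x16_t v)
    {
        using namespace arm_compute::wrapper;
        const uint16x8_t s16 = vpaddl(v);   // vpaddlq_u8: 16x u8 -> 8x u16
        const uint32x4_t s32 = vpaddl(s16); // vpaddlq_u16: 8x u16 -> 4x u32
    #if defined(__aarch64__)
        return vaddv(s32); // vaddvq_u32: single across-vector add
    #else
        uint32x2_t d = vpadd(vgetlow(s32), vgethigh(s32)); // fold Q into D
        d            = vpadd(d, d);                        // fold D into one lane
        return vgetlane(d, 0);
    #endif // defined(__aarch64__)
    }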
diff --git a/src/core/NEON/wrapper/intrinsics/bsl.h b/src/core/NEON/wrapper/intrinsics/bsl.h
new file mode 100644
index 0000000000..01c1cce3a6
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/bsl.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_BSL_H
+#define ARM_COMPUTE_WRAPPER_BSL_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VBSL_IMPL(stype, vtype, ctype, prefix, postfix) \
+    inline vtype vbsl(const ctype &a, const vtype &b, const vtype &c) \
+    { \
+        return prefix##_##postfix(a, b, c); \
+    }
+
+VBSL_IMPL(uint8_t, uint8x8_t, uint8x8_t, vbsl, u8)
+VBSL_IMPL(int8_t, int8x8_t, uint8x8_t, vbsl, s8)
+VBSL_IMPL(uint16_t, uint16x4_t, uint16x4_t, vbsl, u16)
+VBSL_IMPL(int16_t, int16x4_t, uint16x4_t, vbsl, s16)
+VBSL_IMPL(uint32_t, uint32x2_t, uint32x2_t, vbsl, u32)
+VBSL_IMPL(int32_t, int32x2_t, uint32x2_t, vbsl, s32)
+VBSL_IMPL(float32x2_t, float32x2_t, uint32x2_t, vbsl, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VBSL_IMPL(float16x4_t, float16x4_t, uint16x4_t, vbsl, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VBSL_IMPL(uint8_t, uint8x16_t, uint8x16_t, vbslq, u8)
+VBSL_IMPL(int8_t, int8x16_t, uint8x16_t, vbslq, s8)
+VBSL_IMPL(uint16_t, uint16x8_t, uint16x8_t, vbslq, u16)
+VBSL_IMPL(int16_t, int16x8_t, uint16x8_t, vbslq, s16)
+VBSL_IMPL(uint32_t, uint32x4_t, uint32x4_t, vbslq, u32)
+VBSL_IMPL(int32_t, int32x4_t, uint32x4_t, vbslq, s32)
+VBSL_IMPL(float32x4_t, float32x4_t, uint32x4_t, vbslq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VBSL_IMPL(float16x8_t, float16x8_t, uint16x8_t, vbslq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VBSL_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_BSL_H */
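Illustration (not part of the patch): vbsl pairs naturally with the comparison wrappers that follow (ceq/cge/cgt/cle/clt), which return all-ones lane masks. A sketch of an elementwise max built from compare-and-select:

    #include "src/core/NEON/wrapper/wrapper.h"

    #include <arm_neon.h>

    float32x4_t max4(float32x4_t a, float32x4_t b)
    {
        using namespace arm_compute::wrapper;
        const uint32x4_t mask = vcgt(a, b); // vcgtq_f32: 0xFFFFFFFF where a > b
        return vbsl(mask, a, b);            // vbslq_f32: a where mask set, else b
    }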
diff --git a/src/core/NEON/wrapper/intrinsics/ceq.h b/src/core/NEON/wrapper/intrinsics/ceq.h
new file mode 100644
index 0000000000..b0324e63db
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/ceq.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_CEQ_H
+#define ARM_COMPUTE_WRAPPER_CEQ_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VCEQ_IMPL(votype, vtype, prefix, postfix) \
+    inline votype vceq(const vtype &a, const vtype &b) \
+    { \
+        return prefix##_##postfix(a, b); \
+    }
+
+VCEQ_IMPL(uint8x8_t, uint8x8_t, vceq, u8)
+VCEQ_IMPL(uint8x8_t, int8x8_t, vceq, s8)
+VCEQ_IMPL(uint16x4_t, uint16x4_t, vceq, u16)
+VCEQ_IMPL(uint16x4_t, int16x4_t, vceq, s16)
+VCEQ_IMPL(uint32x2_t, uint32x2_t, vceq, u32)
+VCEQ_IMPL(uint32x2_t, int32x2_t, vceq, s32)
+VCEQ_IMPL(uint32x2_t, float32x2_t, vceq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCEQ_IMPL(uint16x4_t, float16x4_t, vceq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VCEQ_IMPL(uint8x16_t, uint8x16_t, vceqq, u8)
+VCEQ_IMPL(uint8x16_t, int8x16_t, vceqq, s8)
+VCEQ_IMPL(uint16x8_t, uint16x8_t, vceqq, u16)
+VCEQ_IMPL(uint16x8_t, int16x8_t, vceqq, s16)
+VCEQ_IMPL(uint32x4_t, uint32x4_t, vceqq, u32)
+VCEQ_IMPL(uint32x4_t, int32x4_t, vceqq, s32)
+VCEQ_IMPL(uint32x4_t, float32x4_t, vceqq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCEQ_IMPL(uint16x8_t, float16x8_t, vceqq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VCEQ_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_CEQ_H */
diff --git a/src/core/NEON/wrapper/intrinsics/cge.h b/src/core/NEON/wrapper/intrinsics/cge.h
new file mode 100644
index 0000000000..e4a7fcd423
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/cge.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2019-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_CGE_H
+#define ARM_COMPUTE_WRAPPER_CGE_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VCGE_IMPL(stype, vtype, rtype, prefix, postfix) \
+    inline rtype vcge(const vtype &a, const vtype &b) \
+    { \
+        return prefix##_##postfix(a, b); \
+    }
+
+VCGE_IMPL(uint8_t, uint8x8_t, uint8x8_t, vcge, u8)
+VCGE_IMPL(int8_t, int8x8_t, uint8x8_t, vcge, s8)
+VCGE_IMPL(uint16_t, uint16x4_t, uint16x4_t, vcge, u16)
+VCGE_IMPL(int16_t, int16x4_t, uint16x4_t, vcge, s16)
+VCGE_IMPL(uint32_t, uint32x2_t, uint32x2_t, vcge, u32)
+VCGE_IMPL(int32_t, int32x2_t, uint32x2_t, vcge, s32)
+VCGE_IMPL(float32x2_t, float32x2_t, uint32x2_t, vcge, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCGE_IMPL(float16x4_t, float16x4_t, uint16x4_t, vcge, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VCGE_IMPL(uint8_t, uint8x16_t, uint8x16_t, vcgeq, u8)
+VCGE_IMPL(int8_t, int8x16_t, uint8x16_t, vcgeq, s8)
+VCGE_IMPL(uint16_t, uint16x8_t, uint16x8_t, vcgeq, u16)
+VCGE_IMPL(int16_t, int16x8_t, uint16x8_t, vcgeq, s16)
+VCGE_IMPL(uint32_t, uint32x4_t, uint32x4_t, vcgeq, u32)
+VCGE_IMPL(int32_t, int32x4_t, uint32x4_t, vcgeq, s32)
+VCGE_IMPL(float32x4_t, float32x4_t, uint32x4_t, vcgeq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCGE_IMPL(float16x8_t, float16x8_t, uint16x8_t, vcgeq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VCGE_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_CGE_H */
diff --git a/src/core/NEON/wrapper/intrinsics/cgt.h b/src/core/NEON/wrapper/intrinsics/cgt.h
new file mode 100644
index 0000000000..f34d02fd1b
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/cgt.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_CGT_H
+#define ARM_COMPUTE_WRAPPER_CGT_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VCGT_IMPL(rtype, vtype, prefix, postfix) \
+    inline rtype vcgt(const vtype &a, const vtype &b) \
+    { \
+        return prefix##_##postfix(a, b); \
+    }
+
+VCGT_IMPL(uint8x8_t, uint8x8_t, vcgt, u8)
+VCGT_IMPL(uint8x8_t, int8x8_t, vcgt, s8)
+VCGT_IMPL(uint16x4_t, uint16x4_t, vcgt, u16)
+VCGT_IMPL(uint16x4_t, int16x4_t, vcgt, s16)
+VCGT_IMPL(uint32x2_t, uint32x2_t, vcgt, u32)
+VCGT_IMPL(uint32x2_t, int32x2_t, vcgt, s32)
+VCGT_IMPL(uint32x2_t, float32x2_t, vcgt, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCGT_IMPL(uint16x4_t, float16x4_t, vcgt, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VCGT_IMPL(uint8x16_t, uint8x16_t, vcgtq, u8)
+VCGT_IMPL(uint8x16_t, int8x16_t, vcgtq, s8)
+VCGT_IMPL(uint16x8_t, uint16x8_t, vcgtq, u16)
+VCGT_IMPL(uint16x8_t, int16x8_t, vcgtq, s16)
+VCGT_IMPL(uint32x4_t, uint32x4_t, vcgtq, u32)
+VCGT_IMPL(uint32x4_t, int32x4_t, vcgtq, s32)
+VCGT_IMPL(uint32x4_t, float32x4_t, vcgtq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCGT_IMPL(uint16x8_t, float16x8_t, vcgtq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VCGT_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_CGT_H */
diff --git a/src/core/NEON/wrapper/intrinsics/cle.h b/src/core/NEON/wrapper/intrinsics/cle.h
new file mode 100644
index 0000000000..50c175f0c8
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/cle.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_CLE_H
+#define ARM_COMPUTE_WRAPPER_CLE_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VCLE_IMPL(stype, vtype, rtype, prefix, postfix) \
+    inline rtype vcle(const vtype &a, const vtype &b) \
+    { \
+        return prefix##_##postfix(a, b); \
+    }
+
+VCLE_IMPL(uint8_t, uint8x8_t, uint8x8_t, vcle, u8)
+VCLE_IMPL(int8_t, int8x8_t, uint8x8_t, vcle, s8)
+VCLE_IMPL(uint16_t, uint16x4_t, uint16x4_t, vcle, u16)
+VCLE_IMPL(int16_t, int16x4_t, uint16x4_t, vcle, s16)
+VCLE_IMPL(uint32_t, uint32x2_t, uint32x2_t, vcle, u32)
+VCLE_IMPL(int32_t, int32x2_t, uint32x2_t, vcle, s32)
+VCLE_IMPL(float32x2_t, float32x2_t, uint32x2_t, vcle, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCLE_IMPL(float16x4_t, float16x4_t, uint16x4_t, vcle, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VCLE_IMPL(uint8_t, uint8x16_t, uint8x16_t, vcleq, u8)
+VCLE_IMPL(int8_t, int8x16_t, uint8x16_t, vcleq, s8)
+VCLE_IMPL(uint16_t, uint16x8_t, uint16x8_t, vcleq, u16)
+VCLE_IMPL(int16_t, int16x8_t, uint16x8_t, vcleq, s16)
+VCLE_IMPL(uint32_t, uint32x4_t, uint32x4_t, vcleq, u32)
+VCLE_IMPL(int32_t, int32x4_t, uint32x4_t, vcleq, s32)
+VCLE_IMPL(float32x4_t, float32x4_t, uint32x4_t, vcleq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCLE_IMPL(float16x8_t, float16x8_t, uint16x8_t, vcleq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VCLE_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_CLE_H */
diff --git a/src/core/NEON/wrapper/intrinsics/clt.h b/src/core/NEON/wrapper/intrinsics/clt.h
new file mode 100644
index 0000000000..10fd320e4c
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/clt.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_CLT_H
+#define ARM_COMPUTE_WRAPPER_CLT_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VCLT_IMPL(votype, vtype, prefix, postfix) \
+    inline votype vclt(const vtype &a, const vtype &b) \
+    { \
+        return prefix##_##postfix(a, b); \
+    }
+
+VCLT_IMPL(uint8x8_t, uint8x8_t, vclt, u8)
+VCLT_IMPL(uint8x8_t, int8x8_t, vclt, s8)
+VCLT_IMPL(uint16x4_t, uint16x4_t, vclt, u16)
+VCLT_IMPL(uint16x4_t, int16x4_t, vclt, s16)
+VCLT_IMPL(uint32x2_t, uint32x2_t, vclt, u32)
+VCLT_IMPL(uint32x2_t, int32x2_t, vclt, s32)
+VCLT_IMPL(uint32x2_t, float32x2_t, vclt, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCLT_IMPL(uint16x4_t, float16x4_t, vclt, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VCLT_IMPL(uint8x16_t, uint8x16_t, vcltq, u8)
+VCLT_IMPL(uint8x16_t, int8x16_t, vcltq, s8)
+VCLT_IMPL(uint16x8_t, uint16x8_t, vcltq, u16)
+VCLT_IMPL(uint16x8_t, int16x8_t, vcltq, s16)
+VCLT_IMPL(uint32x4_t, uint32x4_t, vcltq, u32)
+VCLT_IMPL(uint32x4_t, int32x4_t, vcltq, s32)
+VCLT_IMPL(uint32x4_t, float32x4_t, vcltq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCLT_IMPL(uint16x8_t, float16x8_t, vcltq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VCLT_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_CLT_H */
diff --git a/src/core/NEON/wrapper/intrinsics/combine.h b/src/core/NEON/wrapper/intrinsics/combine.h
new file mode 100644
index 0000000000..8b6a588f51
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/combine.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_COMBINE_H
+#define ARM_COMPUTE_WRAPPER_COMBINE_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VCOMBINE_IMPL(rtype, vtype, prefix, postfix) \
+    inline rtype vcombine(const vtype &a, const vtype &b) \
+    { \
+        return prefix##_##postfix(a, b); \
+    }
+
+VCOMBINE_IMPL(uint8x16_t, uint8x8_t, vcombine, u8)
+VCOMBINE_IMPL(int8x16_t, int8x8_t, vcombine, s8)
+VCOMBINE_IMPL(uint16x8_t, uint16x4_t, vcombine, u16)
+VCOMBINE_IMPL(int16x8_t, int16x4_t, vcombine, s16)
+VCOMBINE_IMPL(uint32x4_t, uint32x2_t, vcombine, u32)
+VCOMBINE_IMPL(int32x4_t, int32x2_t, vcombine, s32)
+VCOMBINE_IMPL(float32x4_t, float32x2_t, vcombine, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCOMBINE_IMPL(float16x8_t, float16x4_t, vcombine, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VCOMBINE_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_COMBINE_H */
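Illustration (not part of the patch): vcombine is the inverse of the vgetlow/vgethigh pair further down, and shows up whenever a kernel processes the two halves of a Q register separately and then glues them back together. A sketch:

    #include "src/core/NEON/wrapper/wrapper.h"

    #include <arm_neon.h>

    uint16x8_t pairwise_widen(uint8x8_t lo, uint8x8_t hi)
    {
        using namespace arm_compute::wrapper;
        // vpaddl_u8 turns each u8x8 half into u16x4; vcombine_u16 rejoins them.
        return vcombine(vpaddl(lo), vpaddl(hi));
    }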
diff --git a/src/core/NEON/wrapper/intrinsics/cvt.h b/src/core/NEON/wrapper/intrinsics/cvt.h
new file mode 100644
index 0000000000..6e79a92bc2
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/cvt.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_CVT_H
+#define ARM_COMPUTE_WRAPPER_CVT_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VCVT_TO_F32_IMPL(ptype, vtype, prefix, postfix1, postfix2) \
+    template <typename T> \
+    inline typename std::enable_if<std::is_same<T, float>::value, float32x4_t>::type \
+    vcvt(const vtype &a) \
+    { \
+        return prefix##_##postfix1##_##postfix2(a); \
+    }
+
+VCVT_TO_F32_IMPL(float32x4_t, uint32x4_t, vcvtq, f32, u32)
+VCVT_TO_F32_IMPL(float32x4_t, int32x4_t, vcvtq, f32, s32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCVT_TO_F32_IMPL(float32x4_t, float16x4_t, vcvt, f32, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#undef VCVT_TO_F32_IMPL
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#define VCVT_TO_F16_IMPL(ptype, vtype, prefix, postfix1, postfix2) \
+    template <typename T> \
+    inline typename std::enable_if<std::is_same<T, float16_t>::value, float16x4_t>::type \
+    vcvt(const vtype &a) \
+    { \
+        return prefix##_##postfix1##_##postfix2(a); \
+    }
+
+VCVT_TO_F16_IMPL(float16x4_t, float32x4_t, vcvt, f16, f32)
+#undef VCVT_TO_F16_IMPL
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+template <typename T>
+inline typename std::enable_if<std::is_same<T, uint8_t>::value, uint32x4_t>::type
+vcvt(const float32x4_t &a)
+{
+    return vcvtq_u32_f32(a);
+}
+
+template <typename T>
+inline typename std::enable_if<std::is_same<T, int8_t>::value, int32x4_t>::type
+vcvt(const float32x4_t &a)
+{
+    return vcvtq_s32_f32(a);
+}
+
+#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16)
+/** Convert 2x128-bit floating point vectors into 1x128-bit bfloat16 vector
+ *
+ * @param[in]     inptr  Pointer to the input memory to load values from
+ * @param[in,out] outptr Pointer to the output memory to store values to
+ */
+inline void vcvt_bf16_f32(const float *inptr, uint16_t *outptr)
+{
+    __asm __volatile(
+        "ldp  q0, q1, [%[inptr]]\n"
+        ".inst 0xea16800\n"  // BFCVTN v0, v0
+        ".inst 0x4ea16820\n" // BFCVTN2 v0, v1
+        "str  q0, [%[outptr]]\n"
+        : [inptr] "+r"(inptr)
+        : [outptr] "r"(outptr)
+        : "v0", "v1", "memory");
+}
+#endif /* defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) */
+
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_CVT_H */
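Usage sketch (illustrative, not part of the patch): unlike the other wrappers, vcvt cannot be resolved from its argument alone, since one source vector type has several possible destinations. The caller names the destination scalar type explicitly, and the enable_if guards reconstructed above select the overload. The float-to-integer direction is dispatched the same way, ending in the truncating vcvtq_u32_f32 / vcvtq_s32_f32:

    #include "src/core/NEON/wrapper/wrapper.h"

    #include <arm_neon.h>

    float32x4_t to_f32(uint32x4_t v)
    {
        return arm_compute::wrapper::vcvt<float>(v); // vcvtq_f32_u32
    }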
diff --git a/src/core/NEON/wrapper/intrinsics/div.h b/src/core/NEON/wrapper/intrinsics/div.h
new file mode 100644
index 0000000000..265f30d33b
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/div.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2019-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_DIV_H
+#define ARM_COMPUTE_WRAPPER_DIV_H
+
+#include "src/core/NEON/NEMath.h"
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#ifdef __aarch64__
+
+#define VDIV_IMPL(stype, vtype, prefix, postfix) \
+    inline vtype vdiv(const vtype &a, const vtype &b) \
+    { \
+        return prefix##_##postfix(a, b); \
+    }
+VDIV_IMPL(float32x2_t, float32x2_t, vdiv, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VDIV_IMPL(float16x4_t, float16x4_t, vdiv, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VDIV_IMPL(float32x4_t, float32x4_t, vdivq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VDIV_IMPL(float16x8_t, float16x8_t, vdivq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#else // __aarch64__
+
+#define VDIV_IMPL(stype, vtype, mul_prefix, inv_prefix, postfix) \
+    inline vtype vdiv(const vtype &a, const vtype &b) \
+    { \
+        return mul_prefix##_##postfix(a, inv_prefix##_##postfix(b)); \
+    }
+VDIV_IMPL(float32x2_t, float32x2_t, vmul, vinv, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VDIV_IMPL(float16x4_t, float16x4_t, vmul, vinv, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VDIV_IMPL(float32x4_t, float32x4_t, vmulq, vinvq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VDIV_IMPL(float16x8_t, float16x8_t, vmulq, vinvq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#endif // __aarch64__
+
+#undef VDIV_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_DIV_H */
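Design note (illustrative, not part of the patch): AArch64 has true vector divide instructions, but 32-bit NEON does not, so the fallback multiplies by a reciprocal from NEMath.h (hence the extra include); vinvq_f32 there is a reciprocal estimate refined with Newton-Raphson steps, so the result is an approximation of a / b. A sketch of what the 32-bit macro pastes together for f32:

    #include "src/core/NEON/NEMath.h"

    #include <arm_neon.h>

    // Expansion of VDIV_IMPL(float32x4_t, float32x4_t, vmulq, vinvq, f32):
    inline float32x4_t vdiv_sketch(const float32x4_t &a, const float32x4_t &b)
    {
        return vmulq_f32(a, vinvq_f32(b)); // a * (1 / b), approximate
    }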
diff --git a/src/core/NEON/wrapper/intrinsics/dup_n.h b/src/core/NEON/wrapper/intrinsics/dup_n.h
new file mode 100644
index 0000000000..e745aa4a8c
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/dup_n.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_DUP_N_H
+#define ARM_COMPUTE_WRAPPER_DUP_N_H
+
+#include "src/core/NEON/wrapper/traits.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VDUP_N_IMPL(stype, vtype, prefix, postfix, tag) \
+    inline vtype vdup_n(stype value, tag) \
+    { \
+        return prefix##_##postfix(value); \
+    }
+
+VDUP_N_IMPL(uint8_t, uint8x8_t, vdup_n, u8, traits::vector_64_tag)
+VDUP_N_IMPL(int8_t, int8x8_t, vdup_n, s8, traits::vector_64_tag)
+VDUP_N_IMPL(uint16_t, uint16x4_t, vdup_n, u16, traits::vector_64_tag)
+VDUP_N_IMPL(int16_t, int16x4_t, vdup_n, s16, traits::vector_64_tag)
+VDUP_N_IMPL(uint32_t, uint32x2_t, vdup_n, u32, traits::vector_64_tag)
+VDUP_N_IMPL(int32_t, int32x2_t, vdup_n, s32, traits::vector_64_tag)
+VDUP_N_IMPL(float, float32x2_t, vdup_n, f32, traits::vector_64_tag)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VDUP_N_IMPL(float16_t, float16x4_t, vdup_n, f16, traits::vector_64_tag)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VDUP_N_IMPL(uint8_t, uint8x16_t, vdupq_n, u8, traits::vector_128_tag)
+VDUP_N_IMPL(int8_t, int8x16_t, vdupq_n, s8, traits::vector_128_tag)
+VDUP_N_IMPL(uint16_t, uint16x8_t, vdupq_n, u16, traits::vector_128_tag)
+VDUP_N_IMPL(int16_t, int16x8_t, vdupq_n, s16, traits::vector_128_tag)
+VDUP_N_IMPL(uint32_t, uint32x4_t, vdupq_n, u32, traits::vector_128_tag)
+VDUP_N_IMPL(int32_t, int32x4_t, vdupq_n, s32, traits::vector_128_tag)
+VDUP_N_IMPL(float, float32x4_t, vdupq_n, f32, traits::vector_128_tag)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VDUP_N_IMPL(float16_t, float16x8_t, vdupq_n, f16, traits::vector_128_tag)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VDUP_N_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_DUP_N_H */
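Usage sketch (illustrative, not part of the patch): the broadcast value alone cannot distinguish a D from a Q register, so vdup_n takes a traits tag to pick the width. Templated kernels typically obtain that tag from traits.h (added by this patch, content not shown here); a direct use:

    #include "src/core/NEON/wrapper/wrapper.h"

    #include <arm_neon.h>

    float32x2_t zero_d()
    {
        return arm_compute::wrapper::vdup_n(0.f, arm_compute::wrapper::traits::vector_64_tag{});
    }

    float32x4_t zero_q()
    {
        return arm_compute::wrapper::vdup_n(0.f, arm_compute::wrapper::traits::vector_128_tag{});
    }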
diff --git a/src/core/NEON/wrapper/intrinsics/eor.h b/src/core/NEON/wrapper/intrinsics/eor.h
new file mode 100644
index 0000000000..ce88cf59e7
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/eor.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2019-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_EOR_H
+#define ARM_COMPUTE_WRAPPER_EOR_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VEOR_IMPL(vtype, prefix, postfix) \
+    inline vtype veor(const vtype &a, const vtype &b) \
+    { \
+        return prefix##_##postfix(a, b); \
+    }
+
+VEOR_IMPL(uint8x8_t, veor, u8)
+VEOR_IMPL(int8x8_t, veor, s8)
+VEOR_IMPL(uint16x4_t, veor, u16)
+VEOR_IMPL(int16x4_t, veor, s16)
+VEOR_IMPL(uint32x2_t, veor, u32)
+VEOR_IMPL(int32x2_t, veor, s32)
+
+VEOR_IMPL(uint8x16_t, veorq, u8)
+VEOR_IMPL(int8x16_t, veorq, s8)
+VEOR_IMPL(uint16x8_t, veorq, u16)
+VEOR_IMPL(int16x8_t, veorq, s16)
+VEOR_IMPL(uint32x4_t, veorq, u32)
+VEOR_IMPL(int32x4_t, veorq, s32)
+
+#undef VEOR_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_EOR_H */
diff --git a/src/core/NEON/wrapper/intrinsics/exp.h b/src/core/NEON/wrapper/intrinsics/exp.h
new file mode 100644
index 0000000000..c2a6970967
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/exp.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_EXP_H
+#define ARM_COMPUTE_WRAPPER_EXP_H
+
+#include "src/core/NEON/NEMath.h"
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VEXPQ_IMPL(vtype, postfix) \
+    inline vtype vexpq(const vtype &a) \
+    { \
+        return vexpq_##postfix(a); \
+    }
+
+#define VEXPQ_IMPL_INT(vtype, postfix) \
+    inline vtype vexpq(const vtype &a) \
+    { \
+        ARM_COMPUTE_UNUSED(a); \
+        ARM_COMPUTE_ERROR("Not supported"); \
+    }
+
+VEXPQ_IMPL(float32x4_t, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VEXPQ_IMPL(float16x8_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VEXPQ_IMPL_INT(int32x4_t, s32)
+#undef VEXPQ_IMPL
+
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_EXP_H */
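Design note (illustrative, not part of the patch): VEXPQ_IMPL_INT gives integer vectors a vexpq overload that compiles but fails only at run time. That keeps fully generic kernel templates building when they are instantiated for integer types along paths that are never executed; a sketch of the pattern, with a hypothetical helper name:

    #include "src/core/NEON/wrapper/wrapper.h"

    #include <arm_neon.h>

    template <typename V>
    V exp_step(V x) // hypothetical generic helper
    {
        return arm_compute::wrapper::vexpq(x); // f32/f16: vexpq_*; s32: runtime error
    }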
diff --git a/src/core/NEON/wrapper/intrinsics/ext.h b/src/core/NEON/wrapper/intrinsics/ext.h
new file mode 100644
index 0000000000..d44b231bb2
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/ext.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_EXT_H
+#define ARM_COMPUTE_WRAPPER_EXT_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VEXT_IMPL(vtype, prefix, postfix, size) \
+    inline vtype vext_##size(vtype value_a, vtype value_b) \
+    { \
+        return prefix##_##postfix(value_a, value_b, size); \
+    }
+
+VEXT_IMPL(uint8x8_t, vext, u8, 1)
+VEXT_IMPL(uint8x8_t, vext, u8, 2)
+VEXT_IMPL(int8x8_t, vext, s8, 1)
+VEXT_IMPL(int8x8_t, vext, s8, 2)
+VEXT_IMPL(uint16x4_t, vext, u16, 1)
+VEXT_IMPL(uint16x4_t, vext, u16, 2)
+VEXT_IMPL(int16x4_t, vext, s16, 1)
+VEXT_IMPL(int16x4_t, vext, s16, 2)
+
+VEXT_IMPL(uint8x16_t, vextq, u8, 1)
+VEXT_IMPL(uint8x16_t, vextq, u8, 2)
+VEXT_IMPL(int8x16_t, vextq, s8, 1)
+VEXT_IMPL(int8x16_t, vextq, s8, 2)
+VEXT_IMPL(uint16x8_t, vextq, u16, 1)
+VEXT_IMPL(uint16x8_t, vextq, u16, 2)
+VEXT_IMPL(int16x8_t, vextq, s16, 1)
+VEXT_IMPL(int16x8_t, vextq, s16, 2)
+VEXT_IMPL(int32x4_t, vextq, s32, 1)
+VEXT_IMPL(int32x4_t, vextq, s32, 2)
+
+#undef VEXT_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_EXT_H */
diff --git a/src/core/NEON/wrapper/intrinsics/gethigh.h b/src/core/NEON/wrapper/intrinsics/gethigh.h
new file mode 100644
index 0000000000..d098a27335
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/gethigh.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_GET_HIGH_H
+#define ARM_COMPUTE_WRAPPER_GET_HIGH_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VGETHIGH_IMPL(half_vtype, vtype, postfix) \
+    inline half_vtype vgethigh(const vtype val) \
+    { \
+        return vget_high_##postfix(val); \
+    }
+
+VGETHIGH_IMPL(uint8x8_t, uint8x16_t, u8)
+VGETHIGH_IMPL(int8x8_t, int8x16_t, s8)
+VGETHIGH_IMPL(uint16x4_t, uint16x8_t, u16)
+VGETHIGH_IMPL(int16x4_t, int16x8_t, s16)
+VGETHIGH_IMPL(uint32x2_t, uint32x4_t, u32)
+VGETHIGH_IMPL(int32x2_t, int32x4_t, s32)
+VGETHIGH_IMPL(float32x2_t, float32x4_t, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VGETHIGH_IMPL(float16x4_t, float16x8_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VGETHIGH_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_GET_HIGH_H */
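Illustration (not part of the patch): vgethigh and its counterpart in getlow.h (also added by this patch, truncated below) are the usual way to fold a Q register down to a D register ahead of a final reduction:

    #include "src/core/NEON/wrapper/wrapper.h"

    #include <arm_neon.h>

    float32x2_t fold(float32x4_t v)
    {
        using namespace arm_compute::wrapper;
        return vadd(vgetlow(v), vgethigh(v)); // vadd_f32(vget_low_f32(v), vget_high_f32(v))
    }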
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef ARM_COMPUTE_WRAPPER_GET_HIGH_H +#define ARM_COMPUTE_WRAPPER_GET_HIGH_H + +#include + +namespace arm_compute +{ +namespace wrapper +{ +#define VGETHIGH_IMPL(half_vtype, vtype, postfix) \ + inline half_vtype vgethigh(const vtype val) \ + { \ + return vget_high_##postfix(val); \ + } + +VGETHIGH_IMPL(uint8x8_t, uint8x16_t, u8) +VGETHIGH_IMPL(int8x8_t, int8x16_t, s8) +VGETHIGH_IMPL(uint16x4_t, uint16x8_t, u16) +VGETHIGH_IMPL(int16x4_t, int16x8_t, s16) +VGETHIGH_IMPL(uint32x2_t, uint32x4_t, u32) +VGETHIGH_IMPL(int32x2_t, int32x4_t, s32) +VGETHIGH_IMPL(float32x2_t, float32x4_t, f32) +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +VGETHIGH_IMPL(float16x4_t, float16x8_t, f16) +#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + +#undef VGETHIGH_IMPL +} // namespace wrapper +} // namespace arm_compute +#endif /* ARM_COMPUTE_WRAPPER_GET_HIGH_H */ diff --git a/src/core/NEON/wrapper/intrinsics/getlane.h b/src/core/NEON/wrapper/intrinsics/getlane.h new file mode 100644 index 0000000000..2052751612 --- /dev/null +++ b/src/core/NEON/wrapper/intrinsics/getlane.h @@ -0,0 +1,223 @@ +/* + * Copyright (c) 2018-2020 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
diff --git a/src/core/NEON/wrapper/intrinsics/getlane.h b/src/core/NEON/wrapper/intrinsics/getlane.h
new file mode 100644
index 0000000000..2052751612
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/getlane.h
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_GET_LANE_H
+#define ARM_COMPUTE_WRAPPER_GET_LANE_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VGETLANE_IMPL_8(stype, vtype, postfix) \
+    inline stype vgetlane(const vtype vector, const unsigned int lane) \
+    { \
+        switch(lane) \
+        { \
+            case 0: \
+                return vget_lane_##postfix(vector, 0); \
+            case 1: \
+                return vget_lane_##postfix(vector, 1); \
+            case 2: \
+                return vget_lane_##postfix(vector, 2); \
+            case 3: \
+                return vget_lane_##postfix(vector, 3); \
+            case 4: \
+                return vget_lane_##postfix(vector, 4); \
+            case 5: \
+                return vget_lane_##postfix(vector, 5); \
+            case 6: \
+                return vget_lane_##postfix(vector, 6); \
+            case 7: \
+                return vget_lane_##postfix(vector, 7); \
+            default: \
+                ARM_COMPUTE_ERROR("Invalid lane"); \
+        } \
+    }
+
+#define VGETLANE_IMPL_4(stype, vtype, postfix) \
+    inline stype vgetlane(const vtype vector, const unsigned int lane) \
+    { \
+        switch(lane) \
+        { \
+            case 0: \
+                return vget_lane_##postfix(vector, 0); \
+            case 1: \
+                return vget_lane_##postfix(vector, 1); \
+            case 2: \
+                return vget_lane_##postfix(vector, 2); \
+            case 3: \
+                return vget_lane_##postfix(vector, 3); \
+            default: \
+                ARM_COMPUTE_ERROR("Invalid lane"); \
+        } \
+    }
+
+#define VGETLANE_IMPL_2(stype, vtype, postfix) \
+    inline stype vgetlane(const vtype vector, const unsigned int lane) \
+    { \
+        switch(lane) \
+        { \
+            case 0: \
+                return vget_lane_##postfix(vector, 0); \
+            case 1: \
+                return vget_lane_##postfix(vector, 1); \
+            default: \
+                ARM_COMPUTE_ERROR("Invalid lane"); \
+        } \
+    }
+
+VGETLANE_IMPL_8(uint8_t, uint8x8_t, u8)
+VGETLANE_IMPL_8(int8_t, int8x8_t, s8)
+VGETLANE_IMPL_4(uint16_t, uint16x4_t, u16)
+VGETLANE_IMPL_4(int16_t, int16x4_t, s16)
+VGETLANE_IMPL_2(uint32_t, uint32x2_t, u32)
+VGETLANE_IMPL_2(int32_t, int32x2_t, s32)
+VGETLANE_IMPL_2(float, float32x2_t, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VGETLANE_IMPL_4(float16_t, float16x4_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#define VGETQLANE_IMPL_16(stype, vtype, postfix) \
+    inline stype vgetlane(const vtype vector, const unsigned int lane) \
+    { \
+        switch(lane) \
+        { \
+            case 0: \
+                return vgetq_lane_##postfix(vector, 0); \
+            case 1: \
+                return vgetq_lane_##postfix(vector, 1); \
+            case 2: \
+                return vgetq_lane_##postfix(vector, 2); \
+            case 3: \
+                return vgetq_lane_##postfix(vector, 3); \
+            case 4: \
+                return vgetq_lane_##postfix(vector, 4); \
+            case 5: \
+                return vgetq_lane_##postfix(vector, 5); \
+            case 6: \
+                return vgetq_lane_##postfix(vector, 6); \
+            case 7: \
+                return vgetq_lane_##postfix(vector, 7); \
+            case 8: \
+                return vgetq_lane_##postfix(vector, 8); \
+            case 9: \
+                return vgetq_lane_##postfix(vector, 9); \
+            case 10: \
+                return vgetq_lane_##postfix(vector, 10); \
+            case 11: \
+                return vgetq_lane_##postfix(vector, 11); \
+            case 12: \
+                return vgetq_lane_##postfix(vector, 12); \
+            case 13: \
+                return vgetq_lane_##postfix(vector, 13); \
+            case 14: \
+                return vgetq_lane_##postfix(vector, 14); \
+            case 15: \
+                return vgetq_lane_##postfix(vector, 15); \
+            default: \
+                ARM_COMPUTE_ERROR("Invalid lane"); \
+        } \
+    }
+
+#define VGETQLANE_IMPL_8(stype, vtype, postfix) \
+    inline stype vgetlane(const vtype vector, const unsigned int lane) \
+    { \
+        switch(lane) \
+        { \
+            case 0: \
+                return vgetq_lane_##postfix(vector, 0); \
+            case 1: \
+                return vgetq_lane_##postfix(vector, 1); \
+            case 2: \
+                return vgetq_lane_##postfix(vector, 2); \
+            case 3: \
+                return vgetq_lane_##postfix(vector, 3); \
+            case 4: \
+                return vgetq_lane_##postfix(vector, 4); \
+            case 5: \
+                return vgetq_lane_##postfix(vector, 5); \
+            case 6: \
+                return vgetq_lane_##postfix(vector, 6); \
+            case 7: \
+                return vgetq_lane_##postfix(vector, 7); \
+            default: \
+                ARM_COMPUTE_ERROR("Invalid lane"); \
+        } \
+    }
+
+#define VGETQLANE_IMPL_4(stype, vtype, postfix) \
+    inline stype vgetlane(const vtype vector, const unsigned int lane) \
+    { \
+        switch(lane) \
+        { \
+            case 0: \
+                return vgetq_lane_##postfix(vector, 0); \
+            case 1: \
+                return vgetq_lane_##postfix(vector, 1); \
+            case 2: \
+                return vgetq_lane_##postfix(vector, 2); \
+            case 3: \
+                return vgetq_lane_##postfix(vector, 3); \
+            default: \
+                ARM_COMPUTE_ERROR("Invalid lane"); \
+        } \
+    }
+
+#define VGETQLANE_IMPL_2(stype, vtype, postfix) \
+    inline stype vgetlane(const vtype vector, const unsigned int lane) \
+    { \
+        switch(lane) \
+        { \
+            case 0: \
+                return vgetq_lane_##postfix(vector, 0); \
+            case 1: \
+                return vgetq_lane_##postfix(vector, 1); \
+            default: \
+                ARM_COMPUTE_ERROR("Invalid lane"); \
+        } \
+    }
+
+VGETQLANE_IMPL_16(uint8_t, uint8x16_t, u8)
+VGETQLANE_IMPL_16(int8_t, int8x16_t, s8)
+VGETQLANE_IMPL_8(uint16_t, uint16x8_t, u16)
+VGETQLANE_IMPL_8(int16_t, int16x8_t, s16)
+VGETQLANE_IMPL_4(uint32_t, uint32x4_t, u32)
+VGETQLANE_IMPL_4(int32_t, int32x4_t, s32)
+VGETQLANE_IMPL_4(float, float32x4_t, f32)
+VGETQLANE_IMPL_2(int64_t, int64x2_t, s64)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VGETQLANE_IMPL_8(float16_t, float16x8_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VGETLANE_IMPL_8
+#undef VGETLANE_IMPL_4
+#undef VGETLANE_IMPL_2
+
+#undef VGETQLANE_IMPL_16
+#undef VGETQLANE_IMPL_8
+#undef VGETQLANE_IMPL_4
+#undef VGETQLANE_IMPL_2
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_GET_LANE_H */
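Because the lane index is a runtime value here, vgetlane must dispatch through a switch (the underlying vget_lane/vgetq_lane intrinsics require a compile-time immediate), and an out-of-range index aborts via ARM_COMPUTE_ERROR. A minimal horizontal-sum sketch (illustrative only; vdup_n and its tag type come from dup_n.h and traits.h in this patch):

    float32x4_t acc = wrapper::vdup_n(1.25f, wrapper::traits::vector_128_tag{});
    float       sum = wrapper::vgetlane(acc, 0) + wrapper::vgetlane(acc, 1)
                    + wrapper::vgetlane(acc, 2) + wrapper::vgetlane(acc, 3); // 5.0f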
diff --git a/src/core/NEON/wrapper/intrinsics/getlow.h b/src/core/NEON/wrapper/intrinsics/getlow.h
new file mode 100644
index 0000000000..b5469f0eab
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/getlow.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_GET_LOW_H
+#define ARM_COMPUTE_WRAPPER_GET_LOW_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VGETLOW_IMPL(half_vtype, vtype, postfix) \
+    inline half_vtype vgetlow(const vtype val) \
+    { \
+        return vget_low_##postfix(val); \
+    }
+
+VGETLOW_IMPL(uint8x8_t, uint8x16_t, u8)
+VGETLOW_IMPL(int8x8_t, int8x16_t, s8)
+VGETLOW_IMPL(uint16x4_t, uint16x8_t, u16)
+VGETLOW_IMPL(int16x4_t, int16x8_t, s16)
+VGETLOW_IMPL(uint32x2_t, uint32x4_t, u32)
+VGETLOW_IMPL(int32x2_t, int32x4_t, s32)
+VGETLOW_IMPL(float32x2_t, float32x4_t, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VGETLOW_IMPL(float16x4_t, float16x8_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VGETLOW_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_GET_LOW_H */
diff --git a/src/core/NEON/wrapper/intrinsics/intrinsics.h b/src/core/NEON/wrapper/intrinsics/intrinsics.h
new file mode 100644
index 0000000000..495321a6a1
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/intrinsics.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_INTRINSICS_H
+#define ARM_COMPUTE_WRAPPER_INTRINSICS_H
+
+#include "src/core/NEON/wrapper/intrinsics/abs.h"
+#include "src/core/NEON/wrapper/intrinsics/add.h"
+#include "src/core/NEON/wrapper/intrinsics/and.h"
+#include "src/core/NEON/wrapper/intrinsics/bsl.h"
+#include "src/core/NEON/wrapper/intrinsics/ceq.h"
+#include "src/core/NEON/wrapper/intrinsics/cge.h"
+#include "src/core/NEON/wrapper/intrinsics/cgt.h"
+#include "src/core/NEON/wrapper/intrinsics/cle.h"
+#include "src/core/NEON/wrapper/intrinsics/clt.h"
+#include "src/core/NEON/wrapper/intrinsics/combine.h"
+#include "src/core/NEON/wrapper/intrinsics/cvt.h"
+#include "src/core/NEON/wrapper/intrinsics/div.h"
+#include "src/core/NEON/wrapper/intrinsics/dup_n.h"
+#include "src/core/NEON/wrapper/intrinsics/eor.h"
+#include "src/core/NEON/wrapper/intrinsics/exp.h"
+#include "src/core/NEON/wrapper/intrinsics/ext.h"
+#include "src/core/NEON/wrapper/intrinsics/gethigh.h"
+#include "src/core/NEON/wrapper/intrinsics/getlane.h"
+#include "src/core/NEON/wrapper/intrinsics/getlow.h"
+#include "src/core/NEON/wrapper/intrinsics/inv.h"
+#include "src/core/NEON/wrapper/intrinsics/invsqrt.h"
+#include "src/core/NEON/wrapper/intrinsics/load.h"
+#include "src/core/NEON/wrapper/intrinsics/log.h"
+#include "src/core/NEON/wrapper/intrinsics/max.h"
+#include "src/core/NEON/wrapper/intrinsics/min.h"
+#include "src/core/NEON/wrapper/intrinsics/mla.h"
+#include "src/core/NEON/wrapper/intrinsics/movl.h"
+#include "src/core/NEON/wrapper/intrinsics/movn.h"
+#include "src/core/NEON/wrapper/intrinsics/mul.h"
+#include "src/core/NEON/wrapper/intrinsics/neg.h"
+#include "src/core/NEON/wrapper/intrinsics/not.h"
+#include "src/core/NEON/wrapper/intrinsics/orr.h"
+#include "src/core/NEON/wrapper/intrinsics/pmax.h"
+#include "src/core/NEON/wrapper/intrinsics/pmin.h"
+#include "src/core/NEON/wrapper/intrinsics/pow.h"
+#include "src/core/NEON/wrapper/intrinsics/qmov.h"
+#include "src/core/NEON/wrapper/intrinsics/qmovun.h"
+#include "src/core/NEON/wrapper/intrinsics/reinterpret.h"
+#include "src/core/NEON/wrapper/intrinsics/rev64.h"
+#include "src/core/NEON/wrapper/intrinsics/round.h"
+#include "src/core/NEON/wrapper/intrinsics/setlane.h"
+#include "src/core/NEON/wrapper/intrinsics/sin.h"
+#include "src/core/NEON/wrapper/intrinsics/store.h"
+#include "src/core/NEON/wrapper/intrinsics/sub.h"
+#include "src/core/NEON/wrapper/intrinsics/tanh.h"
+#include "src/core/NEON/wrapper/intrinsics/tbl.h"
+
+#endif /* ARM_COMPUTE_WRAPPER_INTRINSICS_H */
diff --git a/src/core/NEON/wrapper/intrinsics/inv.h b/src/core/NEON/wrapper/intrinsics/inv.h
new file mode 100644
index 0000000000..de398b0403
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/inv.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_INV_H
+#define ARM_COMPUTE_WRAPPER_INV_H
+
+#include "src/core/NEON/NEMath.h"
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VINV_IMPL(vtype, prefix, postfix) \
+    inline vtype vinv(const vtype &a) \
+    { \
+        return prefix##_##postfix(a); \
+    }
+
+#define VINV_IMPL_INT(vtype, prefix, postfix) \
+    inline vtype vinv(const vtype &a) \
+    { \
+        ARM_COMPUTE_UNUSED(a); \
+        ARM_COMPUTE_ERROR("Not supported"); \
+    }
+
+VINV_IMPL(float32x2_t, vinv, f32)
+VINV_IMPL_INT(int32x2_t, vinv, s32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VINV_IMPL(float16x4_t, vinv, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VINV_IMPL(float32x4_t, vinvq, f32)
+VINV_IMPL_INT(int32x4_t, vinvq, s32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VINV_IMPL(float16x8_t, vinvq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VINV_IMPL
+#undef VINV_IMPL_INT
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_INV_H */
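The _INT overload exists only so that type-generic kernels still compile when instantiated for integer tensors; calling it is a runtime error. A float-only sketch (illustrative; vinvq here is the NEMath reciprocal estimate refined by Newton-Raphson steps):

    float32x4_t x = wrapper::vdup_n(4.0f, wrapper::traits::vector_128_tag{});
    float32x4_t r = wrapper::vinv(x); // approximately 0.25f in every lane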
diff --git a/src/core/NEON/wrapper/intrinsics/invsqrt.h b/src/core/NEON/wrapper/intrinsics/invsqrt.h
new file mode 100644
index 0000000000..2343efa8f8
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/invsqrt.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_INVSQRT_H
+#define ARM_COMPUTE_WRAPPER_INVSQRT_H
+
+#include "src/core/NEON/NEMath.h"
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VINVSQRT_IMPL(stype, vtype, prefix, postfix) \
+    inline vtype vinvsqrt(const vtype &a) \
+    { \
+        return prefix##_##postfix(a); \
+    }
+
+#define VINVSQRT_IMPL_INT(stype, vtype, prefix, postfix) \
+    inline vtype vinvsqrt(const vtype &a) \
+    { \
+        ARM_COMPUTE_UNUSED(a); \
+        ARM_COMPUTE_ERROR("Not supported"); \
+    }
+
+VINVSQRT_IMPL(float, float32x2_t, vinvsqrt, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VINVSQRT_IMPL(float16_t, float16x4_t, vinvsqrt, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VINVSQRT_IMPL_INT(int, int32x4_t, vinvsqrt, s32)
+
+VINVSQRT_IMPL(float, float32x4_t, vinvsqrtq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VINVSQRT_IMPL(float16_t, float16x8_t, vinvsqrtq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VINVSQRT_IMPL
+#undef VINVSQRT_IMPL_INT
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_INVSQRT_H */
diff --git a/src/core/NEON/wrapper/intrinsics/load.h b/src/core/NEON/wrapper/intrinsics/load.h
new file mode 100644
index 0000000000..a2116c028b
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/load.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_LOAD_H
+#define ARM_COMPUTE_WRAPPER_LOAD_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VLOAD_IMPL(stype, vtype, postfix) \
+    inline vtype vload(const stype *ptr) \
+    { \
+        return vld1_##postfix(ptr); \
+    }
+
+VLOAD_IMPL(uint8_t, uint8x8_t, u8)
+VLOAD_IMPL(int8_t, int8x8_t, s8)
+VLOAD_IMPL(uint16_t, uint16x4_t, u16)
+VLOAD_IMPL(int16_t, int16x4_t, s16)
+VLOAD_IMPL(uint32_t, uint32x2_t, u32)
+VLOAD_IMPL(int32_t, int32x2_t, s32)
+//VLOAD_IMPL(uint64_t, uint64x1_t, u64)
+//VLOAD_IMPL(int64_t, int64x1_t, s64)
+VLOAD_IMPL(float, float32x2_t, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VLOAD_IMPL(float16_t, float16x4_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#define VLOADQ_IMPL(stype, vtype, postfix) \
+    inline vtype vloadq(const stype *ptr) \
+    { \
+        return vld1q_##postfix(ptr); \
+    }
+
+VLOADQ_IMPL(uint8_t, uint8x16_t, u8)
+VLOADQ_IMPL(int8_t, int8x16_t, s8)
+VLOADQ_IMPL(uint16_t, uint16x8_t, u16)
+VLOADQ_IMPL(int16_t, int16x8_t, s16)
+VLOADQ_IMPL(uint32_t, uint32x4_t, u32)
+VLOADQ_IMPL(int32_t, int32x4_t, s32)
+//VLOAD_IMPL(uint64_t, uint64x1_t, u64)
+//VLOAD_IMPL(int64_t, int64x1_t, s64)
+VLOADQ_IMPL(float, float32x4_t, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VLOADQ_IMPL(float16_t, float16x8_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#undef VLOAD_IMPL
+#undef VLOADQ_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_LOAD_H */
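vload and vloadq differ only in width, so the same helper name can serve both the main body and the tail of a vectorized loop. A sketch (illustrative only):

    float buf[4] = {1.f, 2.f, 3.f, 4.f};
    float32x2_t d = wrapper::vload(buf);  // vld1_f32  -> lanes {1, 2}
    float32x4_t q = wrapper::vloadq(buf); // vld1q_f32 -> lanes {1, 2, 3, 4}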
diff --git a/src/core/NEON/wrapper/intrinsics/log.h b/src/core/NEON/wrapper/intrinsics/log.h
new file mode 100644
index 0000000000..357a77ca78
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/log.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_LOG_H
+#define ARM_COMPUTE_WRAPPER_LOG_H
+
+#include "src/core/NEON/NEMath.h"
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VLOG_IMPL(vtype, prefix, postfix) \
+    inline vtype vlog(const vtype &a) \
+    { \
+        return prefix##_##postfix(a); \
+    }
+
+#define VLOG_IMPL_INT(vtype, prefix, postfix) \
+    inline vtype vlog(const vtype &a) \
+    { \
+        ARM_COMPUTE_UNUSED(a); \
+        ARM_COMPUTE_ERROR("Not supported"); \
+    }
+
+VLOG_IMPL(float32x4_t, vlogq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VLOG_IMPL(float16x8_t, vlogq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VLOG_IMPL_INT(int32x4_t, vlogq, s32)
+
+#undef VLOG_IMPL
+#undef VLOG_IMPL_INT
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_LOG_H */
diff --git a/src/core/NEON/wrapper/intrinsics/max.h b/src/core/NEON/wrapper/intrinsics/max.h
new file mode 100644
index 0000000000..cec437d171
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/max.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_MAX_H
+#define ARM_COMPUTE_WRAPPER_MAX_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VMAX_IMPL(stype, vtype, prefix, postfix) \
+    inline vtype vmax(const vtype &a, const vtype &b) \
+    { \
+        return prefix##_##postfix(a, b); \
+    }
+
+VMAX_IMPL(uint8_t, uint8x8_t, vmax, u8)
+VMAX_IMPL(int8_t, int8x8_t, vmax, s8)
+VMAX_IMPL(uint16_t, uint16x4_t, vmax, u16)
+VMAX_IMPL(int16_t, int16x4_t, vmax, s16)
+VMAX_IMPL(uint32_t, uint32x2_t, vmax, u32)
+VMAX_IMPL(int32_t, int32x2_t, vmax, s32)
+VMAX_IMPL(float, float32x2_t, vmax, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VMAX_IMPL(float16_t, float16x4_t, vmax, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VMAX_IMPL(uint8_t, uint8x16_t, vmaxq, u8)
+VMAX_IMPL(int8_t, int8x16_t, vmaxq, s8)
+VMAX_IMPL(uint16_t, uint16x8_t, vmaxq, u16)
+VMAX_IMPL(int16_t, int16x8_t, vmaxq, s16)
+VMAX_IMPL(uint32_t, uint32x4_t, vmaxq, u32)
+VMAX_IMPL(int32_t, int32x4_t, vmaxq, s32)
+VMAX_IMPL(float, float32x4_t, vmaxq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VMAX_IMPL(float16_t, float16x8_t, vmaxq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VMAX_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_MAX_H */
diff --git a/src/core/NEON/wrapper/intrinsics/min.h b/src/core/NEON/wrapper/intrinsics/min.h
new file mode 100644
index 0000000000..8afcb3cb10
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/min.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_MIN_H
+#define ARM_COMPUTE_WRAPPER_MIN_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VMIN_IMPL(stype, vtype, prefix, postfix) \
+    inline vtype vmin(const vtype &a, const vtype &b) \
+    { \
+        return prefix##_##postfix(a, b); \
+    }
+
+VMIN_IMPL(uint8_t, uint8x8_t, vmin, u8)
+VMIN_IMPL(int8_t, int8x8_t, vmin, s8)
+VMIN_IMPL(uint16_t, uint16x4_t, vmin, u16)
+VMIN_IMPL(int16_t, int16x4_t, vmin, s16)
+VMIN_IMPL(uint32_t, uint32x2_t, vmin, u32)
+VMIN_IMPL(int32_t, int32x2_t, vmin, s32)
+VMIN_IMPL(float, float32x2_t, vmin, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VMIN_IMPL(float16_t, float16x4_t, vmin, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VMIN_IMPL(uint8_t, uint8x16_t, vminq, u8)
+VMIN_IMPL(int8_t, int8x16_t, vminq, s8)
+VMIN_IMPL(uint16_t, uint16x8_t, vminq, u16)
+VMIN_IMPL(int16_t, int16x8_t, vminq, s16)
+VMIN_IMPL(uint32_t, uint32x4_t, vminq, u32)
+VMIN_IMPL(int32_t, int32x4_t, vminq, s32)
+VMIN_IMPL(float, float32x4_t, vminq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VMIN_IMPL(float16_t, float16x8_t, vminq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VMIN_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_MIN_H */
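vmax and vmin compose into a type- and width-generic clamp, which is how templated kernels typically consume them. A sketch (illustrative only; V stands for any vector type covered above):

    template <typename V>
    V vclamp(const V &x, const V &lo, const V &hi)
    {
        return wrapper::vmin(wrapper::vmax(x, lo), hi); // per-lane clamp to [lo, hi]
    }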
diff --git a/src/core/NEON/wrapper/intrinsics/mla.h b/src/core/NEON/wrapper/intrinsics/mla.h
new file mode 100644
index 0000000000..2b38b34137
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/mla.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_MLA_H
+#define ARM_COMPUTE_WRAPPER_MLA_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VMLA_IMPL(stype, vtype, prefix, postfix) \
+    inline vtype vmla(const vtype &a, const vtype &b, const vtype &c) \
+    { \
+        return prefix##_##postfix(a, b, c); \
+    }
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#define VMLA_IMPL2(stype, vtype, prefix1, prefix2, postfix) \
+    inline vtype vmla(const vtype &a, const vtype &b, const vtype &c) \
+    { \
+        return prefix1##_##postfix(a, prefix2##_##postfix(b, c)); \
+    }
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VMLA_IMPL(uint8x8_t, uint8x8_t, vmla, u8)
+VMLA_IMPL(int8x8_t, int8x8_t, vmla, s8)
+VMLA_IMPL(uint16x4_t, uint16x4_t, vmla, u16)
+VMLA_IMPL(int16x4_t, int16x4_t, vmla, s16)
+VMLA_IMPL(uint32x2_t, uint32x2_t, vmla, u32)
+VMLA_IMPL(int32x2_t, int32x2_t, vmla, s32)
+VMLA_IMPL(float32x2_t, float32x2_t, vmla, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VMLA_IMPL2(float16x4_t, float16x4_t, vadd, vmul, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VMLA_IMPL(uint8x16_t, uint8x16_t, vmlaq, u8)
+VMLA_IMPL(int8x16_t, int8x16_t, vmlaq, s8)
+VMLA_IMPL(uint16x8_t, uint16x8_t, vmlaq, u16)
+VMLA_IMPL(int16x8_t, int16x8_t, vmlaq, s16)
+VMLA_IMPL(uint32x4_t, uint32x4_t, vmlaq, u32)
+VMLA_IMPL(int32x4_t, int32x4_t, vmlaq, s32)
+VMLA_IMPL(float32x4_t, float32x4_t, vmlaq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VMLA_IMPL2(float16x8_t, float16x8_t, vaddq, vmulq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VMLA_IMPL
+#undef VMLA_IMPL2
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_MLA_H */
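Note that the FP16 path synthesizes the multiply-accumulate as vadd(a, vmul(b, c)) because there is no vmla intrinsic for float16. An accumulation sketch (illustrative only):

    const float a[4] = {1.f, 2.f, 3.f, 4.f};
    const float b[4] = {2.f, 2.f, 2.f, 2.f};
    float32x4_t acc  = wrapper::vdup_n(0.f, wrapper::traits::vector_128_tag{});
    acc              = wrapper::vmla(acc, wrapper::vloadq(a), wrapper::vloadq(b)); // acc += a * b per lane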
diff --git a/src/core/NEON/wrapper/intrinsics/movl.h b/src/core/NEON/wrapper/intrinsics/movl.h
new file mode 100644
index 0000000000..99f2150eab
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/movl.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_MOVL_H
+#define ARM_COMPUTE_WRAPPER_MOVL_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VMOVL_IMPL(ptype, vtype, prefix, postfix) \
+    inline ptype vmovl(const vtype &a) \
+    { \
+        return prefix##_##postfix(a); \
+    }
+
+VMOVL_IMPL(uint16x8_t, uint8x8_t, vmovl, u8)
+VMOVL_IMPL(int16x8_t, int8x8_t, vmovl, s8)
+VMOVL_IMPL(uint32x4_t, uint16x4_t, vmovl, u16)
+VMOVL_IMPL(int32x4_t, int16x4_t, vmovl, s16)
+VMOVL_IMPL(uint64x2_t, uint32x2_t, vmovl, u32)
+VMOVL_IMPL(int64x2_t, int32x2_t, vmovl, s32)
+
+#undef VMOVL_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_MOVL_H */
diff --git a/src/core/NEON/wrapper/intrinsics/movn.h b/src/core/NEON/wrapper/intrinsics/movn.h
new file mode 100644
index 0000000000..460c277540
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/movn.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_MOVN_H
+#define ARM_COMPUTE_WRAPPER_MOVN_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VMOVN_IMPL(dtype, vtype, prefix, postfix) \
+    inline dtype vmovn(const vtype &a) \
+    { \
+        return prefix##_##postfix(a); \
+    }
+
+VMOVN_IMPL(uint32x2_t, uint64x2_t, vmovn, u64)
+VMOVN_IMPL(int32x2_t, int64x2_t, vmovn, s64)
+VMOVN_IMPL(uint16x4_t, uint32x4_t, vmovn, u32)
+VMOVN_IMPL(int16x4_t, int32x4_t, vmovn, s32)
+VMOVN_IMPL(uint8x8_t, uint16x8_t, vmovn, u16)
+VMOVN_IMPL(int8x8_t, int16x8_t, vmovn, s16)
+
+#define VQMOVN_IMPL(dtype, vtype, prefix, postfix) \
+    inline dtype vqmovn(const vtype &a) \
+    { \
+        return prefix##_##postfix(a); \
+    }
+
+VQMOVN_IMPL(uint32x2_t, uint64x2_t, vqmovn, u64)
+VQMOVN_IMPL(int32x2_t, int64x2_t, vqmovn, s64)
+VQMOVN_IMPL(uint16x4_t, uint32x4_t, vqmovn, u32)
+VQMOVN_IMPL(int16x4_t, int32x4_t, vqmovn, s32)
+VQMOVN_IMPL(uint8x8_t, uint16x8_t, vqmovn, u16)
+VQMOVN_IMPL(int8x8_t, int16x8_t, vqmovn, s16)
+
+#undef VMOVN_IMPL
+#undef VQMOVN_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_MOVN_H */
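A common widen-compute-narrow round trip built from the two headers above (illustrative only; the 16-bit intermediates cannot overflow here, and vqmovn saturates on the way back down):

    int8_t    src8[8] = {1, -2, 3, -4, 5, -6, 7, -8};
    int16x8_t wide    = wrapper::vmovl(wrapper::vload(src8)); // s8 -> s16
    wide              = wrapper::vmul(wide, wide);            // 16-bit squares
    int8x8_t  narrow  = wrapper::vqmovn(wide);                // saturating s16 -> s8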
diff --git a/src/core/NEON/wrapper/intrinsics/mul.h b/src/core/NEON/wrapper/intrinsics/mul.h
new file mode 100644
index 0000000000..6296fff35a
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/mul.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_MUL_H
+#define ARM_COMPUTE_WRAPPER_MUL_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VMUL_IMPL(stype, vtype, prefix, postfix) \
+    inline vtype vmul(const vtype &a, const vtype &b) \
+    { \
+        return prefix##_##postfix(a, b); \
+    }
+
+VMUL_IMPL(uint8x8_t, uint8x8_t, vmul, u8)
+VMUL_IMPL(int8x8_t, int8x8_t, vmul, s8)
+VMUL_IMPL(uint16x4_t, uint16x4_t, vmul, u16)
+VMUL_IMPL(int16x4_t, int16x4_t, vmul, s16)
+VMUL_IMPL(uint32x2_t, uint32x2_t, vmul, u32)
+VMUL_IMPL(int32x2_t, int32x2_t, vmul, s32)
+VMUL_IMPL(float32x2_t, float32x2_t, vmul, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VMUL_IMPL(float16_t, float16x4_t, vmul, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VMUL_IMPL(uint8_t, uint8x16_t, vmulq, u8)
+VMUL_IMPL(int8_t, int8x16_t, vmulq, s8)
+VMUL_IMPL(uint16_t, uint16x8_t, vmulq, u16)
+VMUL_IMPL(int16_t, int16x8_t, vmulq, s16)
+VMUL_IMPL(uint32_t, uint32x4_t, vmulq, u32)
+VMUL_IMPL(int32_t, int32x4_t, vmulq, s32)
+VMUL_IMPL(float32x4_t, float32x4_t, vmulq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VMUL_IMPL(float16_t, float16x8_t, vmulq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VMUL_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_MUL_H */
diff --git a/src/core/NEON/wrapper/intrinsics/neg.h b/src/core/NEON/wrapper/intrinsics/neg.h
new file mode 100644
index 0000000000..5e4556664e
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/neg.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_NEG_H
+#define ARM_COMPUTE_WRAPPER_NEG_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VNEG_IMPL(vtype, prefix, postfix) \
+    inline vtype vneg(const vtype &a) \
+    { \
+        return prefix##_##postfix(a); \
+    }
+
+VNEG_IMPL(int8x8_t, vneg, s8)
+VNEG_IMPL(int16x4_t, vneg, s16)
+VNEG_IMPL(int32x2_t, vneg, s32)
+VNEG_IMPL(float32x2_t, vneg, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VNEG_IMPL(float16x4_t, vneg, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VNEG_IMPL(int8x16_t, vnegq, s8)
+VNEG_IMPL(int16x8_t, vnegq, s16)
+VNEG_IMPL(int32x4_t, vnegq, s32)
+VNEG_IMPL(float32x4_t, vnegq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VNEG_IMPL(float16x8_t, vnegq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VNEG_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_NEG_H */
diff --git a/src/core/NEON/wrapper/intrinsics/not.h b/src/core/NEON/wrapper/intrinsics/not.h
new file mode 100644
index 0000000000..5853e849a2
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/not.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2019-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_NOT_H
+#define ARM_COMPUTE_WRAPPER_NOT_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VNOT_IMPL(stype, vtype, prefix, postfix) \
+    inline vtype vnot(const vtype &a) \
+    { \
+        return prefix##_##postfix(a); \
+    }
+
+VNOT_IMPL(uint8_t, uint8x8_t, vmvn, u8)
+VNOT_IMPL(int8_t, int8x8_t, vmvn, s8)
+VNOT_IMPL(uint16_t, uint16x4_t, vmvn, u16)
+VNOT_IMPL(int16_t, int16x4_t, vmvn, s16)
+VNOT_IMPL(uint32_t, uint32x2_t, vmvn, u32)
+VNOT_IMPL(int32_t, int32x2_t, vmvn, s32)
+VNOT_IMPL(float32x2_t, float32x2_t, vinv, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VNOT_IMPL(float16x4_t, float16x4_t, vinv, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VNOT_IMPL(uint8_t, uint8x16_t, vmvnq, u8)
+VNOT_IMPL(int8_t, int8x16_t, vmvnq, s8)
+VNOT_IMPL(uint16_t, uint16x8_t, vmvnq, u16)
+VNOT_IMPL(int16_t, int16x8_t, vmvnq, s16)
+VNOT_IMPL(uint32_t, uint32x4_t, vmvnq, u32)
+VNOT_IMPL(int32_t, int32x4_t, vmvnq, s32)
+VNOT_IMPL(float32x4_t, float32x4_t, vinvq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VNOT_IMPL(float16x8_t, float16x8_t, vinvq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VNOT_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_NOT_H */
diff --git a/src/core/NEON/wrapper/intrinsics/orr.h b/src/core/NEON/wrapper/intrinsics/orr.h
new file mode 100644
index 0000000000..cc83e95d15
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/orr.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_ORR_H
+#define ARM_COMPUTE_WRAPPER_ORR_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VORR_IMPL(stype, vtype, prefix, postfix) \
+    inline vtype vorr(const vtype &a, const vtype &b) \
+    { \
+        return prefix##_##postfix(a, b); \
+    }
+
+VORR_IMPL(uint8_t, uint8x8_t, vorr, u8)
+VORR_IMPL(int8_t, int8x8_t, vorr, s8)
+VORR_IMPL(uint16_t, uint16x4_t, vorr, u16)
+VORR_IMPL(int16_t, int16x4_t, vorr, s16)
+VORR_IMPL(uint32_t, uint32x2_t, vorr, u32)
+VORR_IMPL(int32_t, int32x2_t, vorr, s32)
+VORR_IMPL(uint64_t, uint64x1_t, vorr, u64)
+VORR_IMPL(int64_t, int64x1_t, vorr, s64)
+
+VORR_IMPL(uint8_t, uint8x16_t, vorrq, u8)
+VORR_IMPL(int8_t, int8x16_t, vorrq, s8)
+VORR_IMPL(uint16_t, uint16x8_t, vorrq, u16)
+VORR_IMPL(int16_t, int16x8_t, vorrq, s16)
+VORR_IMPL(uint32_t, uint32x4_t, vorrq, u32)
+VORR_IMPL(int32_t, int32x4_t, vorrq, s32)
+VORR_IMPL(uint64_t, uint64x2_t, vorrq, u64)
+VORR_IMPL(int64_t, int64x2_t, vorrq, s64)
+
+#undef VORR_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_ORR_H */
diff --git a/src/core/NEON/wrapper/intrinsics/pmax.h b/src/core/NEON/wrapper/intrinsics/pmax.h
new file mode 100644
index 0000000000..cd2b2d1f41
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/pmax.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_PMAX_H
+#define ARM_COMPUTE_WRAPPER_PMAX_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VPMAX_IMPL(stype, vtype, prefix, postfix) \
+    inline vtype vpmax(const vtype &a, const vtype &b) \
+    { \
+        return prefix##_##postfix(a, b); \
+    }
+
+VPMAX_IMPL(uint8_t, uint8x8_t, vpmax, u8)
+VPMAX_IMPL(int8_t, int8x8_t, vpmax, s8)
+VPMAX_IMPL(uint16_t, uint16x4_t, vpmax, u16)
+VPMAX_IMPL(int16_t, int16x4_t, vpmax, s16)
+VPMAX_IMPL(uint32_t, uint32x2_t, vpmax, u32)
+VPMAX_IMPL(int32_t, int32x2_t, vpmax, s32)
+VPMAX_IMPL(float, float32x2_t, vpmax, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VPMAX_IMPL(float16_t, float16x4_t, vpmax, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VPMAX_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_PMAX_H */
diff --git a/src/core/NEON/wrapper/intrinsics/pmin.h b/src/core/NEON/wrapper/intrinsics/pmin.h
new file mode 100644
index 0000000000..59b6be69ce
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/pmin.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_PMIN_H
+#define ARM_COMPUTE_WRAPPER_PMIN_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VPMIN_IMPL(stype, vtype, prefix, postfix) \
+    inline vtype vpmin(const vtype &a, const vtype &b) \
+    { \
+        return prefix##_##postfix(a, b); \
+    }
+
+VPMIN_IMPL(uint8_t, uint8x8_t, vpmin, u8)
+VPMIN_IMPL(int8_t, int8x8_t, vpmin, s8)
+VPMIN_IMPL(uint16_t, uint16x4_t, vpmin, u16)
+VPMIN_IMPL(int16_t, int16x4_t, vpmin, s16)
+VPMIN_IMPL(uint32_t, uint32x2_t, vpmin, u32)
+VPMIN_IMPL(int32_t, int32x2_t, vpmin, s32)
+VPMIN_IMPL(float, float32x2_t, vpmin, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VPMIN_IMPL(float16_t, float16x4_t, vpmin, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VPMIN_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_PMIN_H */
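vpmax/vpmin operate pairwise across adjacent lanes, which makes them the folding step of horizontal reductions on 64-bit vectors. A sketch (illustrative only):

    float buf2[2] = {3.f, 7.f};
    float32x2_t v = wrapper::vload(buf2);
    float best    = wrapper::vgetlane(wrapper::vpmax(v, v), 0); // 7.f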
diff --git a/src/core/NEON/wrapper/intrinsics/pow.h b/src/core/NEON/wrapper/intrinsics/pow.h
new file mode 100644
index 0000000000..61f834ed23
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/pow.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_POW_H
+#define ARM_COMPUTE_WRAPPER_POW_H
+
+#include "src/core/NEON/NEMath.h"
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VPOW_IMPL(vtype, prefix, postfix) \
+    inline vtype vpow(const vtype &a, const vtype &b) \
+    { \
+        return prefix##_##postfix(a, b); \
+    }
+
+VPOW_IMPL(float32x4_t, vpowq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VPOW_IMPL(float16x8_t, vpowq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VPOW_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_POW_H */
diff --git a/src/core/NEON/wrapper/intrinsics/qmov.h b/src/core/NEON/wrapper/intrinsics/qmov.h
new file mode 100644
index 0000000000..167f3cf43b
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/qmov.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_QMOV_H
+#define ARM_COMPUTE_WRAPPER_QMOV_H
+
+#include <arm_neon.h>
+#include <type_traits>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+template <typename T>
+inline typename std::enable_if<std::is_same<T, uint8_t>::value, uint8x8_t>::type
+vqmov(const int16x8_t &a)
+{
+    return vqmovun_s16(a);
+}
+
+template <typename T>
+inline typename std::enable_if<std::is_same<T, int8_t>::value, int8x8_t>::type
+vqmov(const int16x8_t &a)
+{
+    return vqmovn_s16(a);
+}
+
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_QMOV_H */
diff --git a/src/core/NEON/wrapper/intrinsics/qmovun.h b/src/core/NEON/wrapper/intrinsics/qmovun.h
new file mode 100644
index 0000000000..f823ddb513
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/qmovun.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_QMOVUN_H
+#define ARM_COMPUTE_WRAPPER_QMOVUN_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VQMOVUN_IMPL(dtype, vtype, prefix, postfix) \
+    inline dtype vqmovun(const vtype &a) \
+    { \
+        return prefix##_##postfix(a); \
+    }
+
+VQMOVUN_IMPL(uint32x2_t, int64x2_t, vqmovun, s64)
+VQMOVUN_IMPL(uint16x4_t, int32x4_t, vqmovun, s32)
+VQMOVUN_IMPL(uint8x8_t, int16x8_t, vqmovun, s16)
+
+#undef VQMOVUN_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_QMOVUN_H */
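The tag-dispatched vqmov lets a single template serve both quantized output types. A sketch (illustrative only; vstore comes from store.h in this patch, and store_result is a hypothetical helper name):

    template <typename T> // T = uint8_t (QASYMM8) or int8_t (QASYMM8_SIGNED)
    void store_result(T *dst, const int16x8_t &v)
    {
        wrapper::vstore(dst, wrapper::vqmov<T>(v)); // saturating narrow, then store
    }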
diff --git a/src/core/NEON/wrapper/intrinsics/reinterpret.h b/src/core/NEON/wrapper/intrinsics/reinterpret.h
new file mode 100644
index 0000000000..0c26cd9008
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/reinterpret.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_REINTERPRET_H
+#define ARM_COMPUTE_WRAPPER_REINTERPRET_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VREINTERPRET_IMPL(ptype, vtype, prefix, postfix1, postfix2) \
+    inline ptype vreinterpret(const vtype &a) \
+    { \
+        return prefix##_##postfix1##_##postfix2(a); \
+    } \
+    \
+    inline ptype vreinterpret(const ptype &a) \
+    { \
+        return a; \
+    }
+
+VREINTERPRET_IMPL(int16x4_t, uint16x4_t, vreinterpret, s16, u16)
+
+VREINTERPRET_IMPL(int32x4_t, uint32x4_t, vreinterpretq, s32, u32)
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_REINTERPRET_H */
diff --git a/src/core/NEON/wrapper/intrinsics/rev64.h b/src/core/NEON/wrapper/intrinsics/rev64.h
new file mode 100644
index 0000000000..0f0139c93b
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/rev64.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
diff --git a/src/core/NEON/wrapper/intrinsics/rev64.h b/src/core/NEON/wrapper/intrinsics/rev64.h
new file mode 100644
index 0000000000..0f0139c93b
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/rev64.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_REV64_H
+#define ARM_COMPUTE_WRAPPER_REV64_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VREV64_IMPL(vtype, prefix, postfix) \
+    inline vtype vrev64(const vtype &a) \
+    { \
+        return prefix##_##postfix(a); \
+    }
+
+VREV64_IMPL(uint8x8_t, vrev64, u8)
+VREV64_IMPL(int8x8_t, vrev64, s8)
+VREV64_IMPL(uint16x4_t, vrev64, u16)
+VREV64_IMPL(int16x4_t, vrev64, s16)
+VREV64_IMPL(uint32x2_t, vrev64, u32)
+VREV64_IMPL(int32x2_t, vrev64, s32)
+VREV64_IMPL(float32x2_t, vrev64, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VREV64_IMPL(float16x4_t, vrev64, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VREV64_IMPL(uint8x16_t, vrev64q, u8)
+VREV64_IMPL(int8x16_t, vrev64q, s8)
+VREV64_IMPL(uint16x8_t, vrev64q, u16)
+VREV64_IMPL(int16x8_t, vrev64q, s16)
+VREV64_IMPL(uint32x4_t, vrev64q, u32)
+VREV64_IMPL(int32x4_t, vrev64q, s32)
+VREV64_IMPL(float32x4_t, vrev64q, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VREV64_IMPL(float16x8_t, vrev64q, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VREV64_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_REV64_H */
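`vrev64` reverses the element order within each 64-bit doubleword of its argument. A sketch with illustrative names:

    #include <arm_neon.h>
    #include "src/core/NEON/wrapper/intrinsics/rev64.h"

    void rev64_example()
    {
        const uint8_t in[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
        uint8_t       out[8];
        // {0,1,2,3,4,5,6,7} -> {7,6,5,4,3,2,1,0}
        vst1_u8(out, arm_compute::wrapper::vrev64(vld1_u8(in)));
    }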
diff --git a/src/core/NEON/wrapper/intrinsics/round.h b/src/core/NEON/wrapper/intrinsics/round.h
new file mode 100644
index 0000000000..d23feb6b42
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/round.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2019-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_ROUND_H
+#define ARM_COMPUTE_WRAPPER_ROUND_H
+
+#include "src/core/NEON/NEMath.h"
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VROUNDQ_IMPL(vtype, postfix) \
+    inline vtype vround(const vtype &a) \
+    { \
+        return vroundq_rte_##postfix(a); \
+    }
+
+#define VROUNDQ_IMPL_INT(vtype, postfix) \
+    inline vtype vround(const vtype &a) \
+    { \
+        ARM_COMPUTE_UNUSED(a); \
+        ARM_COMPUTE_ERROR("Not supported"); \
+    }
+
+VROUNDQ_IMPL(float32x4_t, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VROUNDQ_IMPL(float16x8_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VROUNDQ_IMPL_INT(int32x4_t, s32)
+#undef VROUNDQ_IMPL
+#undef VROUNDQ_IMPL_INT
+
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_ROUND_H */
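`vround` maps to the round-to-nearest-even helpers (`vroundq_rte_*`) from NEMath.h, which matters in quantized kernels where ties must not bias upward; the integer overload exists only so templated code compiles, and aborts if ever reached. A sketch with illustrative names:

    #include <arm_neon.h>
    #include "src/core/NEON/wrapper/intrinsics/round.h"

    void round_example()
    {
        const float32x4_t in  = { 0.5f, 1.5f, 2.5f, -0.5f };
        // Ties go to the even neighbour: {0, 2, 2, -0}
        const float32x4_t out = arm_compute::wrapper::vround(in);
        (void)out;
    }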
diff --git a/src/core/NEON/wrapper/intrinsics/setlane.h b/src/core/NEON/wrapper/intrinsics/setlane.h
new file mode 100644
index 0000000000..197eedacb5
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/setlane.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_SET_LANE_H
+#define ARM_COMPUTE_WRAPPER_SET_LANE_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VSETLANE_IMPL_8(stype, atype, vtype, postfix) \
+    inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
+    { \
+        switch(lane) \
+        { \
+            case 0: \
+                return vset_lane_##postfix(value, vector, 0); \
+            case 1: \
+                return vset_lane_##postfix(value, vector, 1); \
+            case 2: \
+                return vset_lane_##postfix(value, vector, 2); \
+            case 3: \
+                return vset_lane_##postfix(value, vector, 3); \
+            case 4: \
+                return vset_lane_##postfix(value, vector, 4); \
+            case 5: \
+                return vset_lane_##postfix(value, vector, 5); \
+            case 6: \
+                return vset_lane_##postfix(value, vector, 6); \
+            case 7: \
+                return vset_lane_##postfix(value, vector, 7); \
+            default: \
+                ARM_COMPUTE_ERROR("Invalid lane"); \
+        } \
+    }
+
+#define VSETLANE_IMPL_4(stype, atype, vtype, postfix) \
+    inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
+    { \
+        switch(lane) \
+        { \
+            case 0: \
+                return vset_lane_##postfix(value, vector, 0); \
+            case 1: \
+                return vset_lane_##postfix(value, vector, 1); \
+            case 2: \
+                return vset_lane_##postfix(value, vector, 2); \
+            case 3: \
+                return vset_lane_##postfix(value, vector, 3); \
+            default: \
+                ARM_COMPUTE_ERROR("Invalid lane"); \
+        } \
+    }
+
+#define VSETLANE_IMPL_2(stype, atype, vtype, postfix) \
+    inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
+    { \
+        switch(lane) \
+        { \
+            case 0: \
+                return vset_lane_##postfix(value, vector, 0); \
+            case 1: \
+                return vset_lane_##postfix(value, vector, 1); \
+            default: \
+                ARM_COMPUTE_ERROR("Invalid lane"); \
+        } \
+    }
+
+VSETLANE_IMPL_8(uint8x8_t, uint8_t, uint8x8_t, u8)
+VSETLANE_IMPL_8(int8x8_t, int8_t, int8x8_t, s8)
+VSETLANE_IMPL_4(uint16x4_t, uint16_t, uint16x4_t, u16)
+VSETLANE_IMPL_4(int16x4_t, int16_t, int16x4_t, s16)
+VSETLANE_IMPL_2(uint32x2_t, uint32_t, uint32x2_t, u32)
+VSETLANE_IMPL_2(int32x2_t, int32_t, int32x2_t, s32)
+VSETLANE_IMPL_2(float32x2_t, float, float32x2_t, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VSETLANE_IMPL_4(float16x4_t, float16_t, float16x4_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#define VSETQLANE_IMPL_16(stype, atype, vtype, postfix) \
+    inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
+    { \
+        switch(lane) \
+        { \
+            case 0: \
+                return vsetq_lane_##postfix(value, vector, 0); \
+            case 1: \
+                return vsetq_lane_##postfix(value, vector, 1); \
+            case 2: \
+                return vsetq_lane_##postfix(value, vector, 2); \
+            case 3: \
+                return vsetq_lane_##postfix(value, vector, 3); \
+            case 4: \
+                return vsetq_lane_##postfix(value, vector, 4); \
+            case 5: \
+                return vsetq_lane_##postfix(value, vector, 5); \
+            case 6: \
+                return vsetq_lane_##postfix(value, vector, 6); \
+            case 7: \
+                return vsetq_lane_##postfix(value, vector, 7); \
+            case 8: \
+                return vsetq_lane_##postfix(value, vector, 8); \
+            case 9: \
+                return vsetq_lane_##postfix(value, vector, 9); \
+            case 10: \
+                return vsetq_lane_##postfix(value, vector, 10); \
+            case 11: \
+                return vsetq_lane_##postfix(value, vector, 11); \
+            case 12: \
+                return vsetq_lane_##postfix(value, vector, 12); \
+            case 13: \
+                return vsetq_lane_##postfix(value, vector, 13); \
+            case 14: \
+                return vsetq_lane_##postfix(value, vector, 14); \
+            case 15: \
+                return vsetq_lane_##postfix(value, vector, 15); \
+            default: \
+                ARM_COMPUTE_ERROR("Invalid lane"); \
+        } \
+    }
+#define VSETQLANE_IMPL_8(stype, atype, vtype, postfix) \
+    inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
+    { \
+        switch(lane) \
+        { \
+            case 0: \
+                return vsetq_lane_##postfix(value, vector, 0); \
+            case 1: \
+                return vsetq_lane_##postfix(value, vector, 1); \
+            case 2: \
+                return vsetq_lane_##postfix(value, vector, 2); \
+            case 3: \
+                return vsetq_lane_##postfix(value, vector, 3); \
+            case 4: \
+                return vsetq_lane_##postfix(value, vector, 4); \
+            case 5: \
+                return vsetq_lane_##postfix(value, vector, 5); \
+            case 6: \
+                return vsetq_lane_##postfix(value, vector, 6); \
+            case 7: \
+                return vsetq_lane_##postfix(value, vector, 7); \
+            default: \
+                ARM_COMPUTE_ERROR("Invalid lane"); \
+        } \
+    }
+
+#define VSETQLANE_IMPL_4(stype, atype, vtype, postfix) \
+    inline stype vsetlane(const atype value, const vtype vector, const unsigned int lane) \
+    { \
+        switch(lane) \
+        { \
+            case 0: \
+                return vsetq_lane_##postfix(value, vector, 0); \
+            case 1: \
+                return vsetq_lane_##postfix(value, vector, 1); \
+            case 2: \
+                return vsetq_lane_##postfix(value, vector, 2); \
+            case 3: \
+                return vsetq_lane_##postfix(value, vector, 3); \
+            default: \
+                ARM_COMPUTE_ERROR("Invalid lane"); \
+        } \
+    }
+
+VSETQLANE_IMPL_16(uint8x16_t, uint8_t, uint8x16_t, u8)
+VSETQLANE_IMPL_16(int8x16_t, int8_t, int8x16_t, s8)
+VSETQLANE_IMPL_8(uint16x8_t, uint16_t, uint16x8_t, u16)
+VSETQLANE_IMPL_8(int16x8_t, int16_t, int16x8_t, s16)
+VSETQLANE_IMPL_4(uint32x4_t, uint32_t, uint32x4_t, u32)
+VSETQLANE_IMPL_4(int32x4_t, int32_t, int32x4_t, s32)
+VSETQLANE_IMPL_4(float32x4_t, float, float32x4_t, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VSETQLANE_IMPL_8(float16x8_t, float16_t, float16x8_t, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VSETLANE_IMPL_8
+#undef VSETLANE_IMPL_4
+#undef VSETLANE_IMPL_2
+
+#undef VSETQLANE_IMPL_16
+#undef VSETQLANE_IMPL_8
+#undef VSETQLANE_IMPL_4
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_SET_LANE_H */
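NEON's `vset_lane`/`vsetq_lane` intrinsics require the lane index to be a compile-time immediate; the switch inside each macro above is what lets callers pass a runtime index. A sketch with illustrative names:

    #include <arm_neon.h>
    #include "src/core/NEON/wrapper/intrinsics/setlane.h"

    float32x4_t setlane_example(float32x4_t v, unsigned int lane)
    {
        // `lane` may be a runtime value; the switch dispatches it to the
        // immediate-operand intrinsic, aborting on an out-of-range index.
        return arm_compute::wrapper::vsetlane(42.f, v, lane);
    }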
diff --git a/src/core/NEON/wrapper/intrinsics/sin.h b/src/core/NEON/wrapper/intrinsics/sin.h
new file mode 100644
index 0000000000..03c2813a32
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/sin.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2019-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_SIN_H
+#define ARM_COMPUTE_WRAPPER_SIN_H
+
+#include "src/core/NEON/NEMath.h"
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VSIN_IMPL(vtype, prefix, postfix) \
+    inline vtype vsin(const vtype &a) \
+    { \
+        return prefix##_##postfix(a); \
+    }
+
+#define VSIN_IMPL_INT(vtype, prefix, postfix) \
+    inline vtype vsin(const vtype &a) \
+    { \
+        ARM_COMPUTE_UNUSED(a); \
+        ARM_COMPUTE_ERROR("Not supported"); \
+    }
+
+VSIN_IMPL(float32x4_t, vsinq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VSIN_IMPL(float16x8_t, vsinq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VSIN_IMPL_INT(int32x4_t, vsinq, s32)
+
+#undef VSIN_IMPL
+#undef VSIN_IMPL_INT
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_SIN_H */
\ No newline at end of file
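`vsin` forwards to the polynomial approximations `vsinq_f32`/`vsinq_f16` from NEMath.h, so results are approximate rather than correctly rounded. A sketch with illustrative names:

    #include <arm_neon.h>
    #include "src/core/NEON/wrapper/intrinsics/sin.h"

    void sin_example()
    {
        const float32x4_t angle = vdupq_n_f32(1.5707963f); // ~pi/2
        const float32x4_t s     = arm_compute::wrapper::vsin(angle); // each lane ~1.0
        (void)s;
    }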
diff --git a/src/core/NEON/wrapper/intrinsics/store.h b/src/core/NEON/wrapper/intrinsics/store.h
new file mode 100644
index 0000000000..6dda432ea9
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/store.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_STORE_H
+#define ARM_COMPUTE_WRAPPER_STORE_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VSTORE_IMPL(stype, vtype, prefix, postfix) \
+    inline void vstore(stype *ptr, vtype val) \
+    { \
+        prefix##_##postfix(ptr, val); \
+    }
+
+VSTORE_IMPL(uint8_t, uint8x8_t, vst1, u8)
+VSTORE_IMPL(uint8_t, uint8x8x2_t, vst2, u8)
+VSTORE_IMPL(int8_t, int8x8_t, vst1, s8)
+VSTORE_IMPL(int8_t, int8x8x2_t, vst2, s8)
+VSTORE_IMPL(uint16_t, uint16x4_t, vst1, u16)
+VSTORE_IMPL(int16_t, int16x4_t, vst1, s16)
+VSTORE_IMPL(uint32_t, uint32x2_t, vst1, u32)
+VSTORE_IMPL(int32_t, int32x2_t, vst1, s32)
+//VSTORE_IMPL(uint64_t, 1, vst1, u64)
+//VSTORE_IMPL(int64_t, 1, vst1, s64)
+VSTORE_IMPL(float, float32x2_t, vst1, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VSTORE_IMPL(float16_t, float16x4_t, vst1, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VSTORE_IMPL(uint8_t, uint8x16_t, vst1q, u8)
+VSTORE_IMPL(int8_t, int8x16_t, vst1q, s8)
+VSTORE_IMPL(uint16_t, uint16x8_t, vst1q, u16)
+VSTORE_IMPL(int16_t, int16x8_t, vst1q, s16)
+VSTORE_IMPL(uint32_t, uint32x4_t, vst1q, u32)
+VSTORE_IMPL(int32_t, int32x4_t, vst1q, s32)
+//VSTORE_IMPL(uint64_t, 2, vst1q, u64)
+//VSTORE_IMPL(int64_t, 2, vst1q, s64)
+VSTORE_IMPL(float, float32x4_t, vst1q, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VSTORE_IMPL(float16_t, float16x8_t, vst1q, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VSTORE_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_STORE_H */
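`vstore` relies on overload resolution to pick the right `vst1`/`vst1q` variant from the vector argument's type, so templated kernels can store 64-bit and 128-bit vectors with one spelling. A sketch with illustrative names:

    #include <arm_neon.h>
    #include "src/core/NEON/wrapper/intrinsics/store.h"

    void store_example(float *dst64, float *dst128)
    {
        arm_compute::wrapper::vstore(dst64, vdup_n_f32(1.f));   // resolves to vst1_f32
        arm_compute::wrapper::vstore(dst128, vdupq_n_f32(1.f)); // resolves to vst1q_f32
    }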
diff --git a/src/core/NEON/wrapper/intrinsics/sub.h b/src/core/NEON/wrapper/intrinsics/sub.h
new file mode 100644
index 0000000000..475986d0f6
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/sub.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_SUB_H
+#define ARM_COMPUTE_WRAPPER_SUB_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VSUB_IMPL(stype, vtype, prefix, postfix) \
+    inline vtype vsub(const vtype &a, const vtype &b) \
+    { \
+        return prefix##_##postfix(a, b); \
+    }
+
+VSUB_IMPL(uint8x8_t, uint8x8_t, vsub, u8)
+VSUB_IMPL(int8x8_t, int8x8_t, vsub, s8)
+VSUB_IMPL(uint16x4_t, uint16x4_t, vsub, u16)
+VSUB_IMPL(int16x4_t, int16x4_t, vsub, s16)
+VSUB_IMPL(uint32x2_t, uint32x2_t, vsub, u32)
+VSUB_IMPL(int32x2_t, int32x2_t, vsub, s32)
+VSUB_IMPL(uint64x1_t, uint64x1_t, vsub, u64)
+VSUB_IMPL(int64x1_t, int64x1_t, vsub, s64)
+VSUB_IMPL(float32x2_t, float32x2_t, vsub, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VSUB_IMPL(float16x4_t, float16x4_t, vsub, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VSUB_IMPL(uint8x16_t, uint8x16_t, vsubq, u8)
+VSUB_IMPL(int8x16_t, int8x16_t, vsubq, s8)
+VSUB_IMPL(uint16x8_t, uint16x8_t, vsubq, u16)
+VSUB_IMPL(int16x8_t, int16x8_t, vsubq, s16)
+VSUB_IMPL(uint32x4_t, uint32x4_t, vsubq, u32)
+VSUB_IMPL(int32x4_t, int32x4_t, vsubq, s32)
+VSUB_IMPL(uint64x2_t, uint64x2_t, vsubq, u64)
+VSUB_IMPL(int64x2_t, int64x2_t, vsubq, s64)
+VSUB_IMPL(float32x4_t, float32x4_t, vsubq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VSUB_IMPL(float16x8_t, float16x8_t, vsubq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VSUB_IMPL
+
+// VQSUB: Vector saturating sub (No notion of saturation for floating point)
+#define VQSUB_IMPL(stype, vtype, prefix, postfix) \
+    inline vtype vqsub(const vtype &a, const vtype &b) \
+    { \
+        return prefix##_##postfix(a, b); \
+    }
+
+VQSUB_IMPL(uint8x8_t, uint8x8_t, vqsub, u8)
+VQSUB_IMPL(int8x8_t, int8x8_t, vqsub, s8)
+VQSUB_IMPL(uint16x4_t, uint16x4_t, vqsub, u16)
+VQSUB_IMPL(int16x4_t, int16x4_t, vqsub, s16)
+VQSUB_IMPL(uint32x2_t, uint32x2_t, vqsub, u32)
+VQSUB_IMPL(int32x2_t, int32x2_t, vqsub, s32)
+VQSUB_IMPL(uint64x1_t, uint64x1_t, vqsub, u64)
+VQSUB_IMPL(int64x1_t, int64x1_t, vqsub, s64)
+VQSUB_IMPL(float32x2_t, float32x2_t, vsub, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VQSUB_IMPL(float16x4_t, float16x4_t, vsub, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VQSUB_IMPL(uint8x16_t, uint8x16_t, vqsubq, u8)
+VQSUB_IMPL(int8x16_t, int8x16_t, vqsubq, s8)
+VQSUB_IMPL(uint16x8_t, uint16x8_t, vqsubq, u16)
+VQSUB_IMPL(int16x8_t, int16x8_t, vqsubq, s16)
+VQSUB_IMPL(uint32x4_t, uint32x4_t, vqsubq, u32)
+VQSUB_IMPL(int32x4_t, int32x4_t, vqsubq, s32)
+VQSUB_IMPL(uint64x2_t, uint64x2_t, vqsubq, u64)
+VQSUB_IMPL(int64x2_t, int64x2_t, vqsubq, s64)
+VQSUB_IMPL(float32x4_t, float32x4_t, vsubq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VQSUB_IMPL(float16x8_t, float16x8_t, vsubq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#undef VQSUB_IMPL
+
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_SUB_H */
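The difference between `vsub` and `vqsub` shows at the range boundaries: plain subtraction wraps modulo 2^n while saturating subtraction clamps, and for the float instantiations `vqsub` deliberately falls back to plain `vsub`, as the comment above notes. A sketch with illustrative names:

    #include <arm_neon.h>
    #include "src/core/NEON/wrapper/intrinsics/sub.h"

    void sub_example()
    {
        const uint8x8_t a = vdup_n_u8(10);
        const uint8x8_t b = vdup_n_u8(20);
        const uint8x8_t wrapped = arm_compute::wrapper::vsub(a, b);  // 10 - 20 wraps to 246
        const uint8x8_t clamped = arm_compute::wrapper::vqsub(a, b); // 10 - 20 clamps to 0
        (void)wrapped;
        (void)clamped;
    }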
diff --git a/src/core/NEON/wrapper/intrinsics/tanh.h b/src/core/NEON/wrapper/intrinsics/tanh.h
new file mode 100644
index 0000000000..daeaf19997
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/tanh.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_TANH_H
+#define ARM_COMPUTE_WRAPPER_TANH_H
+
+#include "src/core/NEON/NEMath.h"
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VTANH_IMPL(vtype, prefix, postfix) \
+    inline vtype vtanh(const vtype &a) \
+    { \
+        return prefix##_##postfix(a); \
+    }
+
+VTANH_IMPL(float32x4_t, vtanhq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VTANH_IMPL(float16x8_t, vtanhq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#undef VTANH_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_TANH_H */
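`vtanh` wraps the `vtanhq_*` approximations from NEMath.h and is the natural building block for a tanh activation kernel. A sketch with illustrative names:

    #include <arm_neon.h>
    #include "src/core/NEON/wrapper/intrinsics/tanh.h"

    void tanh_example()
    {
        const float32x4_t x = vdupq_n_f32(0.5f);
        const float32x4_t y = arm_compute::wrapper::vtanh(x); // each lane ~0.462
        (void)y;
    }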
diff --git a/src/core/NEON/wrapper/intrinsics/tbl.h b/src/core/NEON/wrapper/intrinsics/tbl.h
new file mode 100644
index 0000000000..05e6c1fc13
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/tbl.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_TBL_H
+#define ARM_COMPUTE_WRAPPER_TBL_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VTBL_IMPL(stype, vtype, prefix, postfix) \
+    inline vtype vtbl(const stype &a, const vtype &b) \
+    { \
+        return prefix##_##postfix(a, b); \
+    }
+
+VTBL_IMPL(uint8x8x2_t, uint8x8_t, vtbl2, u8)
+VTBL_IMPL(int8x8x2_t, int8x8_t, vtbl2, s8)
+
+#undef VTBL_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_TBL_H */
diff --git a/src/core/NEON/wrapper/scalar/add.h b/src/core/NEON/wrapper/scalar/add.h
new file mode 100644
index 0000000000..642d9261f3
--- /dev/null
+++ b/src/core/NEON/wrapper/scalar/add.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_SCALAR_ADD_H
+#define ARM_COMPUTE_WRAPPER_SCALAR_ADD_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+inline uint8_t add_sat(const uint8_t &a, const uint8_t &b)
+{
+    const uint8x8_t va = { a, 0, 0, 0, 0, 0, 0, 0 };
+    const uint8x8_t vb = { b, 0, 0, 0, 0, 0, 0, 0 };
+    return vget_lane_u8(vqadd_u8(va, vb), 0);
+}
+
+inline int16_t add_sat(const int16_t &a, const int16_t &b)
+{
+    const int16x4_t va = { a, 0, 0, 0 };
+    const int16x4_t vb = { b, 0, 0, 0 };
+    return vget_lane_s16(vqadd_s16(va, vb), 0);
+}
+
+inline int32_t add_sat(const int32_t &a, const int32_t &b)
+{
+    const int32x2_t va = { a, 0 };
+    const int32x2_t vb = { b, 0 };
+    return vget_lane_s32(vqadd_s32(va, vb), 0);
+}
+
+inline float add_sat(const float &a, const float &b)
+{
+    // No notion of saturation exists in floating point
+    return a + b;
+}
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+inline float16_t add_sat(const float16_t &a, const float16_t &b)
+{
+    // No notion of saturation exists in floating point
+    return a + b;
+}
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_SCALAR_ADD_H */
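`add_sat` reuses the vector saturating-add instruction for a single scalar by placing the operands in lane 0 of a temporary vector; `sub_sat` below follows the same pattern. This keeps the scalar tail of a kernel bit-exact with its vectorized body. A sketch with illustrative names:

    #include <cstdint>
    #include "src/core/NEON/wrapper/scalar/add.h"

    void add_sat_example()
    {
        const uint8_t r = arm_compute::wrapper::add_sat(uint8_t{200}, uint8_t{100});
        // r == 255: the sum saturates instead of wrapping to 44
        (void)r;
    }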
diff --git a/src/core/NEON/wrapper/scalar/scalar.h b/src/core/NEON/wrapper/scalar/scalar.h
new file mode 100644
index 0000000000..8be37e55ba
--- /dev/null
+++ b/src/core/NEON/wrapper/scalar/scalar.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_SCALAR_H
+#define ARM_COMPUTE_WRAPPER_SCALAR_H
+
+#include "src/core/NEON/wrapper/scalar/add.h"
+#include "src/core/NEON/wrapper/scalar/sub.h"
+
+#endif /* ARM_COMPUTE_WRAPPER_SCALAR_H */
diff --git a/src/core/NEON/wrapper/scalar/sub.h b/src/core/NEON/wrapper/scalar/sub.h
new file mode 100644
index 0000000000..1fe51d75fc
--- /dev/null
+++ b/src/core/NEON/wrapper/scalar/sub.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_SCALAR_SUB_H
+#define ARM_COMPUTE_WRAPPER_SCALAR_SUB_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+inline uint8_t sub_sat(const uint8_t &a, const uint8_t &b)
+{
+    const uint8x8_t va = { a, 0, 0, 0, 0, 0, 0, 0 };
+    const uint8x8_t vb = { b, 0, 0, 0, 0, 0, 0, 0 };
+    return vget_lane_u8(vqsub_u8(va, vb), 0);
+}
+
+inline int16_t sub_sat(const int16_t &a, const int16_t &b)
+{
+    const int16x4_t va = { a, 0, 0, 0 };
+    const int16x4_t vb = { b, 0, 0, 0 };
+    return vget_lane_s16(vqsub_s16(va, vb), 0);
+}
+
+inline int32_t sub_sat(const int32_t &a, const int32_t &b)
+{
+    const int32x2_t va = { a, 0 };
+    const int32x2_t vb = { b, 0 };
+    return vget_lane_s32(vqsub_s32(va, vb), 0);
+}
+
+inline float sub_sat(const float &a, const float &b)
+{
+    // No notion of saturation exists in floating point
+    return a - b;
+}
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+inline float16_t sub_sat(const float16_t &a, const float16_t &b)
+{
+    // No notion of saturation exists in floating point
+    return a - b;
+}
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_SCALAR_SUB_H */
diff --git a/src/core/NEON/wrapper/traits.h b/src/core/NEON/wrapper/traits.h
new file mode 100644
index 0000000000..eafbeef372
--- /dev/null
+++ b/src/core/NEON/wrapper/traits.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_TRAITS_H
+#define ARM_COMPUTE_WRAPPER_TRAITS_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+namespace traits
+{
+// *INDENT-OFF*
+// clang-format off
+
+/** 64-bit vector tag */
+struct vector_64_tag {};
+/** 128-bit vector tag */
+struct vector_128_tag {};
+
+/** Create the appropriate NEON vector given its type and size in terms of elements */
+template <typename T, int S> struct neon_vector;
+
+// Specializations
+#ifndef DOXYGEN_SKIP_THIS
+template <> struct neon_vector<uint8_t, 8>{ using scalar_type = uint8_t; using type = uint8x8_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<int8_t, 8>{ using scalar_type = int8_t; using type = int8x8_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<uint8_t, 16>{ using scalar_type = uint8_t; using type = uint8x16_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<int8_t, 16>{ using scalar_type = int8_t; using type = int8x16_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<uint16_t, 4>{ using scalar_type = uint16_t; using type = uint16x4_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<int16_t, 4>{ using scalar_type = int16_t; using type = int16x4_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<uint16_t, 8>{ using scalar_type = uint16_t; using type = uint16x8_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<uint16_t, 16>{ using scalar_type = uint16_t; using type = uint16x8x2_t; };
+template <> struct neon_vector<int16_t, 8>{ using scalar_type = int16_t; using type = int16x8_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<int16_t, 16>{ using scalar_type = int16_t; using type = int16x8x2_t; };
+template <> struct neon_vector<uint32_t, 2>{ using scalar_type = uint32_t; using type = uint32x2_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<int32_t, 2>{ using scalar_type = int32_t; using type = int32x2_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<uint32_t, 4>{ using scalar_type = uint32_t; using type = uint32x4_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<int32_t, 4>{ using scalar_type = int32_t; using type = int32x4_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<uint64_t, 1>{ using scalar_type = uint64_t; using type = uint64x1_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<int64_t, 1>{ using scalar_type = int64_t; using type = int64x1_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<uint64_t, 2>{ using scalar_type = uint64_t; using type = uint64x2_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<int64_t, 2>{ using scalar_type = int64_t; using type = int64x2_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<float_t, 2>{ using scalar_type = float_t; using type = float32x2_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<float_t, 4>{ using scalar_type = float_t; using type = float32x4_t; using tag_type = vector_128_tag; };
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+template <> struct neon_vector<float16_t, 4>{ using scalar_type = float16_t; using type = float16x4_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<float16_t, 8>{ using scalar_type = float16_t; using type = float16x8_t; using tag_type = vector_128_tag; };
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#endif /* DOXYGEN_SKIP_THIS */
+
+/** Helper type template to get the type of a neon vector */
+template <typename T, int S> using neon_vector_t = typename neon_vector<T, S>::type;
+/** Helper type template to get the tag type of a neon vector */
+template <typename T, int S> using neon_vector_tag_t = typename neon_vector<T, S>::tag_type;
+
+/** Vector bit-width enum class */
+enum class BitWidth
+{
+    W64,  /**< 64-bit width */
+    W128, /**< 128-bit width */
+};
+
+/** Create the appropriate NEON vector given its type and size in terms of bits */
+template <typename T, BitWidth BW> struct neon_bitvector;
+// Specializations
+#ifndef DOXYGEN_SKIP_THIS
+template <> struct neon_bitvector<uint8_t, BitWidth::W64>{ using type = uint8x8_t; using tag_type = vector_64_tag; };
+template <> struct neon_bitvector<int8_t, BitWidth::W64>{ using type = int8x8_t; using tag_type = vector_64_tag; };
+template <> struct neon_bitvector<uint8_t, BitWidth::W128>{ using type = uint8x16_t; using tag_type = vector_128_tag; };
+template <> struct neon_bitvector<int8_t, BitWidth::W128>{ using type = int8x16_t; using tag_type = vector_128_tag; };
+template <> struct neon_bitvector<uint16_t, BitWidth::W64>{ using type = uint16x4_t; using tag_type = vector_64_tag; };
+template <> struct neon_bitvector<int16_t, BitWidth::W64>{ using type = int16x4_t; using tag_type = vector_64_tag; };
+template <> struct neon_bitvector<uint16_t, BitWidth::W128>{ using type = uint16x8_t; using tag_type = vector_128_tag; };
+template <> struct neon_bitvector<int16_t, BitWidth::W128>{ using type = int16x8_t; using tag_type = vector_128_tag; };
+template <> struct neon_bitvector<uint32_t, BitWidth::W64>{ using type = uint32x2_t; using tag_type = vector_64_tag; };
+template <> struct neon_bitvector<int32_t, BitWidth::W64>{ using type = int32x2_t; using tag_type = vector_64_tag; };
+template <> struct neon_bitvector<uint32_t, BitWidth::W128>{ using type = uint32x4_t; using tag_type = vector_128_tag; };
+template <> struct neon_bitvector<int32_t, BitWidth::W128>{ using type = int32x4_t; using tag_type = vector_128_tag; };
+template <> struct neon_bitvector<uint64_t, BitWidth::W64>{ using type = uint64x1_t; using tag_type = vector_64_tag; };
+template <> struct neon_bitvector<int64_t, BitWidth::W64>{ using type = int64x1_t; using tag_type = vector_64_tag; };
+template <> struct neon_bitvector<uint64_t, BitWidth::W128>{ using type = uint64x2_t; using tag_type = vector_128_tag; };
+template <> struct neon_bitvector<int64_t, BitWidth::W128>{ using type = int64x2_t; using tag_type = vector_128_tag; };
+template <> struct neon_bitvector<float_t, BitWidth::W64>{ using type = float32x2_t; using tag_type = vector_64_tag; };
+template <> struct neon_bitvector<float_t, BitWidth::W128>{ using type = float32x4_t; using tag_type = vector_128_tag; };
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+template <> struct neon_bitvector<float16_t, BitWidth::W64>{ using type = float16x4_t; using tag_type = vector_64_tag; };
+template <> struct neon_bitvector<float16_t, BitWidth::W128>{ using type = float16x8_t; using tag_type = vector_128_tag; };
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#endif /* DOXYGEN_SKIP_THIS */
+
+/** Helper type template to get the type of a neon vector */
+template <typename T, BitWidth BW> using neon_bitvector_t = typename neon_bitvector<T, BW>::type;
+/** Helper type template to get the tag type of a neon vector */
+template <typename T, BitWidth BW> using neon_bitvector_tag_t = typename neon_bitvector<T, BW>::tag_type;
+
+/** Promote a type */
+template <typename T> struct promote { };
+template <> struct promote <uint8_t> { using type = uint16_t; };
+template <> struct promote <int8_t> { using type = int16_t; };
+template <> struct promote <uint16_t> { using type = uint32_t; };
+template <> struct promote <int16_t> { using type = int32_t; };
+template <> struct promote <uint32_t> { using type = uint64_t; };
+template <> struct promote <int32_t> { using type = int64_t; };
+template <> struct promote <float> { using type = float; };
+template <> struct promote <half> { using type = half; };
+
+/** Get promoted type */
+template <typename T>
+using promote_t = typename promote<T>::type;
+
+// clang-format on
+// *INDENT-ON*
+} // namespace traits
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_TRAITS_H */
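`neon_vector` maps an (element type, lane count) pair to the matching NEON vector type plus a width tag used for overload dispatch, while `promote` yields the next wider type for overflow-safe accumulation. Both are compile-time-only, as a couple of static_asserts can illustrate (a sketch, not part of the patch):

    #include <type_traits>
    #include "src/core/NEON/wrapper/traits.h"

    namespace wt = arm_compute::wrapper::traits;

    static_assert(std::is_same<wt::neon_vector_t<uint8_t, 8>, uint8x8_t>::value,
                  "8 lanes of uint8_t form a 64-bit uint8x8_t");
    static_assert(std::is_same<wt::promote_t<uint8_t>, uint16_t>::value,
                  "uint8_t promotes to uint16_t for widening accumulation");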
diff --git a/src/core/NEON/wrapper/wrapper.h b/src/core/NEON/wrapper/wrapper.h
new file mode 100644
index 0000000000..e5467e98ff
--- /dev/null
+++ b/src/core/NEON/wrapper/wrapper.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2018-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_H
+#define ARM_COMPUTE_WRAPPER_H
+
+// Traits
+#include "src/core/NEON/wrapper/traits.h"
+
+// Intrinsics Overloads
+#include "src/core/NEON/wrapper/intrinsics/intrinsics.h"
+#include "src/core/NEON/wrapper/scalar/scalar.h"
+
+#endif /* ARM_COMPUTE_WRAPPER_H */
-- cgit v1.2.1
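Taken together, the wrapper lets a single templated kernel body serve every supported element type. A minimal sketch of the intended pattern (the function and variable names are illustrative, and it assumes the `vloadq`, `vdup_n`, and `vadd` overloads created elsewhere in this patch follow the same conventions as the files shown above):

    #include "src/core/NEON/wrapper/wrapper.h"

    // Add a scalar bias to one 128-bit vector's worth of elements.
    template <typename T>
    void add_bias(const T *src, T *dst, T bias)
    {
        namespace w = arm_compute::wrapper;
        using tag_t = w::traits::neon_bitvector_tag_t<T, w::traits::BitWidth::W128>;
        const auto vbias = w::vdup_n(bias, tag_t{}); // tag selects the 128-bit overload
        w::vstore(dst, w::vadd(w::vloadq(src), vbias));
    }

    // Usage: add_bias<float>(in, out, 0.5f); add_bias<uint8_t>(in, out, 3);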