From dbdea0d1c025b18d4d82c278c87454427918f5b4 Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Wed, 16 Oct 2019 19:21:40 +0100
Subject: COMPMID-2308: NEConvolutionLayer: support QUANT8_SYMM_PER_CHANNEL filters

Change-Id: Ic1bf5f0d21ccd525f84213a360f7e199d7f50577
Signed-off-by: Georgios Pinitas
Reviewed-on: https://review.mlplatform.org/c/2177
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Comments-Addressed: Arm Jenkins
---
 arm_compute/core/NEON/wrapper/intrinsics/add.h | 82 +++++++++++++++++++++++++-
 1 file changed, 81 insertions(+), 1 deletion(-)

diff --git a/arm_compute/core/NEON/wrapper/intrinsics/add.h b/arm_compute/core/NEON/wrapper/intrinsics/add.h
index 4f4d244489..1839170485 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/add.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/add.h
@@ -63,13 +63,13 @@ VADD_IMPL(float16x8_t, float16x8_t, vaddq, f16)
 #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 #undef VADD_IMPL
 
+// VQADD: Vector saturating add (No notion of saturation for floating point)
 #define VQADD_IMPL(stype, vtype, prefix, postfix)      \
     inline vtype vqadd(const vtype &a, const vtype &b) \
     {                                                  \
         return prefix##_##postfix(a, b);               \
     }
 
-// VQADD: Vector saturating add (No notion of saturation for floating point)
 VQADD_IMPL(uint8x8_t, uint8x8_t, vqadd, u8)
 VQADD_IMPL(int8x8_t, int8x8_t, vqadd, s8)
 VQADD_IMPL(uint16x4_t, uint16x4_t, vqadd, u16)
@@ -96,6 +96,86 @@ VQADD_IMPL(float32x4_t, float32x4_t, vaddq, f32)
 VQADD_IMPL(float16x8_t, float16x8_t, vaddq, f16)
 #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 #undef VQADD_IMPL
+
+// VADDW: Vector widening add
+#define VADDW_IMPL(wtype, vtype, prefix, postfix)      \
+    inline wtype vaddw(const wtype &a, const vtype &b) \
+    {                                                  \
+        return prefix##_##postfix(a, b);               \
+    }
+
+VADDW_IMPL(uint16x8_t, uint8x8_t, vaddw, u8)
+VADDW_IMPL(int16x8_t, int8x8_t, vaddw, s8)
+VADDW_IMPL(uint32x4_t, uint16x4_t, vaddw, u16)
+VADDW_IMPL(int32x4_t, int16x4_t, vaddw, s16)
+VADDW_IMPL(uint64x2_t, uint32x2_t, vaddw, u32)
+VADDW_IMPL(int64x2_t, int32x2_t, vaddw, s32)
+#undef VADDW_IMPL
+
+// VADDL: Vector long add
+#define VADDL_IMPL(wtype, vtype, prefix, postfix)      \
+    inline wtype vaddl(const vtype &a, const vtype &b) \
+    {                                                  \
+        return prefix##_##postfix(a, b);               \
+    }
+
+VADDL_IMPL(uint16x8_t, uint8x8_t, vaddl, u8)
+VADDL_IMPL(int16x8_t, int8x8_t, vaddl, s8)
+VADDL_IMPL(uint32x4_t, uint16x4_t, vaddl, u16)
+VADDL_IMPL(int32x4_t, int16x4_t, vaddl, s16)
+VADDL_IMPL(uint64x2_t, uint32x2_t, vaddl, u32)
+VADDL_IMPL(int64x2_t, int32x2_t, vaddl, s32)
+#undef VADDL_IMPL
+
+#if defined(__aarch64__)
+// VADDV: Across vector add
+#define VADDV_IMPL(stype, vtype, prefix, postfix) \
+    inline stype vaddv(const vtype &a)            \
+    {                                             \
+        return prefix##_##postfix(a);             \
+    }
+
+VADDV_IMPL(uint8_t, uint8x8_t, vaddv, u8)
+VADDV_IMPL(int8_t, int8x8_t, vaddv, s8)
+VADDV_IMPL(uint16_t, uint16x4_t, vaddv, u16)
+VADDV_IMPL(int16_t, int16x4_t, vaddv, s16)
+VADDV_IMPL(uint32_t, uint32x2_t, vaddv, u32)
+VADDV_IMPL(int32_t, int32x2_t, vaddv, s32)
+VADDV_IMPL(float, float32x2_t, vaddv, f32)
+
+VADDV_IMPL(uint8_t, uint8x16_t, vaddvq, u8)
+VADDV_IMPL(int8_t, int8x16_t, vaddvq, s8)
+VADDV_IMPL(uint16_t, uint16x8_t, vaddvq, u16)
+VADDV_IMPL(int16_t, int16x8_t, vaddvq, s16)
+VADDV_IMPL(uint32_t, uint32x4_t, vaddvq, u32)
+VADDV_IMPL(int32_t, int32x4_t, vaddvq, s32)
+VADDV_IMPL(uint64_t, uint64x2_t, vaddvq, u64)
+VADDV_IMPL(int64_t, int64x2_t, vaddvq, s64)
+VADDV_IMPL(float, float32x4_t, vaddvq, f32)
+#undef VADDV_IMPL
+#endif // defined(__aarch64__)
+
+// VPADDL: Signed add long pairwise
+#define VPADDL_IMPL(ltype, vtype, prefix, postfix) \
+    inline ltype vpaddl(const vtype &a)            \
+    {                                              \
+        return prefix##_##postfix(a);              \
+    }
+
+VPADDL_IMPL(uint16x4_t, uint8x8_t, vpaddl, u8)
+VPADDL_IMPL(int16x4_t, int8x8_t, vpaddl, s8)
+VPADDL_IMPL(uint32x2_t, uint16x4_t, vpaddl, u16)
+VPADDL_IMPL(int32x2_t, int16x4_t, vpaddl, s16)
+VPADDL_IMPL(uint64x1_t, uint32x2_t, vpaddl, u32)
+VPADDL_IMPL(int64x1_t, int32x2_t, vpaddl, s32)
+
+VPADDL_IMPL(uint16x8_t, uint8x16_t, vpaddlq, u8)
+VPADDL_IMPL(int16x8_t, int8x16_t, vpaddlq, s8)
+VPADDL_IMPL(uint32x4_t, uint16x8_t, vpaddlq, u16)
+VPADDL_IMPL(int32x4_t, int16x8_t, vpaddlq, s16)
+VPADDL_IMPL(uint64x2_t, uint32x4_t, vpaddlq, u32)
+VPADDL_IMPL(int64x2_t, int32x4_t, vpaddlq, s32)
+#undef VPADDL_IMPL
 } // namespace wrapper
 } // namespace arm_compute
 #endif /* __ARM_COMPUTE_WRAPPER_ADD_H__ */
--
cgit v1.2.1
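
Editor's sketch (not part of the patch): one way the new wrappers compose is to accumulate int8 data into wider lanes without overflow, the kind of reduction per-channel quantized (QUANT8_SYMM_PER_CHANNEL) filters call for. The helper name sum_s8, the direct include of add.h, and the AArch64-only build (vaddv is only wrapped under __aarch64__) are assumptions of this example, not library code.

#include <arm_neon.h>

#include <cstdint>

#include "arm_compute/core/NEON/wrapper/intrinsics/add.h"

// Hypothetical helper: sum n int8 values, where n is a multiple of 8 and
// small enough that the int16 lanes cannot overflow (e.g. n <= 2048,
// since each of the 8 lanes then accumulates at most 256 values in [-128, 127]).
inline int32_t sum_s8(const int8_t *data, int n)
{
    int16x8_t acc16 = vdupq_n_s16(0);
    for(int i = 0; i < n; i += 8)
    {
        // VADDW: widen 8 int8 lanes and add them onto the int16 accumulator.
        acc16 = arm_compute::wrapper::vaddw(acc16, vld1_s8(data + i));
    }
    // VPADDL: pairwise widen 8 x int16 -> 4 x int32.
    const int32x4_t acc32 = arm_compute::wrapper::vpaddl(acc16);
    // VADDV: fold the four int32 lanes into a single scalar (AArch64 only).
    return arm_compute::wrapper::vaddv(acc32);
}

The same reduction could start from two int8x8_t registers with wrapper::vaddl instead of vaddw; either way, widening before accumulating avoids the overflow/saturation a plain vadd on int8 lanes would hit.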