From 910e3f9b686d16657e37d4c18f234b566c8deec2 Mon Sep 17 00:00:00 2001
From: Viet-Hoa Do <viet-hoa.do@arm.com>
Date: Tue, 11 Oct 2022 13:21:35 +0100
Subject: Fix fixed-point quantized addition

* Use the same rounding function for the left-over part with the
  vectorized part.

Resolves: COMPMID-5640
Signed-off-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Change-Id: I07450b2a43390b77539b78cd5d3e6772bdc38548
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8520
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
---
 src/core/NEON/wrapper/intrinsics/shr.h | 41 ++++++++++++++++++++++++++++++++++
 1 file changed, 41 insertions(+)

(limited to 'src/core/NEON')

diff --git a/src/core/NEON/wrapper/intrinsics/shr.h b/src/core/NEON/wrapper/intrinsics/shr.h
index 69fc254b61..d740091464 100644
--- a/src/core/NEON/wrapper/intrinsics/shr.h
+++ b/src/core/NEON/wrapper/intrinsics/shr.h
@@ -49,6 +49,24 @@ VQRSHRN_IMPL(uint32x2_t, uint64x2_t, vqrshrn_n, u64)
 
 #undef VQRSHRN_IMPL
 
+#ifdef __aarch64__
+#define VQRSHRN_SCALAR_IMPL(half_vtype, vtype, prefix, postfix) \
+    template <int b>                                            \
+    inline half_vtype vqrshrn(const vtype &a)                   \
+    {                                                           \
+        return prefix##_##postfix(a, b);                        \
+    }
+
+VQRSHRN_SCALAR_IMPL(int8_t, int16_t, vqrshrnh_n, s16)
+VQRSHRN_SCALAR_IMPL(uint8_t, uint16_t, vqrshrnh_n, u16)
+VQRSHRN_SCALAR_IMPL(int16_t, int32_t, vqrshrns_n, s32)
+VQRSHRN_SCALAR_IMPL(uint16_t, uint32_t, vqrshrns_n, u32)
+VQRSHRN_SCALAR_IMPL(int32_t, int64_t, vqrshrnd_n, s64)
+VQRSHRN_SCALAR_IMPL(uint32_t, uint64_t, vqrshrnd_n, u64)
+
+#undef VQRSHRN_SCALAR_IMPL
+#endif // __aarch64__
+
 // This function is the mixed version of VQRSHRN and VQRSHRUN.
 // The input vector is always signed integer, while the returned vector
 // can be either signed or unsigned depending on the signedness of scalar type T.
@@ -73,6 +91,29 @@ VQRSHRN_EX_IMPL(int32x2_t, int64x2_t, vqrshrn_n, vqrshrun_n, s64)
 
 #undef VQRSHRN_EX_IMPL
 
+#ifdef __aarch64__
+#define VQRSHRN_EX_SCALAR_IMPL(half_vtype, vtype, prefix_signed, prefix_unsigned, postfix)                       \
+    template <int b, typename T>                                                                                 \
+    inline typename std::enable_if<std::is_integral<T>::value && std::is_signed<T>::value, half_vtype>::type     \
+    vqrshrn_ex(const vtype &a)                                                                                   \
+    {                                                                                                            \
+        return prefix_signed##_##postfix(a, b);                                                                  \
+    }                                                                                                            \
+                                                                                                                 \
+    template <int b, typename T>                                                                                 \
+    inline typename std::enable_if<std::is_integral<T>::value && !std::is_signed<T>::value, u##half_vtype>::type \
+    vqrshrn_ex(const vtype &a)                                                                                   \
+    {                                                                                                            \
+        return prefix_unsigned##_##postfix(a, b);                                                                \
+    }
+
+VQRSHRN_EX_SCALAR_IMPL(int8_t, int16_t, vqrshrnh_n, vqrshrunh_n, s16)
+VQRSHRN_EX_SCALAR_IMPL(int16_t, int32_t, vqrshrns_n, vqrshruns_n, s32)
+VQRSHRN_EX_SCALAR_IMPL(int32_t, int64_t, vqrshrnd_n, vqrshrund_n, s64)
+
+#undef VQRSHRN_EX_IMPL
+#endif // __aarch64__
+
 } // namespace wrapper
 } // namespace arm_compute
 #endif /* ARM_COMPUTE_WRAPPER_SHR_H */
-- 
cgit v1.2.1