author    Viet-Hoa Do <viet-hoa.do@arm.com>    2022-10-11 13:21:35 +0100
committer Viet-Hoa Do <viet-hoa.do@arm.com>    2022-10-27 09:30:45 +0000
commit    910e3f9b686d16657e37d4c18f234b566c8deec2 (patch)
tree      76e71422f484abef9e19aeda15863e07b1acfdf6 /src/core/NEON
parent    dc73246af4fec0b36ca3ae3d8e47f51561308309 (diff)
Fix fixed-point quantized addition
* Use the same rounding function for the left-over elements as for the
  vectorized part.

Resolves: COMPMID-5640
Signed-off-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Change-Id: I07450b2a43390b77539b78cd5d3e6772bdc38548
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8520
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
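For context, a minimal sketch of the pattern this fix enables (a hypothetical
driver loop, not the library's actual kernel; the shift value 4 is arbitrary):
the bulk of the buffer goes through the vector narrowing intrinsic, and the
left-over tail goes through its scalar counterpart, so both paths apply the
same saturating round-to-nearest behaviour.

#include <arm_neon.h>
#include <cstddef>
#include <cstdint>

// Narrow int16 values to int8 with a rounding right shift by 4.
// The tail uses vqrshrnh_n_s16 (AArch64), which rounds exactly like the
// vectorized vqrshrn_n_s16, rather than a hand-rolled shift that could
// round differently at the boundary between the two parts.
void narrow_s16_to_s8(const int16_t *src, int8_t *dst, size_t n)
{
    size_t i = 0;
    for (; i + 8 <= n; i += 8)
    {
        vst1_s8(dst + i, vqrshrn_n_s16(vld1q_s16(src + i), 4)); // vectorized part
    }
    for (; i < n; ++i)
    {
        dst[i] = vqrshrnh_n_s16(src[i], 4); // left-over part, same rounding
    }
}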
Diffstat (limited to 'src/core/NEON')
-rw-r--r--  src/core/NEON/wrapper/intrinsics/shr.h | 41
1 file changed, 41 insertions(+), 0 deletions(-)
diff --git a/src/core/NEON/wrapper/intrinsics/shr.h b/src/core/NEON/wrapper/intrinsics/shr.h
index 69fc254b61..d740091464 100644
--- a/src/core/NEON/wrapper/intrinsics/shr.h
+++ b/src/core/NEON/wrapper/intrinsics/shr.h
@@ -49,6 +49,24 @@ VQRSHRN_IMPL(uint32x2_t, uint64x2_t, vqrshrn_n, u64)
#undef VQRSHRN_IMPL
+#ifdef __aarch64__
+#define VQRSHRN_SCALAR_IMPL(half_vtype, vtype, prefix, postfix) \
+ template <int b> \
+ inline half_vtype vqrshrn(const vtype &a) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VQRSHRN_SCALAR_IMPL(int8_t, int16_t, vqrshrnh_n, s16)
+VQRSHRN_SCALAR_IMPL(uint8_t, uint16_t, vqrshrnh_n, u16)
+VQRSHRN_SCALAR_IMPL(int16_t, int32_t, vqrshrns_n, s32)
+VQRSHRN_SCALAR_IMPL(uint16_t, uint32_t, vqrshrns_n, u32)
+VQRSHRN_SCALAR_IMPL(int32_t, int64_t, vqrshrnd_n, s64)
+VQRSHRN_SCALAR_IMPL(uint32_t, uint64_t, vqrshrnd_n, u64)
+
+#undef VQRSHRN_SCALAR_IMPL
+#endif // __aarch64__
+
// This function is the mixed version of VQRSHRN and VQRSHRUN.
// The input vector is always a signed integer vector, while the returned vector
// can be either signed or unsigned depending on the signedness of scalar type T.
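A usage sketch for the vector form described above (assumes the wrapper
namespace of this header; the shift value 4 is arbitrary):

// T picks the narrowing flavour: int8_t -> VQRSHRN, uint8_t -> VQRSHRUN.
const int16x8_t acc = vdupq_n_s16(-100);
const int8x8_t  s   = wrapper::vqrshrn_ex<4, int8_t>(acc);  // maps to vqrshrn_n_s16
const uint8x8_t u   = wrapper::vqrshrn_ex<4, uint8_t>(acc); // maps to vqrshrun_n_s16; negatives clamp to 0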
@@ -73,6 +91,29 @@ VQRSHRN_EX_IMPL(int32x2_t, int64x2_t, vqrshrn_n, vqrshrun_n, s64)
#undef VQRSHRN_EX_IMPL
+#ifdef __aarch64__
+#define VQRSHRN_EX_SCALAR_IMPL(half_vtype, vtype, prefix_signed, prefix_unsigned, postfix) \
+ template <int b, typename T> \
+ inline typename std::enable_if<std::is_integral<T>::value && std::is_signed<T>::value, half_vtype>::type \
+ vqrshrn_ex(const vtype &a) \
+ { \
+ return prefix_signed##_##postfix(a, b); \
+ } \
+ \
+ template <int b, typename T> \
+ inline typename std::enable_if<std::is_integral<T>::value && !std::is_signed<T>::value, u##half_vtype>::type \
+ vqrshrn_ex(const vtype &a) \
+ { \
+ return prefix_unsigned##_##postfix(a, b); \
+ }
+
+VQRSHRN_EX_SCALAR_IMPL(int8_t, int16_t, vqrshrnh_n, vqrshrunh_n, s16)
+VQRSHRN_EX_SCALAR_IMPL(int16_t, int32_t, vqrshrns_n, vqrshruns_n, s32)
+VQRSHRN_EX_SCALAR_IMPL(int32_t, int64_t, vqrshrnd_n, vqrshrund_n, s64)
+
+#undef VQRSHRN_EX_SCALAR_IMPL
+#endif // __aarch64__
+
} // namespace wrapper
} // namespace arm_compute
#endif /* ARM_COMPUTE_WRAPPER_SHR_H */
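Taken together, the new scalar overloads let kernel code call the same wrapper
in the vectorized part and in the left-over loop. A usage sketch (the values
and shift amount are illustrative):

const int16_t acc = -1000;                                             // widened fixed-point value
const int8_t  a   = arm_compute::wrapper::vqrshrn<4>(acc);             // vqrshrnh_n_s16(acc, 4)
const int8_t  s   = arm_compute::wrapper::vqrshrn_ex<4, int8_t>(acc);  // same as above
const uint8_t u   = arm_compute::wrapper::vqrshrn_ex<4, uint8_t>(acc); // vqrshrunh_n_s16(acc, 4)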