aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorViet-Hoa Do <viet-hoa.do@arm.com>2022-10-11 13:21:35 +0100
committerViet-Hoa Do <viet-hoa.do@arm.com>2022-10-27 09:30:45 +0000
commit910e3f9b686d16657e37d4c18f234b566c8deec2 (patch)
tree76e71422f484abef9e19aeda15863e07b1acfdf6
parentdc73246af4fec0b36ca3ae3d8e47f51561308309 (diff)
downloadComputeLibrary-910e3f9b686d16657e37d4c18f234b566c8deec2.tar.gz
Fix fixed-point quantized addition
* Use the same rounding function for the left-over part with the vectorized part.

Resolves: COMPMID-5640
Signed-off-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Change-Id: I07450b2a43390b77539b78cd5d3e6772bdc38548
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8520
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--src/core/NEON/wrapper/intrinsics/shr.h41
-rw-r--r--src/cpu/kernels/add/generic/neon/impl.cpp27
2 files changed, 63 insertions, 5 deletions
diff --git a/src/core/NEON/wrapper/intrinsics/shr.h b/src/core/NEON/wrapper/intrinsics/shr.h
index 69fc254b61..d740091464 100644
--- a/src/core/NEON/wrapper/intrinsics/shr.h
+++ b/src/core/NEON/wrapper/intrinsics/shr.h
@@ -49,6 +49,24 @@ VQRSHRN_IMPL(uint32x2_t, uint64x2_t, vqrshrn_n, u64)
#undef VQRSHRN_IMPL
+#ifdef __aarch64__
+#define VQRSHRN_SCALAR_IMPL(half_vtype, vtype, prefix, postfix) \
+ template <int b> \
+ inline half_vtype vqrshrn(const vtype &a) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VQRSHRN_SCALAR_IMPL(int8_t, int16_t, vqrshrnh_n, s16)
+VQRSHRN_SCALAR_IMPL(uint8_t, uint16_t, vqrshrnh_n, u16)
+VQRSHRN_SCALAR_IMPL(int16_t, int32_t, vqrshrns_n, s32)
+VQRSHRN_SCALAR_IMPL(uint16_t, uint32_t, vqrshrns_n, u32)
+VQRSHRN_SCALAR_IMPL(int32_t, int64_t, vqrshrnd_n, s64)
+VQRSHRN_SCALAR_IMPL(uint32_t, uint64_t, vqrshrnd_n, u64)
+
+#undef VQRSHRN_SCALAR_IMPL
+#endif // __aarch64__
+
// This function is the mixed version of VQRSHRN and VQRSHRUN.
// The input vector is always signed integer, while the returned vector
// can be either signed or unsigned depending on the signedness of scalar type T.
@@ -73,6 +91,29 @@ VQRSHRN_EX_IMPL(int32x2_t, int64x2_t, vqrshrn_n, vqrshrun_n, s64)
#undef VQRSHRN_EX_IMPL
+#ifdef __aarch64__
+#define VQRSHRN_EX_SCALAR_IMPL(half_vtype, vtype, prefix_signed, prefix_unsigned, postfix) \
+ template <int b, typename T> \
+ inline typename std::enable_if<std::is_integral<T>::value && std::is_signed<T>::value, half_vtype>::type \
+ vqrshrn_ex(const vtype &a) \
+ { \
+ return prefix_signed##_##postfix(a, b); \
+ } \
+ \
+ template <int b, typename T> \
+ inline typename std::enable_if<std::is_integral<T>::value && !std::is_signed<T>::value, u##half_vtype>::type \
+ vqrshrn_ex(const vtype &a) \
+ { \
+ return prefix_unsigned##_##postfix(a, b); \
+ }
+
+VQRSHRN_EX_SCALAR_IMPL(int8_t, int16_t, vqrshrnh_n, vqrshrunh_n, s16)
+VQRSHRN_EX_SCALAR_IMPL(int16_t, int32_t, vqrshrns_n, vqrshruns_n, s32)
+VQRSHRN_EX_SCALAR_IMPL(int32_t, int64_t, vqrshrnd_n, vqrshrund_n, s64)
+
+#undef VQRSHRN_EX_SCALAR_IMPL
+#endif // __aarch64__
+
} // namespace wrapper
} // namespace arm_compute
#endif /* ARM_COMPUTE_WRAPPER_SHR_H */
diff --git a/src/cpu/kernels/add/generic/neon/impl.cpp b/src/cpu/kernels/add/generic/neon/impl.cpp
index 1a0b44fa8c..0d4402e332 100644
--- a/src/cpu/kernels/add/generic/neon/impl.cpp
+++ b/src/cpu/kernels/add/generic/neon/impl.cpp
@@ -205,6 +205,10 @@ void add_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *d
const auto b_scale = is_broadcast_input_1 ? in1_scale : in0_scale;
const auto a_vscale_6p10 = wrapper::vdup_n(a_scale_6p10, wrapper::traits::vector_64_tag());
+#ifndef __aarch64__
+ const auto a_scale = is_broadcast_input_1 ? in0_scale : in1_scale;
+#endif // __aarch64__
+
// Clear the x dimension on the execution window as we process the whole row each iteration.
a_win.set(Window::DimX, Window::Dimension(0, 1, 1));
@@ -219,8 +223,14 @@ void add_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *d
const auto out_ptr = reinterpret_cast<ScalarType *>(out_it.ptr());
const auto b_val = *b_ptr;
- const auto b_scaled_22p10 = static_cast<int32_t>(support::cpp11::lround(b_scale * b_val * 1024.f));
- const auto b_vscaled_offseted_22p10 = wrapper::vdup_n(b_scaled_22p10 + offset_22p10, wrapper::traits::vector_128_tag());
+ const auto b_scaled = b_scale * b_val;
+ const auto b_scaled_22p10 = static_cast<int32_t>(support::cpp11::lround(b_scaled * 1024.f));
+ const auto b_scaled_offseted_22p10 = b_scaled_22p10 + offset_22p10;
+ const auto b_vscaled_offseted_22p10 = wrapper::vdup_n(b_scaled_offseted_22p10, wrapper::traits::vector_128_tag());
+
+#ifndef __aarch64__
+ const auto b_scaled_offseted = b_scaled + offset;
+#endif // __aarch64__
int x = window_start_x;
@@ -263,7 +273,11 @@ void add_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *d
// Process the left-over elements.
for(; x < window_end_x; ++x)
{
- out_ptr[x] = utility::clamp<int32_t, ScalarType>((int32_t(a_ptr[x]) * a_scale_6p10 + b_scaled_22p10 + offset_22p10) >> 10);
+#ifdef __aarch64__
+ out_ptr[x] = wrapper::vqrshrn<8>(wrapper::vqrshrn_ex<2, ScalarType>(int32_t(a_ptr[x]) * a_scale_6p10 + b_scaled_offseted_22p10));
+#else // __aarch64__
+ out_ptr[x] = utility::clamp<int, ScalarType>(support::cpp11::lround(float(a_ptr[x]) * a_scale + b_scaled_offseted));
+#endif // __aarch64__
}
},
b_input_it, a_input_it, out_it);
@@ -337,8 +351,11 @@ void add_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *d
// Process the left-over elements.
for(; x < window_end_x; ++x)
{
- out_ptr[x] = utility::clamp<int32_t, ScalarType>(
- (int32_t(in0_ptr[x]) * in0_scale_6p10 + int32_t(in1_ptr[x]) * in1_scale_6p10 + offset_22p10) >> 10);
+#ifdef __aarch64__
+ out_ptr[x] = wrapper::vqrshrn<8>(wrapper::vqrshrn_ex<2, ScalarType>(int32_t(in0_ptr[x]) * in0_scale_6p10 + int32_t(in1_ptr[x]) * in1_scale_6p10 + offset_22p10));
+#else // __aarch64__
+ out_ptr[x] = utility::clamp<int, ScalarType>(support::cpp11::lround(float(in0_ptr[x]) * in0_scale + float(in1_ptr[x]) * in1_scale + offset));
+#endif // __aarch64__
}
},
in0_it, in1_it, out_it);