author     Viet-Hoa Do <viet-hoa.do@arm.com>    2022-09-22 10:24:23 +0100
committer  Viet-Hoa Do <viet-hoa.do@arm.com>    2022-10-03 08:57:23 +0000
commit     40b441905760846e9fdaca283a4a4de038a6ef0d (patch)
tree       38a4f6b5122bfaf44a2a33e90b331a2e1a30b113 /src/cpu/kernels/add
parent     ff81de5a9a0f6b9331c3b112cc2aed552f0482a9 (diff)
download   ComputeLibrary-40b441905760846e9fdaca283a4a4de038a6ef0d.tar.gz
Optimize CPU add layer on quantized data
* Use fixed-point arithmetic where possible.
* Various optimizations for the FP32-based implementation.
  This implementation is kept as the fallback solution
  in case of unrealistic quantization parameters that would exceed
  the range of the fixed-point solution.
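
For reference, below is a minimal scalar sketch of the fixed-point scheme the
new kernel uses. The function name and structure are illustrative only; the
actual kernel processes whole rows with the NEON wrapper:: intrinsics shown in
the diff, and its vector path narrows with rounding right-shifts.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // out = clamp(in0 * s0 + in1 * s1 + offset), where s0 = iq0.scale / oq.scale,
    // s1 = iq1.scale / oq.scale and offset = oq.offset - s0 * iq0.offset - s1 * iq1.offset.
    uint8_t add_q8_fixedpoint_scalar(uint8_t in0, uint8_t in1, float s0, float s1, float offset)
    {
        // Scale factors are stored as signed 6.10 fixed-point values (hence the
        // |scale| <= 31 feasibility check); the offset and the accumulator use
        // signed 22.10, which a 32-bit integer holds exactly.
        const auto s0_6p10      = static_cast<int16_t>(std::lround(s0 * 1024.f));
        const auto s1_6p10      = static_cast<int16_t>(std::lround(s1 * 1024.f));
        const auto offset_22p10 = static_cast<int32_t>(std::lround(offset * 1024.f));

        // 8-bit input times 6.10 scale accumulates exactly in 32 bits.
        const int32_t acc_22p10 = int32_t(in0) * s0_6p10 + int32_t(in1) * s1_6p10 + offset_22p10;

        // Drop the 10 fractional bits and saturate to the 8-bit output range.
        return static_cast<uint8_t>(std::clamp(acc_22p10 >> 10, 0, 255));
    }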
Resolves: COMPMID-5458
Signed-off-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Change-Id: I221d2d3801ecaae4fe0b7cf6ae8ef00ca3743665
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8317
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/cpu/kernels/add')
-rw-r--r--  src/cpu/kernels/add/generic/neon/impl.cpp           | 220
-rw-r--r--  src/cpu/kernels/add/generic/neon/impl.h             |   5
-rw-r--r--  src/cpu/kernels/add/generic/neon/qasymm8.cpp        | 130
-rw-r--r--  src/cpu/kernels/add/generic/neon/qasymm8_signed.cpp | 119
4 files changed, 358 insertions(+), 116 deletions(-)
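
As a quick worked check of the bounds that add_q8_neon_fixedpoint_possible
enforces in the diff below: each input scale divided by the output scale must
fit a signed 6.10 fixed-point value, and the worst-case accumulator must fit a
signed 22.10 value. The quantization parameters in this standalone sketch are
hypothetical, chosen only to exercise the check:

    #include <cmath>
    #include <cstdio>

    int main()
    {
        // Hypothetical quantization parameters (not taken from the commit).
        const float iq0_scale = 0.5f, iq1_scale = 0.25f, oq_scale = 0.1f;
        const float iq0_offset = 10.f, iq1_offset = -5.f, oq_offset = 3.f;

        const float scale0 = iq0_scale / oq_scale; // 5.0 -> fits signed 6.10 (|scale| <= 31)
        const float scale1 = iq1_scale / oq_scale; // 2.5 -> fits signed 6.10 (|scale| <= 31)
        const float offset  = oq_offset - scale0 * iq0_offset - scale1 * iq1_offset; // -34.5
        const float max_acc = (std::abs(scale0) + std::abs(scale1)) * 1024.f + std::abs(offset);

        // 7.5 * 1024 + 34.5 = 7714.5 <= 2097151 (2^21 - 1), so the accumulator
        // fits signed 22.10 and the fixed-point path can be selected.
        std::printf("max_acc = %.1f, fixed-point possible: %d\n", max_acc, max_acc <= 2097151.f);
        return 0;
    }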
diff --git a/src/cpu/kernels/add/generic/neon/impl.cpp b/src/cpu/kernels/add/generic/neon/impl.cpp
index 67985c985e..0f7b31c754 100644
--- a/src/cpu/kernels/add/generic/neon/impl.cpp
+++ b/src/cpu/kernels/add/generic/neon/impl.cpp
@@ -157,6 +157,223 @@ void add_same_neon_as_1d_array(const ITensor *src0, const ITensor *src1, ITensor
     }
 }
 
+bool add_q8_neon_fixedpoint_possible(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *dst)
+{
+    const auto iq0 = src0->quantization_info().uniform();
+    const auto iq1 = src1->quantization_info().uniform();
+    const auto oq  = dst->quantization_info().uniform();
+
+    const auto scale0 = iq0.scale / oq.scale;
+    const auto scale1 = iq1.scale / oq.scale;
+
+    if(scale0 < -31.f || scale0 > 31.f || scale1 < -31.f || scale1 > 31.f)
+    {
+        // The scale factor cannot be stored as 6.10 signed fixed-point number.
+        return false;
+    }
+
+    const auto offset  = float(oq.offset) - scale0 * float(iq0.offset) - scale1 * float(iq1.offset);
+    const auto max_acc = (std::abs(scale0) + std::abs(scale1)) * 1024.f + std::abs(offset);
+
+    if(max_acc > 2097151.f) // 2^21 - 1
+    {
+        // It might not be possible to store the result as 22.10 signed fixed-point number.
+        return false;
+    }
+
+    return true;
+}
+
+template <typename ScalarType>
+void add_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
+{
+    ARM_COMPUTE_UNUSED(policy);
+
+    const auto in0_info = src0->info();
+    const auto in1_info = src1->info();
+
+    const auto &in0_shape = in0_info->tensor_shape();
+    const auto &in1_shape = in1_info->tensor_shape();
+
+    // Create input windows.
+    Window in0_win = window.broadcast_if_dimension_le_one(in0_shape);
+    Window in1_win = window.broadcast_if_dimension_le_one(in1_shape);
+
+    // Clear the x dimension on the execution window as we process the whole row each iteration.
+    Window win = window;
+    win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+    constexpr int window_step_x         = 16;
+    const auto    window_start_x        = window.x().start();
+    const auto    window_end_x          = window.x().end();
+    const auto    is_broadcast_across_x = in0_shape.x() != in1_shape.x();
+
+    const auto iq0_info = in0_info->quantization_info().uniform();
+    const auto iq1_info = in1_info->quantization_info().uniform();
+    const auto oq_info  = dst->info()->quantization_info().uniform();
+
+    const auto in0_scale = iq0_info.scale / oq_info.scale;
+    const auto in1_scale = iq1_info.scale / oq_info.scale;
+    const auto offset    = float(oq_info.offset) - in0_scale * float(iq0_info.offset) - in1_scale * float(iq1_info.offset);
+
+    const auto in0_scale_6p10 = static_cast<int16_t>(support::cpp11::lround(in0_scale * 1024.f));
+    const auto in1_scale_6p10 = static_cast<int16_t>(support::cpp11::lround(in1_scale * 1024.f));
+    const auto offset_22p10   = static_cast<int32_t>(support::cpp11::lround(offset * 1024.f));
+
+    if(is_broadcast_across_x)
+    {
+        // Prefix: a = non-broadcast, b = broadcast.
+
+        const auto is_broadcast_input_1 = in1_win.x().step() == 0;
+        auto       a_win                = is_broadcast_input_1 ? in0_win : in1_win;
+        auto       b_win                = is_broadcast_input_1 ? in1_win : in0_win;
+        const auto a_tensor             = is_broadcast_input_1 ? src0 : src1;
+        const auto b_tensor             = is_broadcast_input_1 ? src1 : src0;
+
+        const auto a_scale_6p10  = is_broadcast_input_1 ? in0_scale_6p10 : in1_scale_6p10;
+        const auto b_scale       = is_broadcast_input_1 ? in1_scale : in0_scale;
+        const auto a_vscale_6p10 = wrapper::vdup_n(a_scale_6p10, wrapper::traits::vector_64_tag());
+
+        // Clear the x dimension on the execution window as we process the whole row each iteration.
+        a_win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+        Iterator a_input_it(a_tensor, a_win);
+        Iterator b_input_it(b_tensor, b_win);
+        Iterator out_it(dst, win);
+
+        execute_window_loop(win, [&](const Coordinates &)
+        {
+            const auto a_ptr   = reinterpret_cast<const ScalarType *>(a_input_it.ptr());
+            const auto b_ptr   = reinterpret_cast<const ScalarType *>(b_input_it.ptr());
+            const auto out_ptr = reinterpret_cast<ScalarType *>(out_it.ptr());
+
+            const auto b_val                    = *b_ptr;
+            const auto b_scaled_22p10           = static_cast<int32_t>(support::cpp11::lround(b_scale * b_val * 1024.f));
+            const auto b_vscaled_offseted_22p10 = wrapper::vdup_n(b_scaled_22p10 + offset_22p10, wrapper::traits::vector_128_tag());
+
+            int x = window_start_x;
+
+            for(; x <= (window_end_x - window_step_x); x += window_step_x)
+            {
+                // Load the input.
+                const auto a_vin_8p0 = wrapper::vloadq(a_ptr + x);
+
+                // Widen the non-broadcast elements to signed 16-bit regardless of the input signedness.
+                const auto a_vin_16p0_0 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(a_vin_8p0)));
+                const auto a_vin_16p0_1 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(a_vin_8p0)));
+
+                // Multiply the non-broadcast elements by the scale factor, add the scaled broadcast elements and the offset.
+                // Widen and store the result in 32-bit integer.
+                const auto vout_22p10_00 = wrapper::vmlal(b_vscaled_offseted_22p10, wrapper::vgetlow(a_vin_16p0_0), a_vscale_6p10);
+                const auto vout_22p10_01 = wrapper::vmlal(b_vscaled_offseted_22p10, wrapper::vgethigh(a_vin_16p0_0), a_vscale_6p10);
+                const auto vout_22p10_10 = wrapper::vmlal(b_vscaled_offseted_22p10, wrapper::vgetlow(a_vin_16p0_1), a_vscale_6p10);
+                const auto vout_22p10_11 = wrapper::vmlal(b_vscaled_offseted_22p10, wrapper::vgethigh(a_vin_16p0_1), a_vscale_6p10);
+
+                // Remove 2 bits of the fractional part, round, narrow to 16-bit and saturate the result.
+                const auto vout_8p8_0 = wrapper::vcombine(
+                                            wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_00),
+                                            wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_01));
+                const auto vout_8p8_1 = wrapper::vcombine(
+                                            wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_10),
+                                            wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_11));
+
+                // Remove 8 bits of the fractional part, round, narrow to 8-bit and saturate the result.
+                const auto vout_8p0 = wrapper::vcombine(
+                                          wrapper::vqrshrn<8>(vout_8p8_0),
+                                          wrapper::vqrshrn<8>(vout_8p8_1));
+
+                // Store the result.
+                wrapper::vstore(out_ptr + x, vout_8p0);
+            }
+
+            // Process the left-over elements.
+            for(; x < window_end_x; ++x)
+            {
+                out_ptr[x] = utility::clamp<int32_t, ScalarType>((int32_t(a_ptr[x]) * a_scale_6p10 + b_scaled_22p10 + offset_22p10) >> 10);
+            }
+        },
+        b_input_it, a_input_it, out_it);
+    }
+    else
+    {
+        const auto vscale0_6p10  = wrapper::vdup_n(in0_scale_6p10, wrapper::traits::vector_64_tag());
+        const auto vscale1_6p10  = wrapper::vdup_n(in1_scale_6p10, wrapper::traits::vector_64_tag());
+        const auto voffset_22p10 = wrapper::vdup_n(offset_22p10, wrapper::traits::vector_128_tag());
+
+        // Clear the x dimension on the execution window as we process the whole row each iteration.
+        in0_win.set(Window::DimX, Window::Dimension(0, 1, 1));
+        in1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+        Iterator in0_it(src0, in0_win);
+        Iterator in1_it(src1, in1_win);
+        Iterator out_it(dst, win);
+
+        execute_window_loop(win, [&](const Coordinates &)
+        {
+            const auto in0_ptr = reinterpret_cast<const ScalarType *>(in0_it.ptr());
+            const auto in1_ptr = reinterpret_cast<const ScalarType *>(in1_it.ptr());
+            const auto out_ptr = reinterpret_cast<ScalarType *>(out_it.ptr());
+
+            int x = window_start_x;
+
+            for(; x <= (window_end_x - window_step_x); x += window_step_x)
+            {
+                // Load the inputs.
+                const auto vin0_8p0 = wrapper::vloadq(in0_ptr + x);
+                const auto vin1_8p0 = wrapper::vloadq(in1_ptr + x);
+
+                // Widen the input elements to signed 16-bit regardless of the input signedness.
+                const auto vin0_16p0_0 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(vin0_8p0)));
+                const auto vin0_16p0_1 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(vin0_8p0)));
+                const auto vin1_16p0_0 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(vin1_8p0)));
+                const auto vin1_16p0_1 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(vin1_8p0)));
+
+                // Multiply the input elements by the scale factor and add the offset.
+                // Widen and store the result in 32-bit integer.
+                const auto vscaled0_offseted_22p10_00 = wrapper::vmlal(voffset_22p10, wrapper::vgetlow(vin0_16p0_0), vscale0_6p10);
+                const auto vscaled0_offseted_22p10_01 = wrapper::vmlal(voffset_22p10, wrapper::vgethigh(vin0_16p0_0), vscale0_6p10);
+                const auto vscaled0_offseted_22p10_10 = wrapper::vmlal(voffset_22p10, wrapper::vgetlow(vin0_16p0_1), vscale0_6p10);
+                const auto vscaled0_offseted_22p10_11 = wrapper::vmlal(voffset_22p10, wrapper::vgethigh(vin0_16p0_1), vscale0_6p10);
+
+                const auto vout_22p10_00 = wrapper::vmlal(vscaled0_offseted_22p10_00, wrapper::vgetlow(vin1_16p0_0), vscale1_6p10);
+                const auto vout_22p10_01 = wrapper::vmlal(vscaled0_offseted_22p10_01, wrapper::vgethigh(vin1_16p0_0), vscale1_6p10);
+                const auto vout_22p10_10 = wrapper::vmlal(vscaled0_offseted_22p10_10, wrapper::vgetlow(vin1_16p0_1), vscale1_6p10);
+                const auto vout_22p10_11 = wrapper::vmlal(vscaled0_offseted_22p10_11, wrapper::vgethigh(vin1_16p0_1), vscale1_6p10);
+
+                // Remove 2 bits of the fractional part, round, narrow to 16-bit and saturate the result.
+                const auto vout_8p8_0 = wrapper::vcombine(
+                                            wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_00),
+                                            wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_01));
+                const auto vout_8p8_1 = wrapper::vcombine(
+                                            wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_10),
+                                            wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_11));
+
+                // Remove 8 bits of the fractional part, round, narrow to 8-bit and saturate the result.
+                const auto vout_8p0 = wrapper::vcombine(
+                                          wrapper::vqrshrn<8>(vout_8p8_0),
+                                          wrapper::vqrshrn<8>(vout_8p8_1));
+
+                // Store the result.
+                wrapper::vstore(out_ptr + x, vout_8p0);
+            }
+
+            // Process the left-over elements.
+            for(; x < window_end_x; ++x)
+            {
+                out_ptr[x] = utility::clamp<int32_t, ScalarType>(
+                    (int32_t(in0_ptr[x]) * in0_scale_6p10 + int32_t(in1_ptr[x]) * in1_scale_6p10 + offset_22p10) >> 10);
+            }
+        },
+        in0_it, in1_it, out_it);
+    }
+}
+
 template void add_same_neon<float>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
 template void add_same_neon<uint8_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
 template void add_same_neon<int32_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
@@ -175,5 +392,8 @@ template void add_same_neon_as_1d_array<int16_t>(const ITensor *src0, const ITen
 template void add_same_neon_as_1d_array<float16_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
 #endif /* (__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */
 
+template void add_q8_neon_fixedpoint<int8_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
+template void add_q8_neon_fixedpoint<uint8_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
+
 } // namespace cpu
 } // namespace arm_compute
diff --git a/src/cpu/kernels/add/generic/neon/impl.h b/src/cpu/kernels/add/generic/neon/impl.h
index f8f0f517b0..e6a12fb4c0 100644
--- a/src/cpu/kernels/add/generic/neon/impl.h
+++ b/src/cpu/kernels/add/generic/neon/impl.h
@@ -35,6 +35,11 @@ void add_same_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const
 
 template <typename ScalarType>
 void add_same_neon_as_1d_array(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
+
+bool add_q8_neon_fixedpoint_possible(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *dst);
+
+template <typename ScalarType>
+void add_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
 } // namespace cpu
 } // namespace arm_compute
 #endif // SRC_CORE_NEON_KERNELS_ADD_IMPL_H
\ No newline at end of file
diff --git a/src/cpu/kernels/add/generic/neon/qasymm8.cpp b/src/cpu/kernels/add/generic/neon/qasymm8.cpp
index e357a7ef7f..d8b4bca292 100644
--- a/src/cpu/kernels/add/generic/neon/qasymm8.cpp
+++ b/src/cpu/kernels/add/generic/neon/qasymm8.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021 Arm Limited.
+ * Copyright (c) 2020-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -27,6 +27,7 @@
 #include "arm_compute/core/utils/misc/Traits.h"
 #include "src/core/NEON/wrapper/intrinsics/intrinsics.h"
 #include "src/core/helpers/WindowHelpers.h"
+#include "src/cpu/kernels/add/generic/neon/impl.h"
 
 namespace arm_compute
 {
@@ -44,7 +45,7 @@ void add_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, co
     Window win = window;
     win.set(Window::DimX, Window::Dimension(0, 1, 1));
 
-    const int window_step_x = 16;
+    constexpr int window_step_x = 16;
     const auto window_start_x = static_cast<int>(window.x().start());
     const auto window_end_x   = static_cast<int>(window.x().end());
     const bool is_broadcast_across_x = src0->info()->tensor_shape().x() != src1->info()->tensor_shape().x();
@@ -53,8 +54,9 @@ void add_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, co
     const UniformQuantizationInfo iq2_info = src1->info()->quantization_info().uniform();
     const UniformQuantizationInfo oq_info  = dst->info()->quantization_info().uniform();
 
-    const float32x4_t invvscaleo = vdupq_n_f32(1.f / oq_info.scale);
-    const float32x4_t voffseto   = vdupq_n_f32(oq_info.offset);
+    const auto scale1 = iq1_info.scale / oq_info.scale;
+    const auto scale2 = iq2_info.scale / oq_info.scale;
+    const auto offset = float(oq_info.offset) - scale1 * float(iq1_info.offset) - scale2 * float(iq2_info.offset);
 
     if(is_broadcast_across_x)
     {
@@ -63,13 +65,10 @@ void add_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, co
         Window non_broadcast_win            = !is_broadcast_input_2 ? input2_win : input1_win;
         const ITensor *broadcast_tensor     = is_broadcast_input_2 ? src1 : src0;
         const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? src1 : src0;
-        const UniformQuantizationInfo broadcast_qinfo     = broadcast_tensor->info()->quantization_info().uniform();
-        const UniformQuantizationInfo non_broadcast_qinfo = non_broadcast_tensor->info()->quantization_info().uniform();
-        const float32x4_t vscale1  = is_broadcast_input_2 ? vdupq_n_f32(iq1_info.scale) : vdupq_n_f32(iq2_info.scale);
-        const float32x4_t vscale2  = is_broadcast_input_2 ? vdupq_n_f32(iq2_info.scale) : vdupq_n_f32(iq1_info.scale);
-        const int32x4_t   voffset1 = is_broadcast_input_2 ? vdupq_n_s32(iq1_info.offset) : vdupq_n_s32(iq2_info.offset);
-        const int32x4_t   voffset2 = is_broadcast_input_2 ? vdupq_n_s32(iq2_info.offset) : vdupq_n_s32(iq1_info.offset);
+        const auto af_scale = is_broadcast_input_2 ? scale1 : scale2;
+        const auto bf_scale = is_broadcast_input_2 ? scale2 : scale1;
+        const auto vscale1  = vdupq_n_f32(af_scale);
 
         // Clear X Dimension on execution window as we handle manually
         non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));
@@ -80,28 +79,26 @@ void add_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, co
 
         execute_window_loop(win, [&](const Coordinates &)
         {
-            const auto non_broadcast_input_ptr = reinterpret_cast<const uint8_t *>(non_broadcast_input.ptr());
-            const auto output_ptr              = reinterpret_cast<uint8_t *>(output.ptr());
+            const auto non_broadcast_input_ptr = non_broadcast_input.ptr();
+            const auto output_ptr              = output.ptr();
 
-            const uint8_t    broadcast_value     = *reinterpret_cast<const uint8_t *>(broadcast_input.ptr());
-            const uint8x16_t broadcast_value_vec = vdupq_n_u8(broadcast_value);
-
-            const auto bf_0 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(broadcast_value_vec))))), voffset2)), vscale2);
-            const auto bf_1 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_low_u8(broadcast_value_vec))))), voffset2)), vscale2);
-            const auto bf_2 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_high_u8(broadcast_value_vec))))), voffset2)), vscale2);
-            const auto bf_3 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_high_u8(broadcast_value_vec))))), voffset2)), vscale2);
-
-            const float bfs = static_cast<int32_t>(broadcast_value - broadcast_qinfo.offset) * broadcast_qinfo.scale;
+            const auto broadcast_value = *broadcast_input.ptr();
+            const auto bf  = vdupq_n_f32(float(broadcast_value) * scale2 + offset);
+            const auto bfs = float(broadcast_value) * bf_scale + offset;
 
             // Compute S elements per iteration
             int x = window_start_x;
             for(; x <= (window_end_x - window_step_x); x += window_step_x)
             {
                 const uint8x16_t a = vld1q_u8(non_broadcast_input_ptr + x);
-                const auto af_0 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(a))))), voffset1)), vscale1);
-                const auto af_1 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_low_u8(a))))), voffset1)), vscale1);
-                const auto af_2 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_high_u8(a))))), voffset1)), vscale1);
-                const auto af_3 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_high_u8(a))))), voffset1)), vscale1);
+
+                const auto a_u16_0 = vmovl_u8(vget_low_u8(a));
+                const auto a_u16_1 = vmovl_u8(vget_high_u8(a));
+
+                const auto af_0 = vmlaq_f32(bf, vcvtq_f32_u32(vmovl_u16(vget_low_u16(a_u16_0))), vscale1);
+                const auto af_1 = vmlaq_f32(bf, vcvtq_f32_u32(vmovl_u16(vget_high_u16(a_u16_0))), vscale1);
+                const auto af_2 = vmlaq_f32(bf, vcvtq_f32_u32(vmovl_u16(vget_low_u16(a_u16_1))), vscale1);
+                const auto af_3 = vmlaq_f32(bf, vcvtq_f32_u32(vmovl_u16(vget_high_u16(a_u16_1))), vscale1);
 
                 int32x4_t rf_0{};
                 int32x4_t rf_1{};
@@ -109,15 +106,15 @@ void add_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, co
                 int32x4_t rf_3{};
 
 #ifdef __aarch64__
-                rf_0 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_0, bf_0), invvscaleo));
-                rf_1 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_1, bf_1), invvscaleo));
-                rf_2 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_2, bf_2), invvscaleo));
-                rf_3 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_3, bf_3), invvscaleo));
+                rf_0 = vcvtnq_s32_f32(af_0);
+                rf_1 = vcvtnq_s32_f32(af_1);
+                rf_2 = vcvtnq_s32_f32(af_2);
+                rf_3 = vcvtnq_s32_f32(af_3);
#else  //__aarch64__
-                rf_0 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_0, bf_0), invvscaleo));
-                rf_1 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_1, bf_1), invvscaleo));
-                rf_2 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_2, bf_2), invvscaleo));
-                rf_3 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_3, bf_3), invvscaleo));
+                rf_0 = vcvtq_s32_f32(af_0);
+                rf_1 = vcvtq_s32_f32(af_1);
+                rf_2 = vcvtq_s32_f32(af_2);
+                rf_3 = vcvtq_s32_f32(af_3);
#endif //__aarch64__
 
                 const uint8x8_t pa = vqmovun_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
@@ -128,8 +125,12 @@ void add_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, co
             // Compute left-over elements
             for(; x < window_end_x; ++x)
             {
-                const float afs = static_cast<int32_t>(*(non_broadcast_input_ptr + x) - non_broadcast_qinfo.offset) * non_broadcast_qinfo.scale;
-                *(output_ptr + x) = quantize_qasymm8((afs + bfs), oq_info);
+                const auto result = float(non_broadcast_input_ptr[x]) * af_scale + bfs;
+#ifdef __aarch64__
+                output_ptr[x] = utility::clamp<int, uint8_t>(support::cpp11::lround(result));
+#else  // __aarch64__
+                output_ptr[x] = utility::clamp<int, uint8_t>(support::cpp11::trunc(result));
+#endif // __aarch64__
             }
         },
         broadcast_input, non_broadcast_input, output);
@@ -144,16 +145,15 @@ void add_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, co
         Iterator input2(src1, input2_win);
         Iterator output(dst, win);
 
-        const float32x4_t vscale1  = vdupq_n_f32(iq1_info.scale);
-        const float32x4_t vscale2  = vdupq_n_f32(iq2_info.scale);
-        const int32x4_t   voffset1 = vdupq_n_s32(iq1_info.offset);
-        const int32x4_t   voffset2 = vdupq_n_s32(iq2_info.offset);
+        const auto vscale1 = vdupq_n_f32(scale1);
+        const auto vscale2 = vdupq_n_f32(scale2);
+        const auto voffset = vdupq_n_f32(offset);
 
         execute_window_loop(win, [&](const Coordinates &)
         {
-            const auto input1_ptr = reinterpret_cast<const uint8_t *>(input1.ptr());
-            const auto input2_ptr = reinterpret_cast<const uint8_t *>(input2.ptr());
-            const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
+            const auto input1_ptr = input1.ptr();
+            const auto input2_ptr = input2.ptr();
+            const auto output_ptr = output.ptr();
 
             // Compute S elements per iteration
             int x = window_start_x;
@@ -162,15 +162,20 @@ void add_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, co
                 const uint8x16_t a = vld1q_u8(input1_ptr + x);
                 const uint8x16_t b = vld1q_u8(input2_ptr + x);
 
-                const auto af_0 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(a))))), voffset1)), vscale1);
-                const auto af_1 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_low_u8(a))))), voffset1)), vscale1);
-                const auto af_2 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_high_u8(a))))), voffset1)), vscale1);
-                const auto af_3 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_high_u8(a))))), voffset1)), vscale1);
+                const auto a_u16_0 = vmovl_u8(vget_low_u8(a));
+                const auto a_u16_1 = vmovl_u8(vget_high_u8(a));
+                const auto b_u16_0 = vmovl_u8(vget_low_u8(b));
+                const auto b_u16_1 = vmovl_u8(vget_high_u8(b));
+
+                const auto af_0 = vmlaq_f32(voffset, vcvtq_f32_u32(vmovl_u16(vget_low_u16(a_u16_0))), vscale1);
+                const auto af_1 = vmlaq_f32(voffset, vcvtq_f32_u32(vmovl_u16(vget_high_u16(a_u16_0))), vscale1);
+                const auto af_2 = vmlaq_f32(voffset, vcvtq_f32_u32(vmovl_u16(vget_low_u16(a_u16_1))), vscale1);
+                const auto af_3 = vmlaq_f32(voffset, vcvtq_f32_u32(vmovl_u16(vget_high_u16(a_u16_1))), vscale1);
 
-                const auto bf_0 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(b))))), voffset2)), vscale2);
-                const auto bf_1 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_low_u8(b))))), voffset2)), vscale2);
-                const auto bf_2 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_high_u8(b))))), voffset2)), vscale2);
-                const auto bf_3 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_high_u8(b))))), voffset2)), vscale2);
+                const auto bf_0 = vmlaq_f32(af_0, vcvtq_f32_u32(vmovl_u16(vget_low_u16(b_u16_0))), vscale2);
+                const auto bf_1 = vmlaq_f32(af_1, vcvtq_f32_u32(vmovl_u16(vget_high_u16(b_u16_0))), vscale2);
+                const auto bf_2 = vmlaq_f32(af_2, vcvtq_f32_u32(vmovl_u16(vget_low_u16(b_u16_1))), vscale2);
+                const auto bf_3 = vmlaq_f32(af_3, vcvtq_f32_u32(vmovl_u16(vget_high_u16(b_u16_1))), vscale2);
 
                 int32x4_t rf_0{};
                 int32x4_t rf_1{};
@@ -178,15 +183,15 @@ void add_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, co
                 int32x4_t rf_3{};
 
 #ifdef __aarch64__
-                rf_0 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_0, bf_0), invvscaleo));
-                rf_1 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_1, bf_1), invvscaleo));
-                rf_2 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_2, bf_2), invvscaleo));
-                rf_3 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_3, bf_3), invvscaleo));
+                rf_0 = vcvtnq_s32_f32(bf_0);
+                rf_1 = vcvtnq_s32_f32(bf_1);
+                rf_2 = vcvtnq_s32_f32(bf_2);
+                rf_3 = vcvtnq_s32_f32(bf_3);
#else  //__aarch64__
-                rf_0 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_0, bf_0), invvscaleo));
-                rf_1 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_1, bf_1), invvscaleo));
-                rf_2 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_2, bf_2), invvscaleo));
-                rf_3 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_3, bf_3), invvscaleo));
+                rf_0 = vcvtq_s32_f32(bf_0);
+                rf_1 = vcvtq_s32_f32(bf_1);
+                rf_2 = vcvtq_s32_f32(bf_2);
+                rf_3 = vcvtq_s32_f32(bf_3);
#endif //__aarch64__
 
                 const uint8x8_t pa = vqmovun_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
@@ -197,9 +202,12 @@ void add_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, co
             // Compute left-over elements
             for(; x < window_end_x; ++x)
             {
-                const float afs = static_cast<int32_t>((*(input1_ptr + x)) - iq1_info.offset) * iq1_info.scale;
-                const float bfs = static_cast<int32_t>((*(input2_ptr + x)) - iq2_info.offset) * iq2_info.scale;
-                *(output_ptr + x) = quantize_qasymm8((afs + bfs), oq_info);
+                const auto result = float(input1_ptr[x]) * scale1 + float(input2_ptr[x]) * scale2 + offset;
+#ifdef __aarch64__
+                output_ptr[x] = utility::clamp<int, uint8_t>(support::cpp11::lround(result));
+#else  // __aarch64__
+                output_ptr[x] = utility::clamp<int, uint8_t>(support::cpp11::trunc(result));
+#endif // __aarch64__
             }
         },
         input1, input2, output);
diff --git a/src/cpu/kernels/add/generic/neon/qasymm8_signed.cpp b/src/cpu/kernels/add/generic/neon/qasymm8_signed.cpp
index d62d0739f5..a285e483ed 100644
--- a/src/cpu/kernels/add/generic/neon/qasymm8_signed.cpp
+++ b/src/cpu/kernels/add/generic/neon/qasymm8_signed.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021 Arm Limited.
+ * Copyright (c) 2020-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -27,6 +27,7 @@
 #include "arm_compute/core/utils/misc/Traits.h"
 #include "src/core/NEON/wrapper/intrinsics/intrinsics.h"
 #include "src/core/helpers/WindowHelpers.h"
+#include "src/cpu/kernels/add/generic/neon/impl.h"
 
 namespace arm_compute
 {
@@ -44,7 +45,7 @@ void add_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *
     Window win = window;
     win.set(Window::DimX, Window::Dimension(0, 1, 1));
 
-    const int window_step_x = 16;
+    constexpr int window_step_x = 16;
     const auto window_start_x = static_cast<int>(window.x().start());
     const auto window_end_x   = static_cast<int>(window.x().end());
     const bool is_broadcast_across_x = src0->info()->tensor_shape().x() != src1->info()->tensor_shape().x();
@@ -53,8 +54,9 @@ void add_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *
     const UniformQuantizationInfo iq2_info = src1->info()->quantization_info().uniform();
    const UniformQuantizationInfo oq_info  = dst->info()->quantization_info().uniform();
 
-    const float32x4_t invvscaleo = vdupq_n_f32(1.f / oq_info.scale);
-    const float32x4_t voffseto   = vdupq_n_f32(oq_info.offset);
+    const auto scale1 = iq1_info.scale / oq_info.scale;
+    const auto scale2 = iq2_info.scale / oq_info.scale;
+    const auto offset = float(oq_info.offset) - scale1 * float(iq1_info.offset) - scale2 * float(iq2_info.offset);
 
     if(is_broadcast_across_x)
     {
@@ -63,13 +65,10 @@ void add_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *
         Window non_broadcast_win            = !is_broadcast_input_2 ? input2_win : input1_win;
         const ITensor *broadcast_tensor     = is_broadcast_input_2 ? src1 : src0;
         const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? src1 : src0;
-        const UniformQuantizationInfo broadcast_qinfo     = broadcast_tensor->info()->quantization_info().uniform();
-        const UniformQuantizationInfo non_broadcast_qinfo = non_broadcast_tensor->info()->quantization_info().uniform();
-        const float32x4_t vscale1  = is_broadcast_input_2 ? vdupq_n_f32(iq1_info.scale) : vdupq_n_f32(iq2_info.scale);
-        const float32x4_t vscale2  = is_broadcast_input_2 ? vdupq_n_f32(iq2_info.scale) : vdupq_n_f32(iq1_info.scale);
-        const int32x4_t   voffset1 = is_broadcast_input_2 ? vdupq_n_s32(iq1_info.offset) : vdupq_n_s32(iq2_info.offset);
-        const int32x4_t   voffset2 = is_broadcast_input_2 ? vdupq_n_s32(iq2_info.offset) : vdupq_n_s32(iq1_info.offset);
+        const auto af_scale = is_broadcast_input_2 ? scale1 : scale2;
+        const auto bf_scale = is_broadcast_input_2 ? scale2 : scale1;
+        const auto vscale1  = vdupq_n_f32(af_scale);
 
         // Clear X Dimension on execution window as we handle manually
         non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));
@@ -83,14 +82,9 @@ void add_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *
             const auto non_broadcast_input_ptr = reinterpret_cast<const int8_t *>(non_broadcast_input.ptr());
             const auto output_ptr              = reinterpret_cast<int8_t *>(output.ptr());
 
-            const int8_t    broadcast_value     = *reinterpret_cast<const int8_t *>(broadcast_input.ptr());
-            const int8x16_t broadcast_value_vec = vdupq_n_s8(broadcast_value);
-
-            const auto bf_0 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_low_s8(broadcast_value_vec)))), voffset2)), vscale2);
-            const auto bf_1 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_low_s8(broadcast_value_vec)))), voffset2)), vscale2);
-            const auto bf_2 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_high_s8(broadcast_value_vec)))), voffset2)), vscale2);
-            const auto bf_3 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_high_s8(broadcast_value_vec)))), voffset2)), vscale2);
-            const float bfs = static_cast<int32_t>(broadcast_value - broadcast_qinfo.offset) * broadcast_qinfo.scale;
+            const auto broadcast_value = *reinterpret_cast<const int8_t *>(broadcast_input.ptr());
+            const auto bf  = vdupq_n_f32(float(broadcast_value) * scale2 + offset);
+            const auto bfs = float(broadcast_value) * bf_scale + offset;
 
             // Compute S elements per iteration
             int x = window_start_x;
@@ -98,10 +92,13 @@ void add_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *
             {
                 const int8x16_t a = vld1q_s8(non_broadcast_input_ptr + x);
 
-                const auto af_0 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_low_s8(a)))), voffset1)), vscale1);
-                const auto af_1 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_low_s8(a)))), voffset1)), vscale1);
-                const auto af_2 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_high_s8(a)))), voffset1)), vscale1);
-                const auto af_3 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_high_s8(a)))), voffset1)), vscale1);
+                const auto a_s16_0 = vmovl_s8(vget_low_s8(a));
+                const auto a_s16_1 = vmovl_s8(vget_high_s8(a));
+
+                const auto af_0 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_0))), vscale1);
+                const auto af_1 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_0))), vscale1);
+                const auto af_2 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_1))), vscale1);
+                const auto af_3 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_1))), vscale1);
 
                 int32x4_t rf_0{};
                 int32x4_t rf_1{};
@@ -109,15 +106,15 @@ void add_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *
                 int32x4_t rf_3{};
 
 #ifdef __aarch64__
-                rf_0 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_0, bf_0), invvscaleo));
-                rf_1 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_1, bf_1), invvscaleo));
-                rf_2 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_2, bf_2), invvscaleo));
-                rf_3 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_3, bf_3), invvscaleo));
+                rf_0 = vcvtnq_s32_f32(af_0);
+                rf_1 = vcvtnq_s32_f32(af_1);
+                rf_2 = vcvtnq_s32_f32(af_2);
+                rf_3 = vcvtnq_s32_f32(af_3);
#else  //__aarch64__
-                rf_0 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_0, bf_0), invvscaleo));
-                rf_1 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_1, bf_1), invvscaleo));
-                rf_2 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_2, bf_2), invvscaleo));
-                rf_3 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_3, bf_3), invvscaleo));
+                rf_0 = vcvtq_s32_f32(af_0);
+                rf_1 = vcvtq_s32_f32(af_1);
+                rf_2 = vcvtq_s32_f32(af_2);
+                rf_3 = vcvtq_s32_f32(af_3);
#endif //__aarch64__
 
                 const int8x8_t pa = vqmovn_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
@@ -128,8 +125,12 @@ void add_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *
             // Compute left-over elements
             for(; x < window_end_x; ++x)
             {
-                const float afs = static_cast<int32_t>(*(non_broadcast_input_ptr + x) - non_broadcast_qinfo.offset) * non_broadcast_qinfo.scale;
-                *(output_ptr + x) = quantize_qasymm8_signed((afs + bfs), oq_info);
+                const auto result = float(non_broadcast_input_ptr[x]) * af_scale + bfs;
+#ifdef __aarch64__
+                output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::lround(result));
+#else  // __aarch64__
+                output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::trunc(result));
+#endif // __aarch64__
             }
         },
         broadcast_input, non_broadcast_input, output);
@@ -144,10 +145,10 @@ void add_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *
         Iterator input2(src1, input2_win);
         Iterator output(dst, win);
 
-        const float32x4_t vscale1  = vdupq_n_f32(iq1_info.scale);
-        const float32x4_t vscale2  = vdupq_n_f32(iq2_info.scale);
-        const int32x4_t   voffset1 = vdupq_n_s32(iq1_info.offset);
-        const int32x4_t   voffset2 = vdupq_n_s32(iq2_info.offset);
+        const auto vscale1 = vdupq_n_f32(scale1);
+        const auto vscale2 = vdupq_n_f32(scale2);
+        const auto voffset = vdupq_n_f32(offset);
+
         execute_window_loop(win, [&](const Coordinates &)
         {
             const auto input1_ptr = reinterpret_cast<const int8_t *>(input1.ptr());
@@ -161,15 +162,20 @@ void add_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *
                 const int8x16_t a = vld1q_s8(input1_ptr + x);
                 const int8x16_t b = vld1q_s8(input2_ptr + x);
 
-                const auto af_0 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_low_s8(a)))), voffset1)), vscale1);
-                const auto af_1 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_low_s8(a)))), voffset1)), vscale1);
-                const auto af_2 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_high_s8(a)))), voffset1)), vscale1);
-                const auto af_3 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_high_s8(a)))), voffset1)), vscale1);
+                const auto a_s16_0 = vmovl_s8(vget_low_s8(a));
+                const auto a_s16_1 = vmovl_s8(vget_high_s8(a));
+                const auto b_s16_0 = vmovl_s8(vget_low_s8(b));
+                const auto b_s16_1 = vmovl_s8(vget_high_s8(b));
+
+                const auto af_0 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_0))), vscale1);
+                const auto af_1 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_0))), vscale1);
+                const auto af_2 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_1))), vscale1);
+                const auto af_3 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_1))), vscale1);
 
-                const auto bf_0 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_low_s8(b)))), voffset2)), vscale2);
-                const auto bf_1 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_low_s8(b)))), voffset2)), vscale2);
-                const auto bf_2 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_high_s8(b)))), voffset2)), vscale2);
-                const auto bf_3 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_high_s8(b)))), voffset2)), vscale2);
+                const auto bf_0 = vmlaq_f32(af_0, vcvtq_f32_s32(vmovl_s16(vget_low_s16(b_s16_0))), vscale2);
+                const auto bf_1 = vmlaq_f32(af_1, vcvtq_f32_s32(vmovl_s16(vget_high_s16(b_s16_0))), vscale2);
+                const auto bf_2 = vmlaq_f32(af_2, vcvtq_f32_s32(vmovl_s16(vget_low_s16(b_s16_1))), vscale2);
+                const auto bf_3 = vmlaq_f32(af_3, vcvtq_f32_s32(vmovl_s16(vget_high_s16(b_s16_1))), vscale2);
 
                 int32x4_t rf_0{};
                 int32x4_t rf_1{};
@@ -177,15 +183,15 @@ void add_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *
                 int32x4_t rf_3{};
 
 #ifdef __aarch64__
-                rf_0 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_0, bf_0), invvscaleo));
-                rf_1 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_1, bf_1), invvscaleo));
-                rf_2 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_2, bf_2), invvscaleo));
-                rf_3 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_3, bf_3), invvscaleo));
+                rf_0 = vcvtnq_s32_f32(bf_0);
+                rf_1 = vcvtnq_s32_f32(bf_1);
+                rf_2 = vcvtnq_s32_f32(bf_2);
+                rf_3 = vcvtnq_s32_f32(bf_3);
#else  //__aarch64__
-                rf_0 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_0, bf_0), invvscaleo));
-                rf_1 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_1, bf_1), invvscaleo));
-                rf_2 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_2, bf_2), invvscaleo));
-                rf_3 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_3, bf_3), invvscaleo));
+                rf_0 = vcvtq_s32_f32(bf_0);
+                rf_1 = vcvtq_s32_f32(bf_1);
+                rf_2 = vcvtq_s32_f32(bf_2);
+                rf_3 = vcvtq_s32_f32(bf_3);
#endif //__aarch64__
 
                 const int8x8_t pa = vqmovn_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
@@ -196,9 +202,12 @@ void add_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *
             // Compute left-over elements
             for(; x < window_end_x; ++x)
             {
-                const float afs = static_cast<int32_t>((*(input1_ptr + x)) - iq1_info.offset) * iq1_info.scale;
-                const float bfs = static_cast<int32_t>((*(input2_ptr + x)) - iq2_info.offset) * iq2_info.scale;
-                *(output_ptr + x) = quantize_qasymm8_signed((afs + bfs), dst->info()->quantization_info());
+                const auto result = float(input1_ptr[x]) * scale1 + float(input2_ptr[x]) * scale2 + offset;
+#ifdef __aarch64__
+                output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::lround(result));
+#else  // __aarch64__
+                output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::trunc(result));
+#endif // __aarch64__
             }
         },
         input1, input2, output);