Diffstat (limited to 'src/cpu/kernels/add')
-rw-r--r--  src/cpu/kernels/add/generic/neon/fp16.cpp            |   5
-rw-r--r--  src/cpu/kernels/add/generic/neon/fp32.cpp            |   5
-rw-r--r--  src/cpu/kernels/add/generic/neon/impl.cpp            | 711
-rw-r--r--  src/cpu/kernels/add/generic/neon/impl.h              | 145
-rw-r--r--  src/cpu/kernels/add/generic/neon/integer.cpp         |  11
-rw-r--r--  src/cpu/kernels/add/generic/neon/qasymm8.cpp         |   6
-rw-r--r--  src/cpu/kernels/add/generic/neon/qasymm8_signed.cpp  |   6
-rw-r--r--  src/cpu/kernels/add/generic/neon/qsymm16.cpp         | 162
-rw-r--r--  src/cpu/kernels/add/generic/sve/fp16.cpp             |   5
-rw-r--r--  src/cpu/kernels/add/generic/sve/fp32.cpp             |   6
-rw-r--r--  src/cpu/kernels/add/generic/sve/impl.cpp             | 106
-rw-r--r--  src/cpu/kernels/add/generic/sve/impl.h               |   3
-rw-r--r--  src/cpu/kernels/add/generic/sve/integer.cpp          |  12
-rw-r--r--  src/cpu/kernels/add/generic/sve2/qasymm8.cpp         | 237
-rw-r--r--  src/cpu/kernels/add/generic/sve2/qasymm8_signed.cpp  | 193
-rw-r--r--  src/cpu/kernels/add/generic/sve2/qsymm16.cpp         | 119
-rw-r--r--  src/cpu/kernels/add/list.h                           |   7
17 files changed, 994 insertions, 745 deletions
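
Every hunk below is a formatting-only sweep, with no behavioural change: a space after control-flow keywords (`if (...)` rather than `if(...)`), long parameter lists wrapped one per line, lambda bodies inside execute_window_loop re-indented, and closing braces of namespaces annotated. A before/after sketch of the convention on a hypothetical function (not ACL code):

#include <cstdint>

namespace example
{
// Before the sweep this would read
//   void scaled_add(const int8_t *a, const int8_t *b, int8_t *out, float scale, int n)
// with `if(n > 0)` and a bare closing brace on the namespace.
void scaled_add(
    const int8_t *a, const int8_t *b, int8_t *out, float scale, int n)
{
    if (n > 0) // space between keyword and parenthesis
    {
        for (int i = 0; i < n; ++i)
        {
            out[i] = static_cast<int8_t>(scale * float(a[i] + b[i]));
        }
    }
}
} // namespace example
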
diff --git a/src/cpu/kernels/add/generic/neon/fp16.cpp b/src/cpu/kernels/add/generic/neon/fp16.cpp
index fca7b2cd9f..e7679c14e3 100644
--- a/src/cpu/kernels/add/generic/neon/fp16.cpp
+++ b/src/cpu/kernels/add/generic/neon/fp16.cpp
@@ -30,10 +30,11 @@ namespace arm_compute
{
namespace cpu
{
-void add_fp16_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
+void add_fp16_neon(
+ const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
return add_same_neon<float16_t>(src0, src1, dst, policy, window);
}
-}
+} // namespace cpu
} // namespace arm_compute
#endif /* (__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */
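
The fp16 change is purely the signature rewrap plus the namespace comment; the whole translation unit stays inside the FP16 guard visible at the end of the hunk. For reference, a minimal sketch of that guard pattern with raw NEON half-precision intrinsics (illustrative helper, not the library's kernel):

#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
#include <arm_neon.h>

// Eight half-precision lanes per 128-bit register.
static inline void add_f16x8(const float16_t *a, const float16_t *b, float16_t *out)
{
    vst1q_f16(out, vaddq_f16(vld1q_f16(a), vld1q_f16(b)));
}
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
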
diff --git a/src/cpu/kernels/add/generic/neon/fp32.cpp b/src/cpu/kernels/add/generic/neon/fp32.cpp
index 1f599b1968..11a970bef4 100644
--- a/src/cpu/kernels/add/generic/neon/fp32.cpp
+++ b/src/cpu/kernels/add/generic/neon/fp32.cpp
@@ -28,9 +28,10 @@ namespace arm_compute
{
namespace cpu
{
-void add_fp32_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
+void add_fp32_neon(
+ const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
return add_same_neon<float>(src0, src1, dst, policy, window);
}
-}
+} // namespace cpu
} // namespace arm_compute
diff --git a/src/cpu/kernels/add/generic/neon/impl.cpp b/src/cpu/kernels/add/generic/neon/impl.cpp
index 2dde13544a..34938cc4c4 100644
--- a/src/cpu/kernels/add/generic/neon/impl.cpp
+++ b/src/cpu/kernels/add/generic/neon/impl.cpp
@@ -23,8 +23,10 @@
*/
#include "src/cpu/kernels/add/generic/neon/impl.h"
+
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/utils/misc/Traits.h"
+
#include "src/core/NEON/wrapper/wrapper.h"
namespace arm_compute
{
@@ -40,7 +42,10 @@ bool add_q8_neon_fixedpoint_possible(const ITensorInfo *src0, const ITensorInfo
return add_sub_q8_neon_fixedpoint_possible(src0, src1, dst, true);
}
-bool add_sub_q8_neon_fixedpoint_possible(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *dst, bool is_addition)
+bool add_sub_q8_neon_fixedpoint_possible(const ITensorInfo *src0,
+ const ITensorInfo *src1,
+ const ITensorInfo *dst,
+ bool is_addition)
{
const auto iq0 = src0->quantization_info().uniform();
const auto iq1 = src1->quantization_info().uniform();
@@ -49,7 +54,7 @@ bool add_sub_q8_neon_fixedpoint_possible(const ITensorInfo *src0, const ITensorI
const auto scale0 = iq0.scale / oq.scale;
const auto scale1 = iq1.scale / oq.scale;
- if(scale0 < -15.f || scale0 > 15.f || scale1 < -15.f || scale1 > 15.f)
+ if (scale0 < -15.f || scale0 > 15.f || scale1 < -15.f || scale1 > 15.f)
{
// The scale factor cannot be stored as 5.11 signed fixed-point number.
return false;
@@ -57,9 +62,10 @@ bool add_sub_q8_neon_fixedpoint_possible(const ITensorInfo *src0, const ITensorI
const auto offset = float(oq.offset) - scale0 * float(iq0.offset) - scale1 * float(iq1.offset);
- const auto max_acc = is_addition ? ((std::abs(scale0) + std::abs(scale1)) * 256.f + std::abs(offset)) : ((std::abs(scale0) - std::abs(scale1)) * 256.f + std::abs(offset));
+ const auto max_acc = is_addition ? ((std::abs(scale0) + std::abs(scale1)) * 256.f + std::abs(offset))
+ : ((std::abs(scale0) - std::abs(scale1)) * 256.f + std::abs(offset));
- if(max_acc > 1048575.f) // 2^20 - 1
+ if (max_acc > 1048575.f) // 2^20 - 1
{
// It might not be possible to store the result as 21.11 signed fixed-point number.
return false;
@@ -69,13 +75,19 @@ bool add_sub_q8_neon_fixedpoint_possible(const ITensorInfo *src0, const ITensorI
}
template <typename ScalarType>
-void add_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
+void add_q8_neon_fixedpoint(
+ const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
add_sub_q8_neon_fixedpoint<ScalarType>(src0, src1, dst, policy, window, true /*is_addition*/);
}
template <typename ScalarType>
-void add_sub_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window, bool is_addition)
+void add_sub_q8_neon_fixedpoint(const ITensor *src0,
+ const ITensor *src1,
+ ITensor *dst,
+ const ConvertPolicy &policy,
+ const Window &window,
+ bool is_addition)
{
ARM_COMPUTE_UNUSED(policy);
@@ -103,7 +115,7 @@ void add_sub_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITenso
const auto oq_info = dst->info()->quantization_info().uniform();
const auto in0_scale = iq0_info.scale / oq_info.scale;
const auto in1_scale = is_addition ? (iq1_info.scale / oq_info.scale) : (-(iq1_info.scale / oq_info.scale));
- const auto offset = float(oq_info.offset) - in0_scale * float(iq0_info.offset) - in1_scale * float(iq1_info.offset);
+ const auto offset = float(oq_info.offset) - in0_scale * float(iq0_info.offset) - in1_scale * float(iq1_info.offset);
constexpr float _2pow11 = 2048;
const auto in0_scale_5p11 = static_cast<int16_t>(support::cpp11::lround(in0_scale * _2pow11));
@@ -112,7 +124,7 @@ void add_sub_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITenso
constexpr uint8_t shift_amount_remainder = 3;
- if(is_broadcast_across_x)
+ if (is_broadcast_across_x)
{
// Prefix: a = non-broadcast, b = broadcast.
@@ -138,68 +150,75 @@ void add_sub_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITenso
Iterator out_it(dst, win);
execute_window_loop(
- win, [&](const Coordinates &)
- {
- const auto a_ptr = reinterpret_cast<const ScalarType *>(a_input_it.ptr());
- const auto b_ptr = reinterpret_cast<const ScalarType *>(b_input_it.ptr());
- const auto out_ptr = reinterpret_cast<ScalarType *>(out_it.ptr());
-
- const auto b_val = *b_ptr;
- const auto b_scaled = b_scale * b_val;
- const auto b_scaled_21p11 = static_cast<int32_t>(support::cpp11::lround(b_scaled * _2pow11));
- const auto b_scaled_offseted_21p11 = b_scaled_21p11 + offset_21p11;
- const auto b_vscaled_offseted_21p11 = wrapper::vdup_n(b_scaled_offseted_21p11, wrapper::traits::vector_128_tag());
+ win,
+ [&](const Coordinates &)
+ {
+ const auto a_ptr = reinterpret_cast<const ScalarType *>(a_input_it.ptr());
+ const auto b_ptr = reinterpret_cast<const ScalarType *>(b_input_it.ptr());
+ const auto out_ptr = reinterpret_cast<ScalarType *>(out_it.ptr());
+
+ const auto b_val = *b_ptr;
+ const auto b_scaled = b_scale * b_val;
+ const auto b_scaled_21p11 = static_cast<int32_t>(support::cpp11::lround(b_scaled * _2pow11));
+ const auto b_scaled_offseted_21p11 = b_scaled_21p11 + offset_21p11;
+ const auto b_vscaled_offseted_21p11 =
+ wrapper::vdup_n(b_scaled_offseted_21p11, wrapper::traits::vector_128_tag());
#ifndef __aarch64__
- const auto b_scaled_offseted = b_scaled + offset;
+ const auto b_scaled_offseted = b_scaled + offset;
#endif // __aarch64__
- int x = window_start_x;
-
- for(; x <= (window_end_x - window_step_x); x += window_step_x)
- {
- // Load the input.
- const auto a_vin_8p0 = wrapper::vloadq(a_ptr + x);
-
- // Widen the non-broadcast elements to signed 16-bit regardless of the input signedness.
- const auto a_vin_16p0_0 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(a_vin_8p0)));
- const auto a_vin_16p0_1 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(a_vin_8p0)));
-
- // Multiply the non-broadcast elements by the scale factor, add the scaled broadcast elements and the offset.
- // Widen and store the result in 32-bit integer.
- const auto vout_21p11_00 = wrapper::vmlal(b_vscaled_offseted_21p11, wrapper::vgetlow(a_vin_16p0_0), a_vscale_5p11);
- const auto vout_21p11_01 = wrapper::vmlal(b_vscaled_offseted_21p11, wrapper::vgethigh(a_vin_16p0_0), a_vscale_5p11);
- const auto vout_21p11_10 = wrapper::vmlal(b_vscaled_offseted_21p11, wrapper::vgetlow(a_vin_16p0_1), a_vscale_5p11);
- const auto vout_21p11_11 = wrapper::vmlal(b_vscaled_offseted_21p11, wrapper::vgethigh(a_vin_16p0_1), a_vscale_5p11);
-
- // Remove 3 bits of the fractional part, round, narrow to 16-bit and saturate the result.
- const auto vout_8p8_0 = wrapper::vcombine(
- wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_00),
- wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_01));
- const auto vout_8p8_1 = wrapper::vcombine(
- wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_10),
- wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_11));
-
- // Remove 8 bits of the fractional part, round, narrow to 8-bit and saturate the result.
- const auto vout_8p0 = wrapper::vcombine(
- wrapper::vqrshrn<8>(vout_8p8_0),
- wrapper::vqrshrn<8>(vout_8p8_1));
-
- // Store the result.
- wrapper::vstore(out_ptr + x, vout_8p0);
- }
-
- // Process the left-over elements.
- for(; x < window_end_x; ++x)
- {
+ int x = window_start_x;
+
+ for (; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ // Load the input.
+ const auto a_vin_8p0 = wrapper::vloadq(a_ptr + x);
+
+ // Widen the non-broadcast elements to signed 16-bit regardless of the input signedness.
+ const auto a_vin_16p0_0 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(a_vin_8p0)));
+ const auto a_vin_16p0_1 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(a_vin_8p0)));
+
+ // Multiply the non-broadcast elements by the scale factor, add the scaled broadcast elements and the offset.
+ // Widen and store the result in 32-bit integer.
+ const auto vout_21p11_00 =
+ wrapper::vmlal(b_vscaled_offseted_21p11, wrapper::vgetlow(a_vin_16p0_0), a_vscale_5p11);
+ const auto vout_21p11_01 =
+ wrapper::vmlal(b_vscaled_offseted_21p11, wrapper::vgethigh(a_vin_16p0_0), a_vscale_5p11);
+ const auto vout_21p11_10 =
+ wrapper::vmlal(b_vscaled_offseted_21p11, wrapper::vgetlow(a_vin_16p0_1), a_vscale_5p11);
+ const auto vout_21p11_11 =
+ wrapper::vmlal(b_vscaled_offseted_21p11, wrapper::vgethigh(a_vin_16p0_1), a_vscale_5p11);
+
+ // Remove 3 bits of the fractional part, round, narrow to 16-bit and saturate the result.
+ const auto vout_8p8_0 =
+ wrapper::vcombine(wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_00),
+ wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_01));
+ const auto vout_8p8_1 =
+ wrapper::vcombine(wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_10),
+ wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_11));
+
+ // Remove 8 bits of the fractional part, round, narrow to 8-bit and saturate the result.
+ const auto vout_8p0 =
+ wrapper::vcombine(wrapper::vqrshrn<8>(vout_8p8_0), wrapper::vqrshrn<8>(vout_8p8_1));
+
+ // Store the result.
+ wrapper::vstore(out_ptr + x, vout_8p0);
+ }
+
+ // Process the left-over elements.
+ for (; x < window_end_x; ++x)
+ {
#ifdef __aarch64__
- out_ptr[x] = wrapper::vqrshrn<8>(wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(int32_t(a_ptr[x]) * a_scale_5p11 + b_scaled_offseted_21p11));
+ out_ptr[x] = wrapper::vqrshrn<8>(wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(
+ int32_t(a_ptr[x]) * a_scale_5p11 + b_scaled_offseted_21p11));
#else // __aarch64__
- out_ptr[x] = utility::clamp<int, ScalarType>(support::cpp11::lround(float(a_ptr[x]) * a_scale + b_scaled_offseted));
+ out_ptr[x] = utility::clamp<int, ScalarType>(
+ support::cpp11::lround(float(a_ptr[x]) * a_scale + b_scaled_offseted));
#endif // __aarch64__
- }
- },
- b_input_it, a_input_it, out_it);
+ }
+ },
+ b_input_it, a_input_it, out_it);
}
else
{
@@ -216,70 +235,85 @@ void add_sub_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITenso
Iterator out_it(dst, win);
execute_window_loop(
- win, [&](const Coordinates &)
- {
- const auto in0_ptr = reinterpret_cast<const ScalarType *>(in0_it.ptr());
- const auto in1_ptr = reinterpret_cast<const ScalarType *>(in1_it.ptr());
- const auto out_ptr = reinterpret_cast<ScalarType *>(out_it.ptr());
-
- int x = window_start_x;
-
- for(; x <= (window_end_x - window_step_x); x += window_step_x)
- {
- // Load the inputs.
- const auto vin0_8p0 = wrapper::vloadq(in0_ptr + x);
- const auto vin1_8p0 = wrapper::vloadq(in1_ptr + x);
-
- // Widen the input elements to signed 16-bit regardless of the input signedness.
- const auto vin0_16p0_0 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(vin0_8p0)));
- const auto vin0_16p0_1 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(vin0_8p0)));
- const auto vin1_16p0_0 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(vin1_8p0)));
- const auto vin1_16p0_1 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(vin1_8p0)));
-
- // Multiply the input elements by the scale factor and add the offset.
- // Widen and store the result in 32-bit integer.
- const auto vscaled0_offseted_21p11_00 = wrapper::vmlal(voffset_21p11, wrapper::vgetlow(vin0_16p0_0), vscale0_5p11);
- const auto vscaled0_offseted_21p11_01 = wrapper::vmlal(voffset_21p11, wrapper::vgethigh(vin0_16p0_0), vscale0_5p11);
- const auto vscaled0_offseted_21p11_10 = wrapper::vmlal(voffset_21p11, wrapper::vgetlow(vin0_16p0_1), vscale0_5p11);
- const auto vscaled0_offseted_21p11_11 = wrapper::vmlal(voffset_21p11, wrapper::vgethigh(vin0_16p0_1), vscale0_5p11);
-
- const auto vout_21p11_00 = wrapper::vmlal(vscaled0_offseted_21p11_00, wrapper::vgetlow(vin1_16p0_0), vscale1_5p11);
- const auto vout_21p11_01 = wrapper::vmlal(vscaled0_offseted_21p11_01, wrapper::vgethigh(vin1_16p0_0), vscale1_5p11);
- const auto vout_21p11_10 = wrapper::vmlal(vscaled0_offseted_21p11_10, wrapper::vgetlow(vin1_16p0_1), vscale1_5p11);
- const auto vout_21p11_11 = wrapper::vmlal(vscaled0_offseted_21p11_11, wrapper::vgethigh(vin1_16p0_1), vscale1_5p11);
-
- // Remove 3 bits of the fractional part, round, narrow to 16-bit and saturate the result.
- const auto vout_8p8_0 = wrapper::vcombine(
- wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_00),
- wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_01));
- const auto vout_8p8_1 = wrapper::vcombine(
- wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_10),
- wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_11));
-
- // Remove 8 bits of the fractional part, round, narrow to 8-bit and saturate the result.
- const auto vout_8p0 = wrapper::vcombine(
- wrapper::vqrshrn<8>(vout_8p8_0),
- wrapper::vqrshrn<8>(vout_8p8_1));
-
- // Store the result.
- wrapper::vstore(out_ptr + x, vout_8p0);
- }
-
- // Process the left-over elements.
- for(; x < window_end_x; ++x)
+ win,
+ [&](const Coordinates &)
{
+ const auto in0_ptr = reinterpret_cast<const ScalarType *>(in0_it.ptr());
+ const auto in1_ptr = reinterpret_cast<const ScalarType *>(in1_it.ptr());
+ const auto out_ptr = reinterpret_cast<ScalarType *>(out_it.ptr());
+
+ int x = window_start_x;
+
+ for (; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ // Load the inputs.
+ const auto vin0_8p0 = wrapper::vloadq(in0_ptr + x);
+ const auto vin1_8p0 = wrapper::vloadq(in1_ptr + x);
+
+ // Widen the input elements to signed 16-bit regardless of the input signedness.
+ const auto vin0_16p0_0 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(vin0_8p0)));
+ const auto vin0_16p0_1 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(vin0_8p0)));
+ const auto vin1_16p0_0 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(vin1_8p0)));
+ const auto vin1_16p0_1 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(vin1_8p0)));
+
+ // Multiply the input elements by the scale factor and add the offset.
+ // Widen and store the result in 32-bit integer.
+ const auto vscaled0_offseted_21p11_00 =
+ wrapper::vmlal(voffset_21p11, wrapper::vgetlow(vin0_16p0_0), vscale0_5p11);
+ const auto vscaled0_offseted_21p11_01 =
+ wrapper::vmlal(voffset_21p11, wrapper::vgethigh(vin0_16p0_0), vscale0_5p11);
+ const auto vscaled0_offseted_21p11_10 =
+ wrapper::vmlal(voffset_21p11, wrapper::vgetlow(vin0_16p0_1), vscale0_5p11);
+ const auto vscaled0_offseted_21p11_11 =
+ wrapper::vmlal(voffset_21p11, wrapper::vgethigh(vin0_16p0_1), vscale0_5p11);
+
+ const auto vout_21p11_00 =
+ wrapper::vmlal(vscaled0_offseted_21p11_00, wrapper::vgetlow(vin1_16p0_0), vscale1_5p11);
+ const auto vout_21p11_01 =
+ wrapper::vmlal(vscaled0_offseted_21p11_01, wrapper::vgethigh(vin1_16p0_0), vscale1_5p11);
+ const auto vout_21p11_10 =
+ wrapper::vmlal(vscaled0_offseted_21p11_10, wrapper::vgetlow(vin1_16p0_1), vscale1_5p11);
+ const auto vout_21p11_11 =
+ wrapper::vmlal(vscaled0_offseted_21p11_11, wrapper::vgethigh(vin1_16p0_1), vscale1_5p11);
+
+ // Remove 3 bits of the fractional part, round, narrow to 16-bit and saturate the result.
+ const auto vout_8p8_0 =
+ wrapper::vcombine(wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_00),
+ wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_01));
+ const auto vout_8p8_1 =
+ wrapper::vcombine(wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_10),
+ wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_11));
+
+ // Remove 8 bits of the fractional part, round, narrow to 8-bit and saturate the result.
+ const auto vout_8p0 =
+ wrapper::vcombine(wrapper::vqrshrn<8>(vout_8p8_0), wrapper::vqrshrn<8>(vout_8p8_1));
+
+ // Store the result.
+ wrapper::vstore(out_ptr + x, vout_8p0);
+ }
+
+ // Process the left-over elements.
+ for (; x < window_end_x; ++x)
+ {
#ifdef __aarch64__
- out_ptr[x] = wrapper::vqrshrn<8>(wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(int32_t(in0_ptr[x]) * in0_scale_5p11 + int32_t(in1_ptr[x]) * in1_scale_5p11 + offset_21p11));
+ out_ptr[x] = wrapper::vqrshrn<8>(wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(
+ int32_t(in0_ptr[x]) * in0_scale_5p11 + int32_t(in1_ptr[x]) * in1_scale_5p11 + offset_21p11));
#else // __aarch64__
- out_ptr[x] = utility::clamp<int, ScalarType>(support::cpp11::lround(float(in0_ptr[x]) * in0_scale + float(in1_ptr[x]) * in1_scale + offset));
+ out_ptr[x] = utility::clamp<int, ScalarType>(
+ support::cpp11::lround(float(in0_ptr[x]) * in0_scale + float(in1_ptr[x]) * in1_scale + offset));
#endif // __aarch64__
- }
- },
- in0_it, in1_it, out_it);
+ }
+ },
+ in0_it, in1_it, out_it);
}
}
-void add_sub_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window, bool is_addition)
+void add_sub_qasymm8_neon(const ITensor *src0,
+ const ITensor *src1,
+ ITensor *dst,
+ const ConvertPolicy &policy,
+ const Window &window,
+ bool is_addition)
{
ARM_COMPUTE_UNUSED(policy);
@@ -304,7 +338,7 @@ void add_sub_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst
const auto scale2 = is_addition ? (iq2_info.scale / oq_info.scale) : (-(iq2_info.scale / oq_info.scale));
const auto offset = float(oq_info.offset) - scale1 * float(iq1_info.offset) - scale2 * float(iq2_info.offset);
- if(is_broadcast_across_x)
+ if (is_broadcast_across_x)
{
const bool is_broadcast_input_2 = input2_win.x().step() == 0;
Window broadcast_win = is_broadcast_input_2 ? input2_win : input1_win;
@@ -324,63 +358,64 @@ void add_sub_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst
Iterator output(dst, win);
execute_window_loop(
- win, [&](const Coordinates &)
- {
- const auto non_broadcast_input_ptr = non_broadcast_input.ptr();
- const auto output_ptr = output.ptr();
-
- const auto broadcast_value = *broadcast_input.ptr();
- const auto bf = vdupq_n_f32(float(broadcast_value) * scale2 + offset);
- const auto bfs = float(broadcast_value) * bf_scale + offset;
-
- // Compute S elements per iteration
- int x = window_start_x;
- for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ win,
+ [&](const Coordinates &)
{
- const uint8x16_t a = vld1q_u8(non_broadcast_input_ptr + x);
+ const auto non_broadcast_input_ptr = non_broadcast_input.ptr();
+ const auto output_ptr = output.ptr();
+
+ const auto broadcast_value = *broadcast_input.ptr();
+ const auto bf = vdupq_n_f32(float(broadcast_value) * scale2 + offset);
+ const auto bfs = float(broadcast_value) * bf_scale + offset;
- const auto a_u16_0 = vmovl_u8(vget_low_u8(a));
- const auto a_u16_1 = vmovl_u8(vget_high_u8(a));
+ // Compute S elements per iteration
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const uint8x16_t a = vld1q_u8(non_broadcast_input_ptr + x);
- const auto af_0 = vmlaq_f32(bf, vcvtq_f32_u32(vmovl_u16(vget_low_u16(a_u16_0))), vscale1);
- const auto af_1 = vmlaq_f32(bf, vcvtq_f32_u32(vmovl_u16(vget_high_u16(a_u16_0))), vscale1);
- const auto af_2 = vmlaq_f32(bf, vcvtq_f32_u32(vmovl_u16(vget_low_u16(a_u16_1))), vscale1);
- const auto af_3 = vmlaq_f32(bf, vcvtq_f32_u32(vmovl_u16(vget_high_u16(a_u16_1))), vscale1);
+ const auto a_u16_0 = vmovl_u8(vget_low_u8(a));
+ const auto a_u16_1 = vmovl_u8(vget_high_u8(a));
- int32x4_t rf_0{};
- int32x4_t rf_1{};
- int32x4_t rf_2{};
- int32x4_t rf_3{};
+ const auto af_0 = vmlaq_f32(bf, vcvtq_f32_u32(vmovl_u16(vget_low_u16(a_u16_0))), vscale1);
+ const auto af_1 = vmlaq_f32(bf, vcvtq_f32_u32(vmovl_u16(vget_high_u16(a_u16_0))), vscale1);
+ const auto af_2 = vmlaq_f32(bf, vcvtq_f32_u32(vmovl_u16(vget_low_u16(a_u16_1))), vscale1);
+ const auto af_3 = vmlaq_f32(bf, vcvtq_f32_u32(vmovl_u16(vget_high_u16(a_u16_1))), vscale1);
+
+ int32x4_t rf_0{};
+ int32x4_t rf_1{};
+ int32x4_t rf_2{};
+ int32x4_t rf_3{};
#ifdef __aarch64__
- rf_0 = vcvtnq_s32_f32(af_0);
- rf_1 = vcvtnq_s32_f32(af_1);
- rf_2 = vcvtnq_s32_f32(af_2);
- rf_3 = vcvtnq_s32_f32(af_3);
+ rf_0 = vcvtnq_s32_f32(af_0);
+ rf_1 = vcvtnq_s32_f32(af_1);
+ rf_2 = vcvtnq_s32_f32(af_2);
+ rf_3 = vcvtnq_s32_f32(af_3);
#else //__aarch64__
- rf_0 = vcvtq_s32_f32(af_0);
- rf_1 = vcvtq_s32_f32(af_1);
- rf_2 = vcvtq_s32_f32(af_2);
- rf_3 = vcvtq_s32_f32(af_3);
+ rf_0 = vcvtq_s32_f32(af_0);
+ rf_1 = vcvtq_s32_f32(af_1);
+ rf_2 = vcvtq_s32_f32(af_2);
+ rf_3 = vcvtq_s32_f32(af_3);
#endif //__aarch64__
- const uint8x8_t pa = vqmovun_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
- const uint8x8_t pb = vqmovun_s16(vcombine_s16(vqmovn_s32(rf_2), vqmovn_s32(rf_3)));
- vst1q_u8(output_ptr + x, vcombine_u8(pa, pb));
- }
+ const uint8x8_t pa = vqmovun_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
+ const uint8x8_t pb = vqmovun_s16(vcombine_s16(vqmovn_s32(rf_2), vqmovn_s32(rf_3)));
+ vst1q_u8(output_ptr + x, vcombine_u8(pa, pb));
+ }
- // Compute left-over elements
- for(; x < window_end_x; ++x)
- {
- const auto result = float(non_broadcast_input_ptr[x]) * af_scale + bfs;
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ const auto result = float(non_broadcast_input_ptr[x]) * af_scale + bfs;
#ifdef __aarch64__
- output_ptr[x] = utility::clamp<int, uint8_t>(support::cpp11::lround(result));
+ output_ptr[x] = utility::clamp<int, uint8_t>(support::cpp11::lround(result));
#else // __aarch64__
- output_ptr[x] = utility::clamp<int, uint8_t>(support::cpp11::trunc(result));
+ output_ptr[x] = utility::clamp<int, uint8_t>(support::cpp11::trunc(result));
#endif // __aarch64__
- }
- },
- broadcast_input, non_broadcast_input, output);
+ }
+ },
+ broadcast_input, non_broadcast_input, output);
}
else
{
@@ -397,72 +432,78 @@ void add_sub_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst
const auto voffset = vdupq_n_f32(offset);
execute_window_loop(
- win, [&](const Coordinates &)
- {
- const auto input1_ptr = input1.ptr();
- const auto input2_ptr = input2.ptr();
- const auto output_ptr = output.ptr();
-
- // Compute S elements per iteration
- int x = window_start_x;
- for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ win,
+ [&](const Coordinates &)
{
- const uint8x16_t a = vld1q_u8(input1_ptr + x);
- const uint8x16_t b = vld1q_u8(input2_ptr + x);
-
- const auto a_u16_0 = vmovl_u8(vget_low_u8(a));
- const auto a_u16_1 = vmovl_u8(vget_high_u8(a));
- const auto b_u16_0 = vmovl_u8(vget_low_u8(b));
- const auto b_u16_1 = vmovl_u8(vget_high_u8(b));
-
- const auto af_0 = vmlaq_f32(voffset, vcvtq_f32_u32(vmovl_u16(vget_low_u16(a_u16_0))), vscale1);
- const auto af_1 = vmlaq_f32(voffset, vcvtq_f32_u32(vmovl_u16(vget_high_u16(a_u16_0))), vscale1);
- const auto af_2 = vmlaq_f32(voffset, vcvtq_f32_u32(vmovl_u16(vget_low_u16(a_u16_1))), vscale1);
- const auto af_3 = vmlaq_f32(voffset, vcvtq_f32_u32(vmovl_u16(vget_high_u16(a_u16_1))), vscale1);
-
- const auto bf_0 = vmlaq_f32(af_0, vcvtq_f32_u32(vmovl_u16(vget_low_u16(b_u16_0))), vscale2);
- const auto bf_1 = vmlaq_f32(af_1, vcvtq_f32_u32(vmovl_u16(vget_high_u16(b_u16_0))), vscale2);
- const auto bf_2 = vmlaq_f32(af_2, vcvtq_f32_u32(vmovl_u16(vget_low_u16(b_u16_1))), vscale2);
- const auto bf_3 = vmlaq_f32(af_3, vcvtq_f32_u32(vmovl_u16(vget_high_u16(b_u16_1))), vscale2);
-
- int32x4_t rf_0{};
- int32x4_t rf_1{};
- int32x4_t rf_2{};
- int32x4_t rf_3{};
+ const auto input1_ptr = input1.ptr();
+ const auto input2_ptr = input2.ptr();
+ const auto output_ptr = output.ptr();
+
+ // Compute S elements per iteration
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const uint8x16_t a = vld1q_u8(input1_ptr + x);
+ const uint8x16_t b = vld1q_u8(input2_ptr + x);
+
+ const auto a_u16_0 = vmovl_u8(vget_low_u8(a));
+ const auto a_u16_1 = vmovl_u8(vget_high_u8(a));
+ const auto b_u16_0 = vmovl_u8(vget_low_u8(b));
+ const auto b_u16_1 = vmovl_u8(vget_high_u8(b));
+
+ const auto af_0 = vmlaq_f32(voffset, vcvtq_f32_u32(vmovl_u16(vget_low_u16(a_u16_0))), vscale1);
+ const auto af_1 = vmlaq_f32(voffset, vcvtq_f32_u32(vmovl_u16(vget_high_u16(a_u16_0))), vscale1);
+ const auto af_2 = vmlaq_f32(voffset, vcvtq_f32_u32(vmovl_u16(vget_low_u16(a_u16_1))), vscale1);
+ const auto af_3 = vmlaq_f32(voffset, vcvtq_f32_u32(vmovl_u16(vget_high_u16(a_u16_1))), vscale1);
+
+ const auto bf_0 = vmlaq_f32(af_0, vcvtq_f32_u32(vmovl_u16(vget_low_u16(b_u16_0))), vscale2);
+ const auto bf_1 = vmlaq_f32(af_1, vcvtq_f32_u32(vmovl_u16(vget_high_u16(b_u16_0))), vscale2);
+ const auto bf_2 = vmlaq_f32(af_2, vcvtq_f32_u32(vmovl_u16(vget_low_u16(b_u16_1))), vscale2);
+ const auto bf_3 = vmlaq_f32(af_3, vcvtq_f32_u32(vmovl_u16(vget_high_u16(b_u16_1))), vscale2);
+
+ int32x4_t rf_0{};
+ int32x4_t rf_1{};
+ int32x4_t rf_2{};
+ int32x4_t rf_3{};
#ifdef __aarch64__
- rf_0 = vcvtnq_s32_f32(bf_0);
- rf_1 = vcvtnq_s32_f32(bf_1);
- rf_2 = vcvtnq_s32_f32(bf_2);
- rf_3 = vcvtnq_s32_f32(bf_3);
+ rf_0 = vcvtnq_s32_f32(bf_0);
+ rf_1 = vcvtnq_s32_f32(bf_1);
+ rf_2 = vcvtnq_s32_f32(bf_2);
+ rf_3 = vcvtnq_s32_f32(bf_3);
#else //__aarch64__
- rf_0 = vcvtq_s32_f32(bf_0);
- rf_1 = vcvtq_s32_f32(bf_1);
- rf_2 = vcvtq_s32_f32(bf_2);
- rf_3 = vcvtq_s32_f32(bf_3);
+ rf_0 = vcvtq_s32_f32(bf_0);
+ rf_1 = vcvtq_s32_f32(bf_1);
+ rf_2 = vcvtq_s32_f32(bf_2);
+ rf_3 = vcvtq_s32_f32(bf_3);
#endif //__aarch64__
- const uint8x8_t pa = vqmovun_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
- const uint8x8_t pb = vqmovun_s16(vcombine_s16(vqmovn_s32(rf_2), vqmovn_s32(rf_3)));
- vst1q_u8(output_ptr + x, vcombine_u8(pa, pb));
- }
+ const uint8x8_t pa = vqmovun_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
+ const uint8x8_t pb = vqmovun_s16(vcombine_s16(vqmovn_s32(rf_2), vqmovn_s32(rf_3)));
+ vst1q_u8(output_ptr + x, vcombine_u8(pa, pb));
+ }
- // Compute left-over elements
- for(; x < window_end_x; ++x)
- {
- const auto result = float(input1_ptr[x]) * scale1 + float(input2_ptr[x]) * scale2 + offset;
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ const auto result = float(input1_ptr[x]) * scale1 + float(input2_ptr[x]) * scale2 + offset;
#ifdef __aarch64__
- output_ptr[x] = utility::clamp<int, uint8_t>(support::cpp11::lround(result));
+ output_ptr[x] = utility::clamp<int, uint8_t>(support::cpp11::lround(result));
#else // __aarch64__
- output_ptr[x] = utility::clamp<int, uint8_t>(support::cpp11::trunc(result));
+ output_ptr[x] = utility::clamp<int, uint8_t>(support::cpp11::trunc(result));
#endif // __aarch64__
- }
- },
- input1, input2, output);
+ }
+ },
+ input1, input2, output);
}
}
-void add_sub_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window, bool is_addition)
+void add_sub_qasymm8_signed_neon(const ITensor *src0,
+ const ITensor *src1,
+ ITensor *dst,
+ const ConvertPolicy &policy,
+ const Window &window,
+ bool is_addition)
{
ARM_COMPUTE_UNUSED(policy);
@@ -487,7 +528,7 @@ void add_sub_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITens
const auto scale2 = is_addition ? (iq2_info.scale / oq_info.scale) : (-(iq2_info.scale / oq_info.scale));
const auto offset = float(oq_info.offset) - scale1 * float(iq1_info.offset) - scale2 * float(iq2_info.offset);
- if(is_broadcast_across_x)
+ if (is_broadcast_across_x)
{
const bool is_broadcast_input_2 = input2_win.x().step() == 0;
Window broadcast_win = is_broadcast_input_2 ? input2_win : input1_win;
@@ -507,63 +548,64 @@ void add_sub_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITens
Iterator output(dst, win);
execute_window_loop(
- win, [&](const Coordinates &)
- {
- const auto non_broadcast_input_ptr = reinterpret_cast<const int8_t *>(non_broadcast_input.ptr());
- const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
-
- const auto broadcast_value = *reinterpret_cast<const int8_t *>(broadcast_input.ptr());
- const auto bf = vdupq_n_f32(float(broadcast_value) * scale2 + offset);
- const auto bfs = float(broadcast_value) * bf_scale + offset;
-
- // Compute S elements per iteration
- int x = window_start_x;
- for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ win,
+ [&](const Coordinates &)
{
- const int8x16_t a = vld1q_s8(non_broadcast_input_ptr + x);
+ const auto non_broadcast_input_ptr = reinterpret_cast<const int8_t *>(non_broadcast_input.ptr());
+ const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
- const auto a_s16_0 = vmovl_s8(vget_low_s8(a));
- const auto a_s16_1 = vmovl_s8(vget_high_s8(a));
+ const auto broadcast_value = *reinterpret_cast<const int8_t *>(broadcast_input.ptr());
+ const auto bf = vdupq_n_f32(float(broadcast_value) * scale2 + offset);
+ const auto bfs = float(broadcast_value) * bf_scale + offset;
- const auto af_0 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_0))), vscale1);
- const auto af_1 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_0))), vscale1);
- const auto af_2 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_1))), vscale1);
- const auto af_3 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_1))), vscale1);
+ // Compute S elements per iteration
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const int8x16_t a = vld1q_s8(non_broadcast_input_ptr + x);
- int32x4_t rf_0{};
- int32x4_t rf_1{};
- int32x4_t rf_2{};
- int32x4_t rf_3{};
+ const auto a_s16_0 = vmovl_s8(vget_low_s8(a));
+ const auto a_s16_1 = vmovl_s8(vget_high_s8(a));
+
+ const auto af_0 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_0))), vscale1);
+ const auto af_1 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_0))), vscale1);
+ const auto af_2 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_1))), vscale1);
+ const auto af_3 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_1))), vscale1);
+
+ int32x4_t rf_0{};
+ int32x4_t rf_1{};
+ int32x4_t rf_2{};
+ int32x4_t rf_3{};
#ifdef __aarch64__
- rf_0 = vcvtnq_s32_f32(af_0);
- rf_1 = vcvtnq_s32_f32(af_1);
- rf_2 = vcvtnq_s32_f32(af_2);
- rf_3 = vcvtnq_s32_f32(af_3);
+ rf_0 = vcvtnq_s32_f32(af_0);
+ rf_1 = vcvtnq_s32_f32(af_1);
+ rf_2 = vcvtnq_s32_f32(af_2);
+ rf_3 = vcvtnq_s32_f32(af_3);
#else //__aarch64__
- rf_0 = vcvtq_s32_f32(af_0);
- rf_1 = vcvtq_s32_f32(af_1);
- rf_2 = vcvtq_s32_f32(af_2);
- rf_3 = vcvtq_s32_f32(af_3);
+ rf_0 = vcvtq_s32_f32(af_0);
+ rf_1 = vcvtq_s32_f32(af_1);
+ rf_2 = vcvtq_s32_f32(af_2);
+ rf_3 = vcvtq_s32_f32(af_3);
#endif //__aarch64__
- const int8x8_t pa = vqmovn_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
- const int8x8_t pb = vqmovn_s16(vcombine_s16(vqmovn_s32(rf_2), vqmovn_s32(rf_3)));
- vst1q_s8(output_ptr + x, vcombine_s8(pa, pb));
- }
+ const int8x8_t pa = vqmovn_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
+ const int8x8_t pb = vqmovn_s16(vcombine_s16(vqmovn_s32(rf_2), vqmovn_s32(rf_3)));
+ vst1q_s8(output_ptr + x, vcombine_s8(pa, pb));
+ }
- // Compute left-over elements
- for(; x < window_end_x; ++x)
- {
- const auto result = float(non_broadcast_input_ptr[x]) * af_scale + bfs;
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ const auto result = float(non_broadcast_input_ptr[x]) * af_scale + bfs;
#ifdef __aarch64__
- output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::lround(result));
+ output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::lround(result));
#else // __aarch64__
- output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::trunc(result));
+ output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::trunc(result));
#endif // __aarch64__
- }
- },
- broadcast_input, non_broadcast_input, output);
+ }
+ },
+ broadcast_input, non_broadcast_input, output);
}
else
{
@@ -580,79 +622,102 @@ void add_sub_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITens
const auto voffset = vdupq_n_f32(offset);
execute_window_loop(
- win, [&](const Coordinates &)
- {
- const auto input1_ptr = reinterpret_cast<const int8_t *>(input1.ptr());
- const auto input2_ptr = reinterpret_cast<const int8_t *>(input2.ptr());
- const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
-
- // Compute S elements per iteration
- int x = window_start_x;
- for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ win,
+ [&](const Coordinates &)
{
- const int8x16_t a = vld1q_s8(input1_ptr + x);
- const int8x16_t b = vld1q_s8(input2_ptr + x);
-
- const auto a_s16_0 = vmovl_s8(vget_low_s8(a));
- const auto a_s16_1 = vmovl_s8(vget_high_s8(a));
- const auto b_s16_0 = vmovl_s8(vget_low_s8(b));
- const auto b_s16_1 = vmovl_s8(vget_high_s8(b));
-
- const auto af_0 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_0))), vscale1);
- const auto af_1 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_0))), vscale1);
- const auto af_2 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_1))), vscale1);
- const auto af_3 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_1))), vscale1);
-
- const auto bf_0 = vmlaq_f32(af_0, vcvtq_f32_s32(vmovl_s16(vget_low_s16(b_s16_0))), vscale2);
- const auto bf_1 = vmlaq_f32(af_1, vcvtq_f32_s32(vmovl_s16(vget_high_s16(b_s16_0))), vscale2);
- const auto bf_2 = vmlaq_f32(af_2, vcvtq_f32_s32(vmovl_s16(vget_low_s16(b_s16_1))), vscale2);
- const auto bf_3 = vmlaq_f32(af_3, vcvtq_f32_s32(vmovl_s16(vget_high_s16(b_s16_1))), vscale2);
-
- int32x4_t rf_0{};
- int32x4_t rf_1{};
- int32x4_t rf_2{};
- int32x4_t rf_3{};
+ const auto input1_ptr = reinterpret_cast<const int8_t *>(input1.ptr());
+ const auto input2_ptr = reinterpret_cast<const int8_t *>(input2.ptr());
+ const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
+
+ // Compute S elements per iteration
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const int8x16_t a = vld1q_s8(input1_ptr + x);
+ const int8x16_t b = vld1q_s8(input2_ptr + x);
+
+ const auto a_s16_0 = vmovl_s8(vget_low_s8(a));
+ const auto a_s16_1 = vmovl_s8(vget_high_s8(a));
+ const auto b_s16_0 = vmovl_s8(vget_low_s8(b));
+ const auto b_s16_1 = vmovl_s8(vget_high_s8(b));
+
+ const auto af_0 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_0))), vscale1);
+ const auto af_1 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_0))), vscale1);
+ const auto af_2 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_1))), vscale1);
+ const auto af_3 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_1))), vscale1);
+
+ const auto bf_0 = vmlaq_f32(af_0, vcvtq_f32_s32(vmovl_s16(vget_low_s16(b_s16_0))), vscale2);
+ const auto bf_1 = vmlaq_f32(af_1, vcvtq_f32_s32(vmovl_s16(vget_high_s16(b_s16_0))), vscale2);
+ const auto bf_2 = vmlaq_f32(af_2, vcvtq_f32_s32(vmovl_s16(vget_low_s16(b_s16_1))), vscale2);
+ const auto bf_3 = vmlaq_f32(af_3, vcvtq_f32_s32(vmovl_s16(vget_high_s16(b_s16_1))), vscale2);
+
+ int32x4_t rf_0{};
+ int32x4_t rf_1{};
+ int32x4_t rf_2{};
+ int32x4_t rf_3{};
#ifdef __aarch64__
- rf_0 = vcvtnq_s32_f32(bf_0);
- rf_1 = vcvtnq_s32_f32(bf_1);
- rf_2 = vcvtnq_s32_f32(bf_2);
- rf_3 = vcvtnq_s32_f32(bf_3);
+ rf_0 = vcvtnq_s32_f32(bf_0);
+ rf_1 = vcvtnq_s32_f32(bf_1);
+ rf_2 = vcvtnq_s32_f32(bf_2);
+ rf_3 = vcvtnq_s32_f32(bf_3);
#else //__aarch64__
- rf_0 = vcvtq_s32_f32(bf_0);
- rf_1 = vcvtq_s32_f32(bf_1);
- rf_2 = vcvtq_s32_f32(bf_2);
- rf_3 = vcvtq_s32_f32(bf_3);
+ rf_0 = vcvtq_s32_f32(bf_0);
+ rf_1 = vcvtq_s32_f32(bf_1);
+ rf_2 = vcvtq_s32_f32(bf_2);
+ rf_3 = vcvtq_s32_f32(bf_3);
#endif //__aarch64__
- const int8x8_t pa = vqmovn_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
- const int8x8_t pb = vqmovn_s16(vcombine_s16(vqmovn_s32(rf_2), vqmovn_s32(rf_3)));
- vst1q_s8(output_ptr + x, vcombine_s8(pa, pb));
- }
+ const int8x8_t pa = vqmovn_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
+ const int8x8_t pb = vqmovn_s16(vcombine_s16(vqmovn_s32(rf_2), vqmovn_s32(rf_3)));
+ vst1q_s8(output_ptr + x, vcombine_s8(pa, pb));
+ }
- // Compute left-over elements
- for(; x < window_end_x; ++x)
- {
- const auto result = float(input1_ptr[x]) * scale1 + float(input2_ptr[x]) * scale2 + offset;
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ const auto result = float(input1_ptr[x]) * scale1 + float(input2_ptr[x]) * scale2 + offset;
#ifdef __aarch64__
- output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::lround(result));
+ output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::lround(result));
#else // __aarch64__
- output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::trunc(result));
+ output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::trunc(result));
#endif // __aarch64__
- }
- },
- input1, input2, output);
+ }
+ },
+ input1, input2, output);
}
}
-template void add_q8_neon_fixedpoint<int8_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
-template void add_q8_neon_fixedpoint<uint8_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
-
-template void add_sub_q8_neon_fixedpoint<int8_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window, bool is_addition);
-template void add_sub_q8_neon_fixedpoint<uint8_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window, bool is_addition);
-
-void add_sub_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window, bool is_addition);
-void add_sub_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window, bool is_addition);
+template void add_q8_neon_fixedpoint<int8_t>(
+ const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
+template void add_q8_neon_fixedpoint<uint8_t>(
+ const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
+
+template void add_sub_q8_neon_fixedpoint<int8_t>(const ITensor *src0,
+ const ITensor *src1,
+ ITensor *dst,
+ const ConvertPolicy &policy,
+ const Window &window,
+ bool is_addition);
+template void add_sub_q8_neon_fixedpoint<uint8_t>(const ITensor *src0,
+ const ITensor *src1,
+ ITensor *dst,
+ const ConvertPolicy &policy,
+ const Window &window,
+ bool is_addition);
+
+void add_sub_qasymm8_neon(const ITensor *src0,
+ const ITensor *src1,
+ ITensor *dst,
+ const ConvertPolicy &policy,
+ const Window &window,
+ bool is_addition);
+void add_sub_qasymm8_signed_neon(const ITensor *src0,
+ const ITensor *src1,
+ ITensor *dst,
+ const ConvertPolicy &policy,
+ const Window &window,
+ bool is_addition);
} // namespace cpu
} // namespace arm_compute
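
Taken together, impl.cpp's fixed-point path works the way its comments describe: each scale factor is rounded to a signed 5.11 value (hence the |scale| < 16 gate), products accumulate in 21.11 (hence the 2^20 - 1 bound, which keeps the integer part inside an int32), and the result is narrowed with rounding and saturation, 3 fractional bits first and then 8. Below is a scalar model of that arithmetic as a hedged sketch, not the library's kernel; it drops all 11 fractional bits in one rounded shift, so the vector code's two-step saturating narrowing can differ from it in the last bit:

#include <algorithm>
#include <cmath>
#include <cstdint>

// Scalar model of the Q5.11 / Q21.11 scheme (illustrative helper, not ACL API).
int8_t add_q8_fixedpoint_scalar(int8_t in0, int8_t in1, float scale0, float scale1, float offset)
{
    // Scales as signed 5.11 fixed point; valid only while |scale| < 16 (the 15.f gate).
    const int16_t s0_5p11   = static_cast<int16_t>(std::lround(scale0 * 2048.f));
    const int16_t s1_5p11   = static_cast<int16_t>(std::lround(scale1 * 2048.f));
    const int32_t off_21p11 = static_cast<int32_t>(std::lround(offset * 2048.f));

    // Accumulate in 21.11: the max_acc <= 2^20 - 1 check guarantees no int32 overflow.
    const int32_t acc_21p11 = int32_t(in0) * s0_5p11 + int32_t(in1) * s1_5p11 + off_21p11;

    // Drop the 11 fractional bits with rounding, then saturate to int8.
    const int rounded = static_cast<int>((acc_21p11 + (1 << 10)) >> 11);
    return static_cast<int8_t>(std::clamp(rounded, -128, 127));
}
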
diff --git a/src/cpu/kernels/add/generic/neon/impl.h b/src/cpu/kernels/add/generic/neon/impl.h
index fb786c5bc1..faa99baffe 100644
--- a/src/cpu/kernels/add/generic/neon/impl.h
+++ b/src/cpu/kernels/add/generic/neon/impl.h
@@ -26,8 +26,9 @@
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Types.h"
-#include "arm_compute/core/Window.h"
#include "arm_compute/core/utils/misc/Traits.h"
+#include "arm_compute/core/Window.h"
+
#include "src/core/NEON/wrapper/wrapper.h"
namespace arm_compute
@@ -35,7 +36,8 @@ namespace arm_compute
namespace cpu
{
template <typename ScalarType>
-void add_same_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
+void add_same_neon(
+ const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
/** SIMD vector tag type. */
using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<ScalarType, wrapper::traits::BitWidth::W128>;
@@ -53,7 +55,7 @@ void add_same_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const
const auto window_end_x = static_cast<int>(window.x().end());
const bool is_broadcast_across_x = src0->info()->tensor_shape().x() != src1->info()->tensor_shape().x();
- if(is_broadcast_across_x)
+ if (is_broadcast_across_x)
{
const bool is_broadcast_input_2 = input2_win.x().step() == 0;
Window broadcast_win = is_broadcast_input_2 ? input2_win : input1_win;
@@ -69,31 +71,36 @@ void add_same_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const
Iterator output(dst, win);
execute_window_loop(
- win, [&](const Coordinates &)
- {
- const auto non_broadcast_input_ptr = reinterpret_cast<const ScalarType *>(non_broadcast_input.ptr());
- const auto output_ptr = reinterpret_cast<ScalarType *>(output.ptr());
-
- const ScalarType broadcast_value = *reinterpret_cast<const ScalarType *>(broadcast_input.ptr());
- const auto broadcast_value_vec = wrapper::vdup_n(broadcast_value, ExactTagType{});
-
- // Compute S elements per iteration
- int x = window_start_x;
- for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ win,
+ [&](const Coordinates &)
{
- const auto non_broadcast_v = wrapper::vloadq(non_broadcast_input_ptr + x);
- const auto res = (policy == ConvertPolicy::SATURATE) ? wrapper::vqadd(broadcast_value_vec, non_broadcast_v) : wrapper::vadd(broadcast_value_vec, non_broadcast_v);
- wrapper::vstore(output_ptr + x, res);
- }
-
- // Compute left-over elements
- for(; x < window_end_x; ++x)
- {
- const auto non_broadcast_v = *(non_broadcast_input_ptr + x);
- *(output_ptr + x) = (policy == ConvertPolicy::SATURATE) ? wrapper::add_sat(broadcast_value, non_broadcast_v) : broadcast_value + non_broadcast_v;
- }
- },
- broadcast_input, non_broadcast_input, output);
+ const auto non_broadcast_input_ptr = reinterpret_cast<const ScalarType *>(non_broadcast_input.ptr());
+ const auto output_ptr = reinterpret_cast<ScalarType *>(output.ptr());
+
+ const ScalarType broadcast_value = *reinterpret_cast<const ScalarType *>(broadcast_input.ptr());
+ const auto broadcast_value_vec = wrapper::vdup_n(broadcast_value, ExactTagType{});
+
+ // Compute S elements per iteration
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const auto non_broadcast_v = wrapper::vloadq(non_broadcast_input_ptr + x);
+ const auto res = (policy == ConvertPolicy::SATURATE)
+ ? wrapper::vqadd(broadcast_value_vec, non_broadcast_v)
+ : wrapper::vadd(broadcast_value_vec, non_broadcast_v);
+ wrapper::vstore(output_ptr + x, res);
+ }
+
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ const auto non_broadcast_v = *(non_broadcast_input_ptr + x);
+ *(output_ptr + x) = (policy == ConvertPolicy::SATURATE)
+ ? wrapper::add_sat(broadcast_value, non_broadcast_v)
+ : broadcast_value + non_broadcast_v;
+ }
+ },
+ broadcast_input, non_broadcast_input, output);
}
else
{
@@ -106,31 +113,34 @@ void add_same_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const
Iterator output(dst, win);
execute_window_loop(
- win, [&](const Coordinates &)
- {
- const auto input1_ptr = reinterpret_cast<const ScalarType *>(input1.ptr());
- const auto input2_ptr = reinterpret_cast<const ScalarType *>(input2.ptr());
- const auto output_ptr = reinterpret_cast<ScalarType *>(output.ptr());
-
- // Compute S elements per iteration
- int x = window_start_x;
- for(; x <= (window_end_x - window_step_x); x += window_step_x)
- {
- const auto val1 = wrapper::vloadq(input1_ptr + x);
- const auto val2 = wrapper::vloadq(input2_ptr + x);
- const auto res = (policy == ConvertPolicy::SATURATE) ? wrapper::vqadd(val1, val2) : wrapper::vadd(val1, val2);
- wrapper::vstore(output_ptr + x, res);
- }
-
- // Compute left-over elements
- for(; x < window_end_x; ++x)
+ win,
+ [&](const Coordinates &)
{
- const auto val1 = *(input1_ptr + x);
- const auto val2 = *(input2_ptr + x);
- *(output_ptr + x) = (policy == ConvertPolicy::SATURATE) ? wrapper::add_sat(val1, val2) : val1 + val2;
- }
- },
- input1, input2, output);
+ const auto input1_ptr = reinterpret_cast<const ScalarType *>(input1.ptr());
+ const auto input2_ptr = reinterpret_cast<const ScalarType *>(input2.ptr());
+ const auto output_ptr = reinterpret_cast<ScalarType *>(output.ptr());
+
+ // Compute S elements per iteration
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const auto val1 = wrapper::vloadq(input1_ptr + x);
+ const auto val2 = wrapper::vloadq(input2_ptr + x);
+ const auto res =
+ (policy == ConvertPolicy::SATURATE) ? wrapper::vqadd(val1, val2) : wrapper::vadd(val1, val2);
+ wrapper::vstore(output_ptr + x, res);
+ }
+
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ const auto val1 = *(input1_ptr + x);
+ const auto val2 = *(input2_ptr + x);
+ *(output_ptr + x) =
+ (policy == ConvertPolicy::SATURATE) ? wrapper::add_sat(val1, val2) : val1 + val2;
+ }
+ },
+ input1, input2, output);
}
}
@@ -138,17 +148,36 @@ bool add_q8_neon_fixedpoint_possible(const ITensorInfo *src0, const ITensorInfo
bool sub_q8_neon_fixedpoint_possible(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *dst);
-bool add_sub_q8_neon_fixedpoint_possible(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *dst, bool is_addition);
-
-void add_sub_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window, bool is_addition);
-
-void add_sub_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window, bool is_addition);
+bool add_sub_q8_neon_fixedpoint_possible(const ITensorInfo *src0,
+ const ITensorInfo *src1,
+ const ITensorInfo *dst,
+ bool is_addition);
+
+void add_sub_qasymm8_neon(const ITensor *src0,
+ const ITensor *src1,
+ ITensor *dst,
+ const ConvertPolicy &policy,
+ const Window &window,
+ bool is_addition);
+
+void add_sub_qasymm8_signed_neon(const ITensor *src0,
+ const ITensor *src1,
+ ITensor *dst,
+ const ConvertPolicy &policy,
+ const Window &window,
+ bool is_addition);
template <typename ScalarType>
-void add_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
+void add_q8_neon_fixedpoint(
+ const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
template <typename ScalarType>
-void add_sub_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window, bool is_addition);
+void add_sub_q8_neon_fixedpoint(const ITensor *src0,
+ const ITensor *src1,
+ ITensor *dst,
+ const ConvertPolicy &policy,
+ const Window &window,
+ bool is_addition);
} // namespace cpu
} // namespace arm_compute
#endif // SRC_CORE_NEON_KERNELS_ADD_IMPL_H
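
impl.h carries the generic add_same_neon template shown above; the only branch in its inner loops is the ConvertPolicy, which selects saturating (wrapper::vqadd / add_sat) versus wrapping (wrapper::vadd / plain +) arithmetic. In raw NEON for one concrete type, the distinction is just the choice of intrinsic (a sketch, not the wrapper layer itself):

#include <arm_neon.h>

// SATURATE clamps each int16 lane to [-32768, 32767]; WRAP is modular.
static inline int16x8_t add_with_policy_s16(int16x8_t a, int16x8_t b, bool saturate)
{
    return saturate ? vqaddq_s16(a, b) : vaddq_s16(a, b);
}
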
diff --git a/src/cpu/kernels/add/generic/neon/integer.cpp b/src/cpu/kernels/add/generic/neon/integer.cpp
index 5698d6d552..f0bcebc9d2 100644
--- a/src/cpu/kernels/add/generic/neon/integer.cpp
+++ b/src/cpu/kernels/add/generic/neon/integer.cpp
@@ -28,19 +28,22 @@ namespace arm_compute
{
namespace cpu
{
-void add_u8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
+void add_u8_neon(
+ const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
return add_same_neon<uint8_t>(src0, src1, dst, policy, window);
}
-void add_s16_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
+void add_s16_neon(
+ const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
return add_same_neon<int16_t>(src0, src1, dst, policy, window);
}
-void add_s32_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
+void add_s32_neon(
+ const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
return add_same_neon<int32_t>(src0, src1, dst, policy, window);
}
-}
+} // namespace cpu
} // namespace arm_compute
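
integer.cpp repeats the same one-line dispatch for uint8_t, int16_t and int32_t: one template body serves all three element types, with the vector width resolved through the wrapper traits. The shape of that pattern, reduced to a plain scalar template (illustrative, not the wrapper machinery):

#include <cstdint>

template <typename T>
void add_same(const T *a, const T *b, T *out, int n)
{
    for (int i = 0; i < n; ++i)
    {
        out[i] = static_cast<T>(a[i] + b[i]); // wrapping add; the real kernel also offers saturation
    }
}

// One body, three instantiations, mirroring add_u8/s16/s32_neon:
template void add_same<uint8_t>(const uint8_t *, const uint8_t *, uint8_t *, int);
template void add_same<int16_t>(const int16_t *, const int16_t *, int16_t *, int);
template void add_same<int32_t>(const int32_t *, const int32_t *, int32_t *, int);
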
diff --git a/src/cpu/kernels/add/generic/neon/qasymm8.cpp b/src/cpu/kernels/add/generic/neon/qasymm8.cpp
index 69cca956c8..8195d229d9 100644
--- a/src/cpu/kernels/add/generic/neon/qasymm8.cpp
+++ b/src/cpu/kernels/add/generic/neon/qasymm8.cpp
@@ -23,15 +23,17 @@
*/
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Types.h"
+
#include "src/cpu/kernels/add/generic/neon/impl.h"
namespace arm_compute
{
namespace cpu
{
-void add_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
+void add_qasymm8_neon(
+ const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
add_sub_qasymm8_neon(src0, src1, dst, policy, window, true /*is_addition*/);
}
} // namespace cpu
-} // namespace arm_compute
\ No newline at end of file
+} // namespace arm_compute
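
qasymm8.cpp is again a one-line dispatch into add_sub_qasymm8_neon, whose float path (impl.cpp above) folds both quantizations into two scales and one offset: result = a * scale1 + b * scale2 + offset. A scalar model of that requantization as a hedged sketch (illustrative helper, not the library's API); note the diff's #ifdef, where aarch64 rounds with lround and 32-bit Arm truncates:

#include <algorithm>
#include <cmath>
#include <cstdint>

// Scalar model of the QASYMM8 float path in add_sub_qasymm8_neon.
uint8_t add_qasymm8_scalar(uint8_t a, uint8_t b,
                           float sa, int32_t za,  // input 1 scale / offset
                           float sb, int32_t zb,  // input 2 scale / offset
                           float so, int32_t zo)  // output  scale / offset
{
    const float scale1 = sa / so;
    const float scale2 = sb / so;
    const float offset = float(zo) - scale1 * float(za) - scale2 * float(zb);

    const float result = float(a) * scale1 + float(b) * scale2 + offset;
    const int   r      = static_cast<int>(std::lround(result)); // trunc() on armv7
    return static_cast<uint8_t>(std::clamp(r, 0, 255));
}
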
diff --git a/src/cpu/kernels/add/generic/neon/qasymm8_signed.cpp b/src/cpu/kernels/add/generic/neon/qasymm8_signed.cpp
index dfdf8fe85b..7e23096239 100644
--- a/src/cpu/kernels/add/generic/neon/qasymm8_signed.cpp
+++ b/src/cpu/kernels/add/generic/neon/qasymm8_signed.cpp
@@ -23,15 +23,17 @@
*/
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Types.h"
+
#include "src/cpu/kernels/add/generic/neon/impl.h"
namespace arm_compute
{
namespace cpu
{
-void add_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
+void add_qasymm8_signed_neon(
+ const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
add_sub_qasymm8_signed_neon(src0, src1, dst, policy, window, true /*is_addition*/);
}
} // namespace cpu
-} // namespace arm_compute
\ No newline at end of file
+} // namespace arm_compute
diff --git a/src/cpu/kernels/add/generic/neon/qsymm16.cpp b/src/cpu/kernels/add/generic/neon/qsymm16.cpp
index e76e408d6e..ac2de0557a 100644
--- a/src/cpu/kernels/add/generic/neon/qsymm16.cpp
+++ b/src/cpu/kernels/add/generic/neon/qsymm16.cpp
@@ -25,14 +25,16 @@
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/Traits.h"
-#include "src/core/NEON/wrapper/intrinsics/intrinsics.h"
+
#include "src/core/helpers/WindowHelpers.h"
+#include "src/core/NEON/wrapper/intrinsics/intrinsics.h"
namespace arm_compute
{
namespace cpu
{
-void add_qsymm16_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
+void add_qsymm16_neon(
+ const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
ARM_COMPUTE_UNUSED(policy);
@@ -57,7 +59,7 @@ void add_qsymm16_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, co
const float32x4_t vscale2 = vdupq_n_f32(iq2_info.scale);
const float32x4_t invvscaleo = vdupq_n_f32(1.f / oq_info.scale);
- if(is_broadcast_across_x)
+ if (is_broadcast_across_x)
{
const bool is_broadcast_input_2 = input2_win.x().step() == 0;
Window broadcast_win = is_broadcast_input_2 ? input2_win : input1_win;
@@ -65,7 +67,7 @@ void add_qsymm16_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, co
const ITensor *broadcast_tensor = is_broadcast_input_2 ? src1 : src0;
const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? src1 : src0;
const UniformQuantizationInfo broadcast_qinfo = broadcast_tensor->info()->quantization_info().uniform();
- const UniformQuantizationInfo non_broadcast_qinfo = non_broadcast_tensor->info()->quantization_info().uniform();
+ const UniformQuantizationInfo non_broadcast_qinfo = non_broadcast_tensor->info()->quantization_info().uniform();
// Clear X Dimension on execution window as we handle manually
non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));
@@ -74,48 +76,50 @@ void add_qsymm16_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, co
Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
Iterator output(dst, win);
- execute_window_loop(win, [&](const Coordinates &)
- {
- const auto non_broadcast_input_ptr = reinterpret_cast<const int16_t *>(non_broadcast_input.ptr());
- const auto output_ptr = reinterpret_cast<int16_t *>(output.ptr());
-
- const int16_t broadcast_value = *reinterpret_cast<const int16_t *>(broadcast_input.ptr());
- const int16x8_t broadcast_value_vec = vdupq_n_s16(broadcast_value);
-
- const auto bf_0 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(broadcast_value_vec))), vscale2);
- const auto bf_1 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(broadcast_value_vec))), vscale2);
- const float bfs = static_cast<int32_t>(broadcast_value) * broadcast_qinfo.scale;
-
- // Compute S elements per iteration
- int x = window_start_x;
- for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ execute_window_loop(
+ win,
+ [&](const Coordinates &)
{
- const int16x8_t a = vld1q_s16(non_broadcast_input_ptr + x);
- const auto af_0 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(a))), vscale1);
- const auto af_1 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(a))), vscale1);
-
- int32x4_t rf_0{};
- int32x4_t rf_1{};
+ const auto non_broadcast_input_ptr = reinterpret_cast<const int16_t *>(non_broadcast_input.ptr());
+ const auto output_ptr = reinterpret_cast<int16_t *>(output.ptr());
+
+ const int16_t broadcast_value = *reinterpret_cast<const int16_t *>(broadcast_input.ptr());
+ const int16x8_t broadcast_value_vec = vdupq_n_s16(broadcast_value);
+
+ const auto bf_0 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(broadcast_value_vec))), vscale2);
+ const auto bf_1 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(broadcast_value_vec))), vscale2);
+ const float bfs = static_cast<int32_t>(broadcast_value) * broadcast_qinfo.scale;
+
+ // Compute S elements per iteration
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const int16x8_t a = vld1q_s16(non_broadcast_input_ptr + x);
+ const auto af_0 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(a))), vscale1);
+ const auto af_1 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(a))), vscale1);
+
+ int32x4_t rf_0{};
+ int32x4_t rf_1{};
#ifdef __aarch64__
- rf_0 = vcvtnq_s32_f32(vmulq_f32(vaddq_f32(af_0, bf_0), invvscaleo));
- rf_1 = vcvtnq_s32_f32(vmulq_f32(vaddq_f32(af_1, bf_1), invvscaleo));
+ rf_0 = vcvtnq_s32_f32(vmulq_f32(vaddq_f32(af_0, bf_0), invvscaleo));
+ rf_1 = vcvtnq_s32_f32(vmulq_f32(vaddq_f32(af_1, bf_1), invvscaleo));
#else //__aarch64__
- rf_0 = vcvtq_s32_f32(vmulq_f32(vaddq_f32(af_0, bf_0), invvscaleo));
- rf_1 = vcvtq_s32_f32(vmulq_f32(vaddq_f32(af_1, bf_1), invvscaleo));
+ rf_0 = vcvtq_s32_f32(vmulq_f32(vaddq_f32(af_0, bf_0), invvscaleo));
+ rf_1 = vcvtq_s32_f32(vmulq_f32(vaddq_f32(af_1, bf_1), invvscaleo));
#endif //__aarch64__
- const int16x8_t pa = vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1));
- vst1q_s16(output_ptr + x, pa);
- }
-
- // Compute left-over elements
- for(; x < window_end_x; ++x)
- {
- const float afs = static_cast<int32_t>(*(non_broadcast_input_ptr + x)) * non_broadcast_qinfo.scale;
- *(output_ptr + x) = quantize_qsymm16((afs + bfs), oq_info);
- }
- },
- broadcast_input, non_broadcast_input, output);
+ const int16x8_t pa = vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1));
+ vst1q_s16(output_ptr + x, pa);
+ }
+
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ const float afs = static_cast<int32_t>(*(non_broadcast_input_ptr + x)) * non_broadcast_qinfo.scale;
+ *(output_ptr + x) = quantize_qsymm16((afs + bfs), oq_info);
+ }
+ },
+ broadcast_input, non_broadcast_input, output);
}
else
{
@@ -127,48 +131,50 @@ void add_qsymm16_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, co
Iterator input2(src1, input2_win);
Iterator output(dst, win);
- execute_window_loop(win, [&](const Coordinates &)
- {
- const auto input1_ptr = reinterpret_cast<const int16_t *>(input1.ptr());
- const auto input2_ptr = reinterpret_cast<const int16_t *>(input2.ptr());
- const auto output_ptr = reinterpret_cast<int16_t *>(output.ptr());
-
- // Compute S elements per iteration
- int x = window_start_x;
- for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ execute_window_loop(
+ win,
+ [&](const Coordinates &)
{
- const int16x8_t a = vld1q_s16(input1_ptr + x);
- const int16x8_t b = vld1q_s16(input2_ptr + x);
-
- const auto af_0 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(a))), vscale1);
- const auto af_1 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(a))), vscale1);
- const auto bf_0 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(b))), vscale2);
- const auto bf_1 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(b))), vscale2);
-
- int32x4_t rf_0{};
- int32x4_t rf_1{};
+ const auto input1_ptr = reinterpret_cast<const int16_t *>(input1.ptr());
+ const auto input2_ptr = reinterpret_cast<const int16_t *>(input2.ptr());
+ const auto output_ptr = reinterpret_cast<int16_t *>(output.ptr());
+
+ // Compute S elements per iteration
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const int16x8_t a = vld1q_s16(input1_ptr + x);
+ const int16x8_t b = vld1q_s16(input2_ptr + x);
+
+ const auto af_0 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(a))), vscale1);
+ const auto af_1 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(a))), vscale1);
+ const auto bf_0 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(b))), vscale2);
+ const auto bf_1 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(b))), vscale2);
+
+ int32x4_t rf_0{};
+ int32x4_t rf_1{};
#ifdef __aarch64__
- rf_0 = vcvtnq_s32_f32(vmulq_f32(vaddq_f32(af_0, bf_0), invvscaleo));
- rf_1 = vcvtnq_s32_f32(vmulq_f32(vaddq_f32(af_1, bf_1), invvscaleo));
+ rf_0 = vcvtnq_s32_f32(vmulq_f32(vaddq_f32(af_0, bf_0), invvscaleo));
+ rf_1 = vcvtnq_s32_f32(vmulq_f32(vaddq_f32(af_1, bf_1), invvscaleo));
#else //__aarch64__
- rf_0 = vcvtq_s32_f32(vmulq_f32(vaddq_f32(af_0, bf_0), invvscaleo));
- rf_1 = vcvtq_s32_f32(vmulq_f32(vaddq_f32(af_1, bf_1), invvscaleo));
+ rf_0 = vcvtq_s32_f32(vmulq_f32(vaddq_f32(af_0, bf_0), invvscaleo));
+ rf_1 = vcvtq_s32_f32(vmulq_f32(vaddq_f32(af_1, bf_1), invvscaleo));
#endif //__aarch64__
- const int16x8_t pa = vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1));
- vst1q_s16(output_ptr + x, pa);
- }
-
- // Compute left-over elements
- for(; x < window_end_x; ++x)
- {
- const float afs = static_cast<int32_t>((*(input1_ptr + x))) * iq1_info.scale;
- const float bfs = static_cast<int32_t>((*(input2_ptr + x))) * iq2_info.scale;
- *(output_ptr + x) = quantize_qsymm16((afs + bfs), dst->info()->quantization_info());
- }
- },
- input1, input2, output);
+ const int16x8_t pa = vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1));
+ vst1q_s16(output_ptr + x, pa);
+ }
+
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ const float afs = static_cast<int32_t>((*(input1_ptr + x))) * iq1_info.scale;
+ const float bfs = static_cast<int32_t>((*(input2_ptr + x))) * iq2_info.scale;
+ *(output_ptr + x) = quantize_qsymm16((afs + bfs), dst->info()->quantization_info());
+ }
+ },
+ input1, input2, output);
}
}
} // namespace cpu
-} // namespace arm_compute
\ No newline at end of file
+} // namespace arm_compute
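
The hunks above only re-wrap the QSYMM16 NEON kernel; the arithmetic is untouched: each int16 lane is dequantized with its input scale, the two dequantized values are added, and the sum is rescaled by 1/oscale before vqmovn_s32 saturates it back to int16. A minimal scalar model of one lane, assuming the round-to-nearest behaviour of the AArch64 path (vcvtnq_s32_f32; the 32-bit fallback uses vcvtq_s32_f32, which truncates) and a hypothetical helper name:

#include <algorithm>
#include <cmath>
#include <cstdint>

// Scalar sketch of one QSYMM16 lane (not ACL API): dequantize, add, requantize.
static inline int16_t add_qsymm16_lane(int16_t a, int16_t b,
                                       float scale1, float scale2, float oscale)
{
    const float sum = a * scale1 + b * scale2;                   // dequantized add
    const long  q   = std::lround(sum / oscale);                 // round to nearest (ties differ from vcvtnq's to-even)
    return static_cast<int16_t>(std::clamp(q, -32768L, 32767L)); // vqmovn_s32-style saturation
}
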
diff --git a/src/cpu/kernels/add/generic/sve/fp16.cpp b/src/cpu/kernels/add/generic/sve/fp16.cpp
index 581f3abded..01dfe6c44b 100644
--- a/src/cpu/kernels/add/generic/sve/fp16.cpp
+++ b/src/cpu/kernels/add/generic/sve/fp16.cpp
@@ -31,10 +31,11 @@ namespace arm_compute
{
namespace cpu
{
-void add_fp16_sve(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
+void add_fp16_sve(
+ const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
return add_same_sve<float16_t>(src0, src1, dst, policy, window);
}
-}
+} // namespace cpu
} // namespace arm_compute
#endif /* (__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */
diff --git a/src/cpu/kernels/add/generic/sve/fp32.cpp b/src/cpu/kernels/add/generic/sve/fp32.cpp
index b37799113a..56771a5411 100644
--- a/src/cpu/kernels/add/generic/sve/fp32.cpp
+++ b/src/cpu/kernels/add/generic/sve/fp32.cpp
@@ -24,15 +24,17 @@
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
+
#include "src/cpu/kernels/add/generic/sve/impl.h"
namespace arm_compute
{
namespace cpu
{
-void add_fp32_sve(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
+void add_fp32_sve(
+ const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
return add_same_sve<float>(src0, src1, dst, policy, window);
}
-}
+} // namespace cpu
} // namespace arm_compute
diff --git a/src/cpu/kernels/add/generic/sve/impl.cpp b/src/cpu/kernels/add/generic/sve/impl.cpp
index e8606436fd..ca850fcef4 100644
--- a/src/cpu/kernels/add/generic/sve/impl.cpp
+++ b/src/cpu/kernels/add/generic/sve/impl.cpp
@@ -23,17 +23,21 @@
*/
#include "src/cpu/kernels/add/generic/sve/impl.h"
+
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/utils/misc/Traits.h"
+
#include "src/core/NEON/SVEMath.h"
#include "src/core/NEON/wrapper/intrinsics/intrinsics.h"
+
#include <arm_sve.h>
namespace arm_compute
{
namespace cpu
{
template <typename ScalarType>
-void add_same_sve(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
+void add_same_sve(
+ const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
const auto all_true_pg = wrapper::svptrue<ScalarType>();
const auto window_start_x = static_cast<int>(window.x().start());
@@ -53,7 +57,7 @@ void add_same_sve(const ITensor *src0, const ITensor *src1, ITensor *dst, const
Iterator input2(src1, window.broadcast_if_dimension_le_one(src1->info()->tensor_shape()));
Iterator output(dst, window);
- if(is_broadcast_across_x)
+ if (is_broadcast_across_x)
{
const bool is_broadcast_input_2 = input2_win.x().step() == 0;
Window broadcast_win = is_broadcast_input_2 ? input2_win : input1_win;
@@ -68,28 +72,30 @@ void add_same_sve(const ITensor *src0, const ITensor *src1, ITensor *dst, const
Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
Iterator output(dst, win);
- execute_window_loop(win, [&](const Coordinates &)
- {
- const auto non_broadcast_input_ptr = reinterpret_cast<const ScalarType *>(non_broadcast_input.ptr());
- const auto output_ptr = reinterpret_cast<ScalarType *>(output.ptr());
+ execute_window_loop(
+ win,
+ [&](const Coordinates &)
+ {
+ const auto non_broadcast_input_ptr = reinterpret_cast<const ScalarType *>(non_broadcast_input.ptr());
+ const auto output_ptr = reinterpret_cast<ScalarType *>(output.ptr());
- const ScalarType broadcast_value = *reinterpret_cast<const ScalarType *>(broadcast_input.ptr());
- const auto broadcast_value_vec = wrapper::svdup_n(broadcast_value);
+ const ScalarType broadcast_value = *reinterpret_cast<const ScalarType *>(broadcast_input.ptr());
+ const auto broadcast_value_vec = wrapper::svdup_n(broadcast_value);
- int x = window_start_x;
- svbool_t pg = wrapper::svwhilelt<ScalarType>(x, window_end_x);
- do
- {
- const auto non_broadcast_v = svld1(pg, non_broadcast_input_ptr + x);
- auto res = is_sat ? wrapper::svqadd(broadcast_value_vec, non_broadcast_v) : svadd_z(pg, broadcast_value_vec, non_broadcast_v);
- svst1(pg, output_ptr + x, res);
-
- x += wrapper::svcnt<ScalarType>();
- pg = wrapper::svwhilelt<ScalarType>(x, window_end_x);
- }
- while(svptest_any(all_true_pg, pg));
- },
- broadcast_input, non_broadcast_input, output);
+ int x = window_start_x;
+ svbool_t pg = wrapper::svwhilelt<ScalarType>(x, window_end_x);
+ do
+ {
+ const auto non_broadcast_v = svld1(pg, non_broadcast_input_ptr + x);
+ auto res = is_sat ? wrapper::svqadd(broadcast_value_vec, non_broadcast_v)
+ : svadd_z(pg, broadcast_value_vec, non_broadcast_v);
+ svst1(pg, output_ptr + x, res);
+
+ x += wrapper::svcnt<ScalarType>();
+ pg = wrapper::svwhilelt<ScalarType>(x, window_end_x);
+ } while (svptest_any(all_true_pg, pg));
+ },
+ broadcast_input, non_broadcast_input, output);
}
else
{
@@ -101,35 +107,41 @@ void add_same_sve(const ITensor *src0, const ITensor *src1, ITensor *dst, const
Iterator input2(src1, input2_win);
Iterator output(dst, win);
- execute_window_loop(win, [&](const Coordinates &)
- {
- const auto input1_ptr = reinterpret_cast<const ScalarType *>(input1.ptr());
- const auto input2_ptr = reinterpret_cast<const ScalarType *>(input2.ptr());
- const auto output_ptr = reinterpret_cast<ScalarType *>(output.ptr());
-
- int x = window_start_x;
- svbool_t pg = wrapper::svwhilelt<ScalarType>(x, window_end_x);
- do
+ execute_window_loop(
+ win,
+ [&](const Coordinates &)
{
- const auto val1 = svld1(pg, input1_ptr + x);
- const auto val2 = svld1(pg, input2_ptr + x);
- const auto res = is_sat ? wrapper::svqadd(val1, val2) : svadd_z(pg, val1, val2);
- svst1(pg, output_ptr + x, res);
-
- x += wrapper::svcnt<ScalarType>();
- pg = wrapper::svwhilelt<ScalarType>(x, window_end_x);
- }
- while(svptest_any(all_true_pg, pg));
- },
- input1, input2, output);
+ const auto input1_ptr = reinterpret_cast<const ScalarType *>(input1.ptr());
+ const auto input2_ptr = reinterpret_cast<const ScalarType *>(input2.ptr());
+ const auto output_ptr = reinterpret_cast<ScalarType *>(output.ptr());
+
+ int x = window_start_x;
+ svbool_t pg = wrapper::svwhilelt<ScalarType>(x, window_end_x);
+ do
+ {
+ const auto val1 = svld1(pg, input1_ptr + x);
+ const auto val2 = svld1(pg, input2_ptr + x);
+ const auto res = is_sat ? wrapper::svqadd(val1, val2) : svadd_z(pg, val1, val2);
+ svst1(pg, output_ptr + x, res);
+
+ x += wrapper::svcnt<ScalarType>();
+ pg = wrapper::svwhilelt<ScalarType>(x, window_end_x);
+ } while (svptest_any(all_true_pg, pg));
+ },
+ input1, input2, output);
}
}
-template void add_same_sve<float>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
-template void add_same_sve<uint8_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
-template void add_same_sve<int16_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
-template void add_same_sve<int32_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
+template void add_same_sve<float>(
+ const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
+template void add_same_sve<uint8_t>(
+ const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
+template void add_same_sve<int16_t>(
+ const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
+template void add_same_sve<int32_t>(
+ const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS)
-template void add_same_sve<float16_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
+template void add_same_sve<float16_t>(
+ const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
#endif /* (__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */
} // namespace cpu
} // namespace arm_compute
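
Every SVE loop in this patch follows the whilelt/ptest shape reflowed above: svwhilelt builds a predicate covering lanes [x, window_end_x), so the final, partially-true predicate handles the tail and no scalar remainder loop is needed. A minimal self-contained sketch of that loop shape for plain f32, with a hypothetical function name rather than the library's add_same_sve template:

#include <arm_sve.h>

// Predicated add over n floats; the tail is handled by the partial predicate.
void add_f32_sve_sketch(const float *in1, const float *in2, float *out, int n)
{
    int      x  = 0;
    svbool_t pg = svwhilelt_b32(x, n);
    do
    {
        const svfloat32_t va = svld1(pg, in1 + x);  // inactive lanes read as zero
        const svfloat32_t vb = svld1(pg, in2 + x);
        const svfloat32_t vr = svadd_z(pg, va, vb); // zeroing predication, as in the non-saturating path
        svst1(pg, out + x, vr);                     // inactive lanes are not written
        x += static_cast<int>(svcntw());            // f32 lanes per vector
        pg = svwhilelt_b32(x, n);
    } while (svptest_any(svptrue_b32(), pg));
}
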
diff --git a/src/cpu/kernels/add/generic/sve/impl.h b/src/cpu/kernels/add/generic/sve/impl.h
index 0136f14246..6a95d66826 100644
--- a/src/cpu/kernels/add/generic/sve/impl.h
+++ b/src/cpu/kernels/add/generic/sve/impl.h
@@ -33,7 +33,8 @@ namespace arm_compute
namespace cpu
{
template <typename ScalarType>
-void add_same_sve(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
+void add_same_sve(
+ const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
} // namespace cpu
} // namespace arm_compute
#endif // SRC_CORE_SVE_KERNELS_ADD_IMPL_H
diff --git a/src/cpu/kernels/add/generic/sve/integer.cpp b/src/cpu/kernels/add/generic/sve/integer.cpp
index 3642dccd7b..4d17f2adbd 100644
--- a/src/cpu/kernels/add/generic/sve/integer.cpp
+++ b/src/cpu/kernels/add/generic/sve/integer.cpp
@@ -24,25 +24,29 @@
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
+
#include "src/cpu/kernels/add/generic/sve/impl.h"
namespace arm_compute
{
namespace cpu
{
-void add_u8_sve(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
+void add_u8_sve(
+ const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
return add_same_sve<uint8_t>(src0, src1, dst, policy, window);
}
-void add_s16_sve(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
+void add_s16_sve(
+ const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
return add_same_sve<int16_t>(src0, src1, dst, policy, window);
}
-void add_s32_sve(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
+void add_s32_sve(
+ const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
return add_same_sve<int32_t>(src0, src1, dst, policy, window);
}
-}
+} // namespace cpu
} // namespace arm_compute
diff --git a/src/cpu/kernels/add/generic/sve2/qasymm8.cpp b/src/cpu/kernels/add/generic/sve2/qasymm8.cpp
index 1dec214aa0..40add9d51b 100644
--- a/src/cpu/kernels/add/generic/sve2/qasymm8.cpp
+++ b/src/cpu/kernels/add/generic/sve2/qasymm8.cpp
@@ -26,15 +26,18 @@
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/Traits.h"
+
#include "src/core/NEON/SVEMath.h"
#include "src/core/NEON/wrapper/intrinsics/intrinsics.h"
+
#include <arm_sve.h>
namespace arm_compute
{
namespace cpu
{
-void add_qasymm8_sve2(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
+void add_qasymm8_sve2(
+ const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
ARM_COMPUTE_UNUSED(policy);
@@ -58,7 +61,7 @@ void add_qasymm8_sve2(const ITensor *src0, const ITensor *src1, ITensor *dst, co
const auto invvscaleo = svdup_n_f32(1.f / oq_info.scale);
const auto voffseto = svdup_n_f32(oq_info.offset);
- if(is_broadcast_across_x)
+ if (is_broadcast_across_x)
{
const bool is_broadcast_input_2 = input2_win.x().step() == 0;
Window broadcast_win = is_broadcast_input_2 ? input2_win : input1_win;
@@ -78,48 +81,89 @@ void add_qasymm8_sve2(const ITensor *src0, const ITensor *src1, ITensor *dst, co
Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
Iterator output(dst, win);
- execute_window_loop(win, [&](const Coordinates &)
- {
- const auto non_broadcast_input_ptr = reinterpret_cast<const uint8_t *>(non_broadcast_input.ptr());
- const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
+ execute_window_loop(
+ win,
+ [&](const Coordinates &)
+ {
+ const auto non_broadcast_input_ptr = reinterpret_cast<const uint8_t *>(non_broadcast_input.ptr());
+ const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
- const uint8_t broadcast_value = *reinterpret_cast<const uint8_t *>(broadcast_input.ptr());
- const svuint8_t broadcast_value_vec = svdup_n_u8(broadcast_value);
+ const uint8_t broadcast_value = *reinterpret_cast<const uint8_t *>(broadcast_input.ptr());
+ const svuint8_t broadcast_value_vec = svdup_n_u8(broadcast_value);
- int x = window_start_x;
- svbool_t pg = svwhilelt_b8(x, window_end_x);
+ int x = window_start_x;
+ svbool_t pg = svwhilelt_b8(x, window_end_x);
- const auto bf_0 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlb_u32(svmovlb_u16(broadcast_value_vec))), voffset2)), vscale2);
- const auto bf_1 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlt_u32(svmovlb_u16(broadcast_value_vec))), voffset2)), vscale2);
- const auto bf_2 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlb_u32(svmovlt_u16(broadcast_value_vec))), voffset2)), vscale2);
- const auto bf_3 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlt_u32(svmovlt_u16(broadcast_value_vec))), voffset2)), vscale2);
+ const auto bf_0 = svmul_f32_z(
+ pg,
+ svcvt_f32_s32_z(
+ pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlb_u32(svmovlb_u16(broadcast_value_vec))),
+ voffset2)),
+ vscale2);
+ const auto bf_1 = svmul_f32_z(
+ pg,
+ svcvt_f32_s32_z(
+ pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlt_u32(svmovlb_u16(broadcast_value_vec))),
+ voffset2)),
+ vscale2);
+ const auto bf_2 = svmul_f32_z(
+ pg,
+ svcvt_f32_s32_z(
+ pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlb_u32(svmovlt_u16(broadcast_value_vec))),
+ voffset2)),
+ vscale2);
+ const auto bf_3 = svmul_f32_z(
+ pg,
+ svcvt_f32_s32_z(
+ pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlt_u32(svmovlt_u16(broadcast_value_vec))),
+ voffset2)),
+ vscale2);
- do
- {
- const svuint8_t a = svld1_u8(pg, non_broadcast_input_ptr + x);
-
- const auto af_0 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlb_u32(svmovlb_u16(a))), voffset1)), vscale1);
- const auto af_1 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlt_u32(svmovlb_u16(a))), voffset1)), vscale1);
- const auto af_2 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlb_u32(svmovlt_u16(a))), voffset1)), vscale1);
- const auto af_3 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlt_u32(svmovlt_u16(a))), voffset1)), vscale1);
-
- const auto rf_0 = svcvt_u32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_0, bf_0), invvscaleo));
- const auto rf_1 = svcvt_u32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_1, bf_1), invvscaleo));
- const auto rf_2 = svcvt_u32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_2, bf_2), invvscaleo));
- const auto rf_3 = svcvt_u32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_3, bf_3), invvscaleo));
-
- const auto pa = svqxtnt_u32(svqxtnb_u32(rf_0), rf_1);
- const auto pb = svqxtnt_u32(svqxtnb_u32(rf_2), rf_3);
-
- const auto res = svqxtnt_u16(svqxtnb_u16(pa), pb);
- svst1_u8(pg, output_ptr + x, res);
-
- x += svcntb();
- pg = svwhilelt_b8(x, window_end_x);
- }
- while(svptest_any(all_true_pg, pg));
- },
- broadcast_input, non_broadcast_input, output);
+ do
+ {
+ const svuint8_t a = svld1_u8(pg, non_broadcast_input_ptr + x);
+
+ const auto af_0 = svmul_f32_z(
+ pg,
+ svcvt_f32_s32_z(pg,
+ svsub_s32_z(pg, svreinterpret_s32_u32(svmovlb_u32(svmovlb_u16(a))), voffset1)),
+ vscale1);
+ const auto af_1 = svmul_f32_z(
+ pg,
+ svcvt_f32_s32_z(pg,
+ svsub_s32_z(pg, svreinterpret_s32_u32(svmovlt_u32(svmovlb_u16(a))), voffset1)),
+ vscale1);
+ const auto af_2 = svmul_f32_z(
+ pg,
+ svcvt_f32_s32_z(pg,
+ svsub_s32_z(pg, svreinterpret_s32_u32(svmovlb_u32(svmovlt_u16(a))), voffset1)),
+ vscale1);
+ const auto af_3 = svmul_f32_z(
+ pg,
+ svcvt_f32_s32_z(pg,
+ svsub_s32_z(pg, svreinterpret_s32_u32(svmovlt_u32(svmovlt_u16(a))), voffset1)),
+ vscale1);
+
+ const auto rf_0 =
+ svcvt_u32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_0, bf_0), invvscaleo));
+ const auto rf_1 =
+ svcvt_u32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_1, bf_1), invvscaleo));
+ const auto rf_2 =
+ svcvt_u32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_2, bf_2), invvscaleo));
+ const auto rf_3 =
+ svcvt_u32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_3, bf_3), invvscaleo));
+
+ const auto pa = svqxtnt_u32(svqxtnb_u32(rf_0), rf_1);
+ const auto pb = svqxtnt_u32(svqxtnb_u32(rf_2), rf_3);
+
+ const auto res = svqxtnt_u16(svqxtnb_u16(pa), pb);
+ svst1_u8(pg, output_ptr + x, res);
+
+ x += svcntb();
+ pg = svwhilelt_b8(x, window_end_x);
+ } while (svptest_any(all_true_pg, pg));
+ },
+ broadcast_input, non_broadcast_input, output);
}
else
{
@@ -136,45 +180,82 @@ void add_qasymm8_sve2(const ITensor *src0, const ITensor *src1, ITensor *dst, co
const auto voffset1 = svdup_n_s32(iq1_info.offset);
const auto voffset2 = svdup_n_s32(iq2_info.offset);
- execute_window_loop(win, [&](const Coordinates &)
- {
- const auto input1_ptr = reinterpret_cast<const uint8_t *>(input1.ptr());
- const auto input2_ptr = reinterpret_cast<const uint8_t *>(input2.ptr());
- const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
-
- int x = window_start_x;
- svbool_t pg = svwhilelt_b8(x, window_end_x);
- do
+ execute_window_loop(
+ win,
+ [&](const Coordinates &)
{
- const auto a = svld1_u8(pg, input1_ptr + x);
- const auto b = svld1_u8(pg, input2_ptr + x);
- const auto af_0 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlb_u32(svmovlb_u16(a))), voffset1)), vscale1);
- const auto af_1 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlt_u32(svmovlb_u16(a))), voffset1)), vscale1);
- const auto af_2 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlb_u32(svmovlt_u16(a))), voffset1)), vscale1);
- const auto af_3 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlt_u32(svmovlt_u16(a))), voffset1)), vscale1);
-
- const auto bf_0 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlb_u32(svmovlb_u16(b))), voffset2)), vscale2);
- const auto bf_1 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlt_u32(svmovlb_u16(b))), voffset2)), vscale2);
- const auto bf_2 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlb_u32(svmovlt_u16(b))), voffset2)), vscale2);
- const auto bf_3 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlt_u32(svmovlt_u16(b))), voffset2)), vscale2);
-
- const auto rf_0 = svcvt_u32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_0, bf_0), invvscaleo));
- const auto rf_1 = svcvt_u32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_1, bf_1), invvscaleo));
- const auto rf_2 = svcvt_u32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_2, bf_2), invvscaleo));
- const auto rf_3 = svcvt_u32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_3, bf_3), invvscaleo));
-
- const auto pa = svqxtnt_u32(svqxtnb_u32(rf_0), rf_1);
- const auto pb = svqxtnt_u32(svqxtnb_u32(rf_2), rf_3);
- const auto res = svqxtnt_u16(svqxtnb_u16(pa), pb);
-
- svst1_u8(pg, output_ptr + x, res);
-
- x += svcntb();
- pg = svwhilelt_b8(x, window_end_x);
- }
- while(svptest_any(all_true_pg, pg));
- },
- input1, input2, output);
+ const auto input1_ptr = reinterpret_cast<const uint8_t *>(input1.ptr());
+ const auto input2_ptr = reinterpret_cast<const uint8_t *>(input2.ptr());
+ const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
+
+ int x = window_start_x;
+ svbool_t pg = svwhilelt_b8(x, window_end_x);
+ do
+ {
+ const auto a = svld1_u8(pg, input1_ptr + x);
+ const auto b = svld1_u8(pg, input2_ptr + x);
+ const auto af_0 = svmul_f32_z(
+ pg,
+ svcvt_f32_s32_z(pg,
+ svsub_s32_z(pg, svreinterpret_s32_u32(svmovlb_u32(svmovlb_u16(a))), voffset1)),
+ vscale1);
+ const auto af_1 = svmul_f32_z(
+ pg,
+ svcvt_f32_s32_z(pg,
+ svsub_s32_z(pg, svreinterpret_s32_u32(svmovlt_u32(svmovlb_u16(a))), voffset1)),
+ vscale1);
+ const auto af_2 = svmul_f32_z(
+ pg,
+ svcvt_f32_s32_z(pg,
+ svsub_s32_z(pg, svreinterpret_s32_u32(svmovlb_u32(svmovlt_u16(a))), voffset1)),
+ vscale1);
+ const auto af_3 = svmul_f32_z(
+ pg,
+ svcvt_f32_s32_z(pg,
+ svsub_s32_z(pg, svreinterpret_s32_u32(svmovlt_u32(svmovlt_u16(a))), voffset1)),
+ vscale1);
+
+ const auto bf_0 = svmul_f32_z(
+ pg,
+ svcvt_f32_s32_z(pg,
+ svsub_s32_z(pg, svreinterpret_s32_u32(svmovlb_u32(svmovlb_u16(b))), voffset2)),
+ vscale2);
+ const auto bf_1 = svmul_f32_z(
+ pg,
+ svcvt_f32_s32_z(pg,
+ svsub_s32_z(pg, svreinterpret_s32_u32(svmovlt_u32(svmovlb_u16(b))), voffset2)),
+ vscale2);
+ const auto bf_2 = svmul_f32_z(
+ pg,
+ svcvt_f32_s32_z(pg,
+ svsub_s32_z(pg, svreinterpret_s32_u32(svmovlb_u32(svmovlt_u16(b))), voffset2)),
+ vscale2);
+ const auto bf_3 = svmul_f32_z(
+ pg,
+ svcvt_f32_s32_z(pg,
+ svsub_s32_z(pg, svreinterpret_s32_u32(svmovlt_u32(svmovlt_u16(b))), voffset2)),
+ vscale2);
+
+ const auto rf_0 =
+ svcvt_u32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_0, bf_0), invvscaleo));
+ const auto rf_1 =
+ svcvt_u32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_1, bf_1), invvscaleo));
+ const auto rf_2 =
+ svcvt_u32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_2, bf_2), invvscaleo));
+ const auto rf_3 =
+ svcvt_u32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_3, bf_3), invvscaleo));
+
+ const auto pa = svqxtnt_u32(svqxtnb_u32(rf_0), rf_1);
+ const auto pb = svqxtnt_u32(svqxtnb_u32(rf_2), rf_3);
+ const auto res = svqxtnt_u16(svqxtnb_u16(pa), pb);
+
+ svst1_u8(pg, output_ptr + x, res);
+
+ x += svcntb();
+ pg = svwhilelt_b8(x, window_end_x);
+ } while (svptest_any(all_true_pg, pg));
+ },
+ input1, input2, output);
}
}
} // namespace cpu
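
The QASYMM8 kernel above widens each u8 vector into four f32 vectors using the SVE2 bottom/top pairs (svmovlb widens the even lanes, svmovlt the odd lanes) and narrows the results back with the matching svqxtnb/svqxtnt pairs, so lane order survives the round trip. A small sketch of just that pairing, assuming an SVE2 target and a hypothetical function name:

#include <arm_sve.h>

// Widen u8 -> u16 by even/odd lanes, then saturate-narrow back into place.
// svqxtnb writes the even lanes; svqxtnt fills the odd lanes of the same vector.
svuint8_t widen_narrow_roundtrip(svuint8_t v)
{
    const svuint16_t even = svmovlb_u16(v); // even u8 lanes, zero-extended
    const svuint16_t odd  = svmovlt_u16(v); // odd  u8 lanes, zero-extended
    return svqxtnt_u16(svqxtnb_u16(even), odd);
}
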
diff --git a/src/cpu/kernels/add/generic/sve2/qasymm8_signed.cpp b/src/cpu/kernels/add/generic/sve2/qasymm8_signed.cpp
index dae8899753..2e585115e1 100644
--- a/src/cpu/kernels/add/generic/sve2/qasymm8_signed.cpp
+++ b/src/cpu/kernels/add/generic/sve2/qasymm8_signed.cpp
@@ -26,15 +26,18 @@
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/Traits.h"
+
#include "src/core/NEON/SVEMath.h"
#include "src/core/NEON/wrapper/intrinsics/intrinsics.h"
+
#include <arm_sve.h>
namespace arm_compute
{
namespace cpu
{
-void add_qasymm8_signed_sve2(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
+void add_qasymm8_signed_sve2(
+ const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
ARM_COMPUTE_UNUSED(policy);
@@ -57,7 +60,7 @@ void add_qasymm8_signed_sve2(const ITensor *src0, const ITensor *src1, ITensor *
const auto invvscaleo = svdup_n_f32(1.f / oq_info.scale);
const auto voffseto = svdup_n_f32(oq_info.offset);
- if(is_broadcast_across_x)
+ if (is_broadcast_across_x)
{
const bool is_broadcast_input_2 = input2_win.x().step() == 0;
Window broadcast_win = is_broadcast_input_2 ? input2_win : input1_win;
@@ -78,46 +81,63 @@ void add_qasymm8_signed_sve2(const ITensor *src0, const ITensor *src1, ITensor *
Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
Iterator output(dst, win);
- execute_window_loop(win, [&](const Coordinates &)
- {
- const auto non_broadcast_input_ptr = reinterpret_cast<const int8_t *>(non_broadcast_input.ptr());
- const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
-
- const int8_t broadcast_value = *reinterpret_cast<const int8_t *>(broadcast_input.ptr());
- const auto broadcast_value_vec = svdup_n_s8(broadcast_value);
-
- int x = window_start_x;
- svbool_t pg = svwhilelt_b8(x, window_end_x);
- const auto bf_0 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlb_s32(svmovlb_s16(broadcast_value_vec)), voffset2)), vscale2);
- const auto bf_1 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlt_s32(svmovlb_s16(broadcast_value_vec)), voffset2)), vscale2);
- const auto bf_2 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlb_s32(svmovlt_s16(broadcast_value_vec)), voffset2)), vscale2);
- const auto bf_3 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlt_s32(svmovlt_s16(broadcast_value_vec)), voffset2)), vscale2);
-
- do
+ execute_window_loop(
+ win,
+ [&](const Coordinates &)
{
- const auto a = svld1_s8(pg, non_broadcast_input_ptr + x);
- const auto af_0 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlb_s32(svmovlb_s16(a)), voffset1)), vscale1);
- const auto af_1 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlt_s32(svmovlb_s16(a)), voffset1)), vscale1);
- const auto af_2 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlb_s32(svmovlt_s16(a)), voffset1)), vscale1);
- const auto af_3 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlt_s32(svmovlt_s16(a)), voffset1)), vscale1);
-
- const auto rf_0 = svcvt_s32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_0, bf_0), invvscaleo));
- const auto rf_1 = svcvt_s32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_1, bf_1), invvscaleo));
- const auto rf_2 = svcvt_s32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_2, bf_2), invvscaleo));
- const auto rf_3 = svcvt_s32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_3, bf_3), invvscaleo));
-
- const auto pa = svqxtnt_s32(svqxtnb_s32(rf_0), rf_1);
- const auto pb = svqxtnt_s32(svqxtnb_s32(rf_2), rf_3);
- const auto res = svqxtnt_s16(svqxtnb_s16(pa), pb);
-
- svst1_s8(pg, output_ptr + x, res);
-
- x += svcntb();
- pg = svwhilelt_b8(x, window_end_x);
- }
- while(svptest_any(all_true_pg, pg));
- },
- broadcast_input, non_broadcast_input, output);
+ const auto non_broadcast_input_ptr = reinterpret_cast<const int8_t *>(non_broadcast_input.ptr());
+ const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
+
+ const int8_t broadcast_value = *reinterpret_cast<const int8_t *>(broadcast_input.ptr());
+ const auto broadcast_value_vec = svdup_n_s8(broadcast_value);
+
+ int x = window_start_x;
+ svbool_t pg = svwhilelt_b8(x, window_end_x);
+ const auto bf_0 = svmul_f32_z(
+ pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlb_s32(svmovlb_s16(broadcast_value_vec)), voffset2)),
+ vscale2);
+ const auto bf_1 = svmul_f32_z(
+ pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlt_s32(svmovlb_s16(broadcast_value_vec)), voffset2)),
+ vscale2);
+ const auto bf_2 = svmul_f32_z(
+ pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlb_s32(svmovlt_s16(broadcast_value_vec)), voffset2)),
+ vscale2);
+ const auto bf_3 = svmul_f32_z(
+ pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlt_s32(svmovlt_s16(broadcast_value_vec)), voffset2)),
+ vscale2);
+
+ do
+ {
+ const auto a = svld1_s8(pg, non_broadcast_input_ptr + x);
+ const auto af_0 = svmul_f32_z(
+ pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlb_s32(svmovlb_s16(a)), voffset1)), vscale1);
+ const auto af_1 = svmul_f32_z(
+ pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlt_s32(svmovlb_s16(a)), voffset1)), vscale1);
+ const auto af_2 = svmul_f32_z(
+ pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlb_s32(svmovlt_s16(a)), voffset1)), vscale1);
+ const auto af_3 = svmul_f32_z(
+ pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlt_s32(svmovlt_s16(a)), voffset1)), vscale1);
+
+ const auto rf_0 =
+ svcvt_s32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_0, bf_0), invvscaleo));
+ const auto rf_1 =
+ svcvt_s32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_1, bf_1), invvscaleo));
+ const auto rf_2 =
+ svcvt_s32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_2, bf_2), invvscaleo));
+ const auto rf_3 =
+ svcvt_s32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_3, bf_3), invvscaleo));
+
+ const auto pa = svqxtnt_s32(svqxtnb_s32(rf_0), rf_1);
+ const auto pb = svqxtnt_s32(svqxtnb_s32(rf_2), rf_3);
+ const auto res = svqxtnt_s16(svqxtnb_s16(pa), pb);
+
+ svst1_s8(pg, output_ptr + x, res);
+
+ x += svcntb();
+ pg = svwhilelt_b8(x, window_end_x);
+ } while (svptest_any(all_true_pg, pg));
+ },
+ broadcast_input, non_broadcast_input, output);
}
else
{
@@ -134,46 +154,59 @@ void add_qasymm8_signed_sve2(const ITensor *src0, const ITensor *src1, ITensor *
const auto voffset1 = svdup_n_s32(iq1_info.offset);
const auto voffset2 = svdup_n_s32(iq2_info.offset);
- execute_window_loop(win, [&](const Coordinates &)
- {
- const auto input1_ptr = reinterpret_cast<const int8_t *>(input1.ptr());
- const auto input2_ptr = reinterpret_cast<const int8_t *>(input2.ptr());
- const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
-
- int x = window_start_x;
- svbool_t pg = svwhilelt_b8(x, window_end_x);
- do
+ execute_window_loop(
+ win,
+ [&](const Coordinates &)
{
- const auto a = svld1_s8(pg, input1_ptr + x);
- const auto b = svld1_s8(pg, input2_ptr + x);
-
- const auto af_0 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlb_s32(svmovlb_s16(a)), voffset1)), vscale1);
- const auto af_1 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlt_s32(svmovlb_s16(a)), voffset1)), vscale1);
- const auto af_2 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlb_s32(svmovlt_s16(a)), voffset1)), vscale1);
- const auto af_3 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlt_s32(svmovlt_s16(a)), voffset1)), vscale1);
-
- const auto bf_0 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlb_s32(svmovlb_s16(b)), voffset2)), vscale2);
- const auto bf_1 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlt_s32(svmovlb_s16(b)), voffset2)), vscale2);
- const auto bf_2 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlb_s32(svmovlt_s16(b)), voffset2)), vscale2);
- const auto bf_3 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlt_s32(svmovlt_s16(b)), voffset2)), vscale2);
-
- const auto rf_0 = svcvt_s32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_0, bf_0), invvscaleo));
- const auto rf_1 = svcvt_s32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_1, bf_1), invvscaleo));
- const auto rf_2 = svcvt_s32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_2, bf_2), invvscaleo));
- const auto rf_3 = svcvt_s32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_3, bf_3), invvscaleo));
-
- const auto pa = svqxtnt_s32(svqxtnb_s32(rf_0), rf_1);
- const auto pb = svqxtnt_s32(svqxtnb_s32(rf_2), rf_3);
- const auto res = svqxtnt_s16(svqxtnb_s16(pa), pb);
-
- svst1_s8(pg, output_ptr + x, res);
-
- x += svcntb();
- pg = svwhilelt_b8(x, window_end_x);
- }
- while(svptest_any(svptrue_b8(), pg));
- },
- input1, input2, output);
+ const auto input1_ptr = reinterpret_cast<const int8_t *>(input1.ptr());
+ const auto input2_ptr = reinterpret_cast<const int8_t *>(input2.ptr());
+ const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
+
+ int x = window_start_x;
+ svbool_t pg = svwhilelt_b8(x, window_end_x);
+ do
+ {
+ const auto a = svld1_s8(pg, input1_ptr + x);
+ const auto b = svld1_s8(pg, input2_ptr + x);
+
+ const auto af_0 = svmul_f32_z(
+ pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlb_s32(svmovlb_s16(a)), voffset1)), vscale1);
+ const auto af_1 = svmul_f32_z(
+ pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlt_s32(svmovlb_s16(a)), voffset1)), vscale1);
+ const auto af_2 = svmul_f32_z(
+ pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlb_s32(svmovlt_s16(a)), voffset1)), vscale1);
+ const auto af_3 = svmul_f32_z(
+ pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlt_s32(svmovlt_s16(a)), voffset1)), vscale1);
+
+ const auto bf_0 = svmul_f32_z(
+ pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlb_s32(svmovlb_s16(b)), voffset2)), vscale2);
+ const auto bf_1 = svmul_f32_z(
+ pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlt_s32(svmovlb_s16(b)), voffset2)), vscale2);
+ const auto bf_2 = svmul_f32_z(
+ pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlb_s32(svmovlt_s16(b)), voffset2)), vscale2);
+ const auto bf_3 = svmul_f32_z(
+ pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlt_s32(svmovlt_s16(b)), voffset2)), vscale2);
+
+ const auto rf_0 =
+ svcvt_s32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_0, bf_0), invvscaleo));
+ const auto rf_1 =
+ svcvt_s32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_1, bf_1), invvscaleo));
+ const auto rf_2 =
+ svcvt_s32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_2, bf_2), invvscaleo));
+ const auto rf_3 =
+ svcvt_s32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_3, bf_3), invvscaleo));
+
+ const auto pa = svqxtnt_s32(svqxtnb_s32(rf_0), rf_1);
+ const auto pb = svqxtnt_s32(svqxtnb_s32(rf_2), rf_3);
+ const auto res = svqxtnt_s16(svqxtnb_s16(pa), pb);
+
+ svst1_s8(pg, output_ptr + x, res);
+
+ x += svcntb();
+ pg = svwhilelt_b8(x, window_end_x);
+ } while (svptest_any(svptrue_b8(), pg));
+ },
+ input1, input2, output);
}
}
} // namespace cpu
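
In the asymmetric signed kernel the offsets do the real work: each lane is dequantized as scale * (q - offset), and the output offset is folded into the requantization multiply-add (the svmla with voffseto above). A scalar model of one lane, with a hypothetical helper name and the cast mirroring svcvt_s32_f32's truncation toward zero:

#include <algorithm>
#include <cstdint>

// Scalar sketch of one QASYMM8_SIGNED lane (not ACL API).
static inline int8_t add_qasymm8_signed_lane(int8_t a, int8_t b,
                                             float s1, int32_t o1,
                                             float s2, int32_t o2,
                                             float so, int32_t oo)
{
    const float   real = (a - o1) * s1 + (b - o2) * s2;        // dequantize and add
    const int32_t q    = static_cast<int32_t>(oo + real / so); // offset folded in, truncating convert
    return static_cast<int8_t>(std::clamp(q, -128, 127));      // svqxtnb/svqxtnt-style saturation
}
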
diff --git a/src/cpu/kernels/add/generic/sve2/qsymm16.cpp b/src/cpu/kernels/add/generic/sve2/qsymm16.cpp
index 8c48ded942..17a42c2138 100644
--- a/src/cpu/kernels/add/generic/sve2/qsymm16.cpp
+++ b/src/cpu/kernels/add/generic/sve2/qsymm16.cpp
@@ -26,15 +26,18 @@
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/Traits.h"
+
#include "src/core/NEON/SVEMath.h"
#include "src/core/NEON/wrapper/intrinsics/intrinsics.h"
+
#include <arm_sve.h>
namespace arm_compute
{
namespace cpu
{
-void add_qsymm16_sve2(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
+void add_qsymm16_sve2(
+ const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
ARM_COMPUTE_UNUSED(policy);
@@ -59,7 +62,7 @@ void add_qsymm16_sve2(const ITensor *src0, const ITensor *src1, ITensor *dst, co
const auto invvscaleo = svdup_n_f32(1.f / oq_info.scale);
const auto all_true_pg = svptrue_b16();
- if(is_broadcast_across_x)
+ if (is_broadcast_across_x)
{
const bool is_broadcast_input_2 = input2_win.x().step() == 0;
Window broadcast_win = is_broadcast_input_2 ? input2_win : input1_win;
@@ -74,39 +77,40 @@ void add_qsymm16_sve2(const ITensor *src0, const ITensor *src1, ITensor *dst, co
Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
Iterator output(dst, win);
- execute_window_loop(win, [&](const Coordinates &)
- {
- const auto non_broadcast_input_ptr = reinterpret_cast<const int16_t *>(non_broadcast_input.ptr());
- const auto output_ptr = reinterpret_cast<int16_t *>(output.ptr());
+ execute_window_loop(
+ win,
+ [&](const Coordinates &)
+ {
+ const auto non_broadcast_input_ptr = reinterpret_cast<const int16_t *>(non_broadcast_input.ptr());
+ const auto output_ptr = reinterpret_cast<int16_t *>(output.ptr());
- const int16_t broadcast_value = *reinterpret_cast<const int16_t *>(broadcast_input.ptr());
- const auto broadcast_value_vec = svdup_n_s16(broadcast_value);
+ const int16_t broadcast_value = *reinterpret_cast<const int16_t *>(broadcast_input.ptr());
+ const auto broadcast_value_vec = svdup_n_s16(broadcast_value);
- int x = window_start_x;
- svbool_t pg = svwhilelt_b16(x, window_end_x);
+ int x = window_start_x;
+ svbool_t pg = svwhilelt_b16(x, window_end_x);
- const auto bf_0 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svmovlb_s32(broadcast_value_vec)), vscale2);
- const auto bf_1 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svmovlt_s32(broadcast_value_vec)), vscale2);
+ const auto bf_0 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svmovlb_s32(broadcast_value_vec)), vscale2);
+ const auto bf_1 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svmovlt_s32(broadcast_value_vec)), vscale2);
- do
- {
- const auto a = svld1_s16(pg, non_broadcast_input_ptr + x);
- const auto af_0 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svmovlb_s32(a)), vscale1);
- const auto af_1 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svmovlt_s32(a)), vscale1);
+ do
+ {
+ const auto a = svld1_s16(pg, non_broadcast_input_ptr + x);
+ const auto af_0 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svmovlb_s32(a)), vscale1);
+ const auto af_1 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svmovlt_s32(a)), vscale1);
- const auto rf_0 = svcvt_s32_f32_z(pg, svmul_f32_z(pg, svadd_f32_z(pg, af_0, bf_0), invvscaleo));
- const auto rf_1 = svcvt_s32_f32_z(pg, svmul_f32_z(pg, svadd_f32_z(pg, af_1, bf_1), invvscaleo));
+ const auto rf_0 = svcvt_s32_f32_z(pg, svmul_f32_z(pg, svadd_f32_z(pg, af_0, bf_0), invvscaleo));
+ const auto rf_1 = svcvt_s32_f32_z(pg, svmul_f32_z(pg, svadd_f32_z(pg, af_1, bf_1), invvscaleo));
- const auto res = svqxtnt_s32(svqxtnb_s32(rf_0), rf_1);
+ const auto res = svqxtnt_s32(svqxtnb_s32(rf_0), rf_1);
- svst1_s16(pg, output_ptr + x, res);
+ svst1_s16(pg, output_ptr + x, res);
- x += svcnth();
- pg = svwhilelt_b16(x, window_end_x);
- }
- while(svptest_any(all_true_pg, pg));
- },
- broadcast_input, non_broadcast_input, output);
+ x += svcnth();
+ pg = svwhilelt_b16(x, window_end_x);
+ } while (svptest_any(all_true_pg, pg));
+ },
+ broadcast_input, non_broadcast_input, output);
}
else
{
@@ -118,37 +122,38 @@ void add_qsymm16_sve2(const ITensor *src0, const ITensor *src1, ITensor *dst, co
Iterator input2(src1, input2_win);
Iterator output(dst, win);
- execute_window_loop(win, [&](const Coordinates &)
- {
- const auto input1_ptr = reinterpret_cast<const int16_t *>(input1.ptr());
- const auto input2_ptr = reinterpret_cast<const int16_t *>(input2.ptr());
- const auto output_ptr = reinterpret_cast<int16_t *>(output.ptr());
-
- int x = window_start_x;
- svbool_t pg = svwhilelt_b16(x, window_end_x);
- do
+ execute_window_loop(
+ win,
+ [&](const Coordinates &)
{
- auto a = svld1_s16(pg, input1_ptr + x);
- auto b = svld1_s16(pg, input2_ptr + x);
-
- const auto af_0 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svmovlb_s32(a)), vscale1);
- const auto af_1 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svmovlt_s32(a)), vscale1);
-
- const auto bf_0 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svmovlb_s32(b)), vscale2);
- const auto bf_1 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svmovlt_s32(b)), vscale2);
-
- const auto rf_0 = svcvt_s32_f32_z(pg, svmul_f32_z(pg, svadd_f32_z(pg, af_0, bf_0), invvscaleo));
- const auto rf_1 = svcvt_s32_f32_z(pg, svmul_f32_z(pg, svadd_f32_z(pg, af_1, bf_1), invvscaleo));
-
- const auto res = svqxtnt_s32(svqxtnb_s32(rf_0), rf_1);
- svst1_s16(pg, output_ptr + x, res);
-
- x += svcnth();
- pg = svwhilelt_b16(x, window_end_x);
- }
- while(svptest_any(all_true_pg, pg));
- },
- input1, input2, output);
+ const auto input1_ptr = reinterpret_cast<const int16_t *>(input1.ptr());
+ const auto input2_ptr = reinterpret_cast<const int16_t *>(input2.ptr());
+ const auto output_ptr = reinterpret_cast<int16_t *>(output.ptr());
+
+ int x = window_start_x;
+ svbool_t pg = svwhilelt_b16(x, window_end_x);
+ do
+ {
+ auto a = svld1_s16(pg, input1_ptr + x);
+ auto b = svld1_s16(pg, input2_ptr + x);
+
+ const auto af_0 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svmovlb_s32(a)), vscale1);
+ const auto af_1 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svmovlt_s32(a)), vscale1);
+
+ const auto bf_0 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svmovlb_s32(b)), vscale2);
+ const auto bf_1 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svmovlt_s32(b)), vscale2);
+
+ const auto rf_0 = svcvt_s32_f32_z(pg, svmul_f32_z(pg, svadd_f32_z(pg, af_0, bf_0), invvscaleo));
+ const auto rf_1 = svcvt_s32_f32_z(pg, svmul_f32_z(pg, svadd_f32_z(pg, af_1, bf_1), invvscaleo));
+
+ const auto res = svqxtnt_s32(svqxtnb_s32(rf_0), rf_1);
+ svst1_s16(pg, output_ptr + x, res);
+
+ x += svcnth();
+ pg = svwhilelt_b16(x, window_end_x);
+ } while (svptest_any(all_true_pg, pg));
+ },
+ input1, input2, output);
}
}
} // namespace cpu
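
QSYMM16 is symmetric, so unlike the QASYMM8 kernels above there are no voffset vectors to subtract and requantization is a plain multiply rather than a multiply-add. The two per-vector patterns from the hunks above, side by side as standalone sketches:

#include <arm_sve.h>

// Asymmetric requantize: offset + sum / oscale (the svmla form).
svint32_t requant_asymm(svbool_t pg, svfloat32_t sum, svfloat32_t voffseto, svfloat32_t invvscaleo)
{
    return svcvt_s32_f32_z(pg, svmla_f32_z(pg, voffseto, sum, invvscaleo));
}

// Symmetric requantize: sum / oscale, no offset (the svmul form used here).
svint32_t requant_symm(svbool_t pg, svfloat32_t sum, svfloat32_t invvscaleo)
{
    return svcvt_s32_f32_z(pg, svmul_f32_z(pg, sum, invvscaleo));
}
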
diff --git a/src/cpu/kernels/add/list.h b/src/cpu/kernels/add/list.h
index 7cdb70fd9e..1040c39a41 100644
--- a/src/cpu/kernels/add/list.h
+++ b/src/cpu/kernels/add/list.h
@@ -31,8 +31,9 @@ namespace arm_compute
{
namespace cpu
{
-#define DECLARE_ADD_KERNEL(func_name) \
- void func_name(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
+#define DECLARE_ADD_KERNEL(func_name) \
+ void func_name(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, \
+ const Window &window)
DECLARE_ADD_KERNEL(add_qasymm8_neon);
DECLARE_ADD_KERNEL(add_qasymm8_signed_neon);
@@ -55,4 +56,4 @@ DECLARE_ADD_KERNEL(add_qsymm16_sve2);
} // namespace cpu
} // namespace arm_compute
-#endif // SRC_CORE_KERNELS_ADD_LIST_H
\ No newline at end of file
+#endif // SRC_CORE_KERNELS_ADD_LIST_H
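
The DECLARE_ADD_KERNEL reflow changes only source layout: the backslash continuations are consumed by the preprocessor, so each invocation still expands to the single shared prototype. For example, DECLARE_ADD_KERNEL(add_qasymm8_neon); expands to:

void add_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);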