author     Omar Al Khatib <omar.alkhatib@arm.com>  2022-12-20 14:36:45 +0000
committer  Omar Al Khatib <omar.alkhatib@arm.com>  2022-12-29 09:14:15 +0000
commit     939b21ad4b9ed15d43b4ee8b17484e57ed55a01f (patch)
tree       6c90f78ad73161ffd0f54956a77fa50ef1916819
parent     bb1ab0505452691d0e536921cd4c4e2ce32e40ed (diff)
Use CPU quantized addition kernel for quantized subtraction
Resolves: [COMPMID-5629]

Signed-off-by: Omar Al Khatib <omar.alkhatib@arm.com>
Change-Id: I061ea5bdafa3a01e66ff869d158f26a38d19e125
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8835
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--  src/cpu/kernels/CpuSubKernel.cpp                      45
-rw-r--r--  src/cpu/kernels/CpuSubKernel.h                        11
-rw-r--r--  src/cpu/kernels/add/generic/neon/impl.cpp            541
-rw-r--r--  src/cpu/kernels/add/generic/neon/impl.h               11
-rw-r--r--  src/cpu/kernels/add/generic/neon/qasymm8.cpp         182
-rw-r--r--  src/cpu/kernels/add/generic/neon/qasymm8_signed.cpp  182
-rw-r--r--  src/cpu/kernels/sub/neon/list.h                       10
-rw-r--r--  src/cpu/kernels/sub/neon/qasymm8.cpp                 203
-rw-r--r--  src/cpu/kernels/sub/neon/qasymm8_signed.cpp          205
9 files changed, 540 insertions(+), 850 deletions(-)
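
The change itself is small in concept: quantized subtraction is expressed as quantized addition with the second input's rescale factor negated, so the existing addition kernels (including the fixed-point path) can be reused. A minimal scalar sketch of that identity, assuming the same dequantize-scale-requantize formula used in the kernels below (names are illustrative, not library API):

#include <algorithm>
#include <cmath>
#include <cstdint>

// Sketch: one QASYMM8 element through the float path of add_sub_qasymm8_neon().
// sa/oa, sb/ob, so/oo are the uniform quantization scale/offset of each tensor.
uint8_t quantized_add_sub(uint8_t a, uint8_t b,
                          float sa, int32_t oa,   // input 0 quantization
                          float sb, int32_t ob,   // input 1 quantization
                          float so, int32_t oo,   // output quantization
                          bool is_addition)
{
    const float scale1 = sa / so;
    // Subtraction == addition with the second scale negated.
    const float scale2 = is_addition ? (sb / so) : -(sb / so);
    const float offset = float(oo) - scale1 * float(oa) - scale2 * float(ob);
    const float result = float(a) * scale1 + float(b) * scale2 + offset;
    return uint8_t(std::clamp<long>(std::lround(result), 0L, 255L));
}
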
diff --git a/src/cpu/kernels/CpuSubKernel.cpp b/src/cpu/kernels/CpuSubKernel.cpp
index 37a087f115..875d613dca 100644
--- a/src/cpu/kernels/CpuSubKernel.cpp
+++ b/src/cpu/kernels/CpuSubKernel.cpp
@@ -29,14 +29,15 @@
#include "src/core/common/Registrars.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
+#include "src/cpu/kernels/add/generic/neon/impl.h"
#include "src/cpu/kernels/sub/neon/list.h"
#if defined(ENABLE_FP32_KERNELS)
namespace
{
- static constexpr size_t default_mws_N1_fp32_neon = 24385;
- static constexpr size_t default_mws_V1_fp32_neon = 40520;
-}
+static constexpr size_t default_mws_N1_fp32_neon = 24385;
+static constexpr size_t default_mws_V1_fp32_neon = 40520;
+} // namespace
#endif /* ENABLE_FP32_KERNELS */
namespace arm_compute
@@ -47,46 +48,59 @@ namespace kernels
{
namespace
{
+using CpuSubKernelDataTypeISASelectorData = CpuAddKernelDataTypeISASelectorData;
+using CpuSubKernelDataTypeISASelectorDataPtr = CpuAddKernelDataTypeISASelectorDataPtr;
+
static const std::vector<CpuSubKernel::SubKernel> available_kernels =
{
{
"neon_fp32_sub",
- [](const DataTypeISASelectorData & data) { return (data.dt == DataType::F32); },
+ [](const CpuSubKernelDataTypeISASelectorData & data) { return (data.dt == DataType::F32); },
REGISTER_FP32_NEON(arm_compute::cpu::sub_same_neon<float>)
},
{
"neon_fp16_sub",
- [](const DataTypeISASelectorData & data) { return (data.dt == DataType::F16) && data.isa.fp16; },
+ [](const CpuSubKernelDataTypeISASelectorData & data) { return (data.dt == DataType::F16) && data.isa.fp16; },
REGISTER_FP16_NEON(arm_compute::cpu::sub_same_neon<float16_t>)
},
{
"neon_u8_sub",
- [](const DataTypeISASelectorData & data) { return (data.dt == DataType::U8); },
+ [](const CpuSubKernelDataTypeISASelectorData & data) { return (data.dt == DataType::U8); },
REGISTER_INTEGER_NEON(arm_compute::cpu::sub_same_neon<uint8_t>)
},
{
"neon_s16_sub",
- [](const DataTypeISASelectorData & data) { return (data.dt == DataType::S16); },
+ [](const CpuSubKernelDataTypeISASelectorData & data) { return (data.dt == DataType::S16); },
REGISTER_INTEGER_NEON(arm_compute::cpu::sub_same_neon<int16_t>)
},
{
"neon_s32_sub",
- [](const DataTypeISASelectorData & data) { return (data.dt == DataType::S32); },
+ [](const CpuSubKernelDataTypeISASelectorData & data) { return (data.dt == DataType::S32); },
REGISTER_INTEGER_NEON(arm_compute::cpu::sub_same_neon<int32_t>)
},
{
+ "neon_qu8_sub_fixedpoint",
+ [](const CpuSubKernelDataTypeISASelectorData & data) { return ((data.dt == DataType::QASYMM8) && data.can_use_fixedpoint); },
+ REGISTER_QASYMM8_NEON(arm_compute::cpu::sub_qasymm8_neon_fixedpoint)
+ },
+ {
+ "neon_qs8_sub_fixedpoint",
+ [](const CpuSubKernelDataTypeISASelectorData & data) { return ((data.dt == DataType::QASYMM8_SIGNED) && data.can_use_fixedpoint); },
+ REGISTER_QASYMM8_SIGNED_NEON(arm_compute::cpu::sub_qasymm8_signed_neon_fixedpoint)
+ },
+ {
"neon_qu8_sub",
- [](const DataTypeISASelectorData & data) { return (data.dt == DataType::QASYMM8); },
+ [](const CpuSubKernelDataTypeISASelectorData & data) { return (data.dt == DataType::QASYMM8); },
REGISTER_QASYMM8_NEON(arm_compute::cpu::sub_qasymm8_neon)
},
{
"neon_qs8_sub",
- [](const DataTypeISASelectorData & data) { return (data.dt == DataType::QASYMM8_SIGNED); },
+ [](const CpuSubKernelDataTypeISASelectorData & data) { return (data.dt == DataType::QASYMM8_SIGNED); },
REGISTER_QASYMM8_SIGNED_NEON(arm_compute::cpu::sub_qasymm8_signed_neon)
},
{
"neon_qs16_sub",
- [](const DataTypeISASelectorData & data) { return (data.dt == DataType::QSYMM16); },
+ [](const CpuSubKernelDataTypeISASelectorData & data) { return (data.dt == DataType::QSYMM16); },
REGISTER_QSYMM16_NEON(arm_compute::cpu::sub_qsymm16_neon)
},
};
@@ -99,7 +113,8 @@ inline Status validate_arguments(const ITensorInfo &src0, const ITensorInfo &src
DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&src0, &src1);
- const auto *uk = CpuSubKernel::get_implementation(DataTypeISASelectorData{ src0.data_type(), CPUInfo::get().get_isa() });
+ const auto can_use_fixedpoint = sub_q8_neon_fixedpoint_possible(&src0, &src1, &dst);
+ const auto uk = CpuSubKernel::get_implementation<CpuSubKernelDataTypeISASelectorData>(CpuSubKernelDataTypeISASelectorData{ src0.data_type(), CPUInfo::get().get_isa(), can_use_fixedpoint });
ARM_COMPUTE_RETURN_ERROR_ON(uk == nullptr || uk->ukernel == nullptr);
@@ -131,7 +146,9 @@ void CpuSubKernel::configure(const ITensorInfo *src0, const ITensorInfo *src1, I
set_shape_if_empty(*dst, out_shape);
set_data_type_if_unknown(*dst, src0->data_type());
- const auto *uk = CpuSubKernel::get_implementation(DataTypeISASelectorData{ src0->data_type(), CPUInfo::get().get_isa() });
+ const auto can_use_fixedpoint = sub_q8_neon_fixedpoint_possible(src0, src1, dst);
+ const auto uk = CpuSubKernel::get_implementation<CpuSubKernelDataTypeISASelectorData>(CpuSubKernelDataTypeISASelectorData{ src0->data_type(), CPUInfo::get().get_isa(), can_use_fixedpoint });
+
ARM_COMPUTE_ERROR_ON_NULLPTR(uk);
_policy = policy;
@@ -180,7 +197,7 @@ size_t CpuSubKernel::get_mws(const CPUInfo &platform, size_t thread_count) const
return std::max(static_cast<size_t>(1), mws);
}
}
-#else /* ENABLE_FP32_KERNELS */
+#else /* ENABLE_FP32_KERNELS */
ARM_COMPUTE_UNUSED(platform);
#endif /* ENABLE_FP32_KERNELS */
return ICPPKernel::default_mws;
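
Note on ordering: the new *_fixedpoint entries are registered before the generic quantized ones, and the selector data now carries can_use_fixedpoint. Assuming the kernel lookup returns the first entry whose predicate matches (which is what this ordering relies on), the selection reduces to a first-match scan; a sketch, not the library's actual get_implementation():

#include <vector>

// Illustrative first-match selection over a table like available_kernels above.
template <typename SelectorData, typename KernelEntry>
const KernelEntry *first_matching_kernel(const std::vector<KernelEntry> &kernels,
                                         const SelectorData             &data)
{
    for(const auto &entry : kernels)
    {
        if(entry.is_selected(data))
        {
            return &entry; // e.g. "neon_qu8_sub_fixedpoint" is tried before "neon_qu8_sub"
        }
    }
    return nullptr;
}
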
diff --git a/src/cpu/kernels/CpuSubKernel.h b/src/cpu/kernels/CpuSubKernel.h
index 3d80b34279..cd209d1837 100644
--- a/src/cpu/kernels/CpuSubKernel.h
+++ b/src/cpu/kernels/CpuSubKernel.h
@@ -37,7 +37,8 @@ namespace kernels
class CpuSubKernel : public ICpuKernel<CpuSubKernel>
{
private:
- using SubKernelPtr = std::add_pointer<void(const ITensor *, const ITensor *, ITensor *, const ConvertPolicy &, const Window &)>::type;
+ using SubKernelPtr = std::add_pointer<void(const ITensor *, const ITensor *, ITensor *, const ConvertPolicy &, const Window &)>::type;
+ using CpuSubKernelDataTypeISASelectorDataPtr = CpuAddKernelDataTypeISASelectorDataPtr;
public:
CpuSubKernel() = default;
@@ -70,7 +71,7 @@ public:
static Status validate(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *dst, ConvertPolicy policy);
// Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
+ void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
const char *name() const override;
/** Return minimum workload size of the relevant kernel
@@ -84,9 +85,9 @@ public:
struct SubKernel
{
- const char *name;
- const DataTypeISASelectorPtr is_selected;
- SubKernelPtr ukernel;
+ const char *name;
+ const CpuSubKernelDataTypeISASelectorDataPtr is_selected;
+ SubKernelPtr ukernel;
};
static const std::vector<SubKernel> &get_available_kernels();
diff --git a/src/cpu/kernels/add/generic/neon/impl.cpp b/src/cpu/kernels/add/generic/neon/impl.cpp
index 5adb39682e..a1734d7dd6 100644
--- a/src/cpu/kernels/add/generic/neon/impl.cpp
+++ b/src/cpu/kernels/add/generic/neon/impl.cpp
@@ -64,7 +64,8 @@ void add_same_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const
Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
Iterator output(dst, win);
- execute_window_loop(win, [&](const Coordinates &)
+ execute_window_loop(
+ win, [&](const Coordinates &)
{
const auto non_broadcast_input_ptr = reinterpret_cast<const ScalarType *>(non_broadcast_input.ptr());
const auto output_ptr = reinterpret_cast<ScalarType *>(output.ptr());
@@ -100,7 +101,8 @@ void add_same_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const
Iterator input2(src1, input2_win);
Iterator output(dst, win);
- execute_window_loop(win, [&](const Coordinates &)
+ execute_window_loop(
+ win, [&](const Coordinates &)
{
const auto input1_ptr = reinterpret_cast<const ScalarType *>(input1.ptr());
const auto input2_ptr = reinterpret_cast<const ScalarType *>(input2.ptr());
@@ -128,27 +130,38 @@ void add_same_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const
}
}
+bool sub_q8_neon_fixedpoint_possible(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *dst)
+{
+ return add_sub_q8_neon_fixedpoint_possible(src0, src1, dst, false);
+}
+
bool add_q8_neon_fixedpoint_possible(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *dst)
{
+ return add_sub_q8_neon_fixedpoint_possible(src0, src1, dst, true);
+}
+
+bool add_sub_q8_neon_fixedpoint_possible(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *dst, bool is_addition)
+{
const auto iq0 = src0->quantization_info().uniform();
const auto iq1 = src1->quantization_info().uniform();
- const auto oq = dst->quantization_info().uniform();
+ const auto oq = dst->quantization_info().uniform();
const auto scale0 = iq0.scale / oq.scale;
const auto scale1 = iq1.scale / oq.scale;
- if(scale0 < -31.f || scale0 > 31.f || scale1 < -31.f || scale1 > 31.f)
+ if(scale0 < -15.f || scale0 > 15.f || scale1 < -15.f || scale1 > 15.f)
{
- // The scale factor cannot be stored as 6.10 signed fixed-point number.
+ // The scale factor cannot be stored as 5.11 signed fixed-point number.
return false;
}
const auto offset = float(oq.offset) - scale0 * float(iq0.offset) - scale1 * float(iq1.offset);
- const auto max_acc = (std::abs(scale0) + std::abs(scale1)) * 256.f + std::abs(offset);
- if(max_acc > 2097151.f) // 2^21 - 1
+ const auto max_acc = is_addition ? ((std::abs(scale0) + std::abs(scale1)) * 256.f + std::abs(offset)) : ((std::abs(scale0) - std::abs(scale1)) * 256.f + std::abs(offset));
+
+ if(max_acc > 1048575.f) // 2^20 - 1
{
- // It might not be possible to store the result as 22.10 signed fixed-point number.
+ // It might not be possible to store the result as 21.11 signed fixed-point number.
return false;
}
@@ -158,6 +171,12 @@ bool add_q8_neon_fixedpoint_possible(const ITensorInfo *src0, const ITensorInfo
template <typename ScalarType>
void add_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
+ add_sub_q8_neon_fixedpoint<ScalarType>(src0, src1, dst, policy, window, true /*is_addition*/);
+}
+
+template <typename ScalarType>
+void add_sub_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window, bool is_addition)
+{
ARM_COMPUTE_UNUSED(policy);
const auto in0_info = src0->info();
@@ -174,36 +193,38 @@ void add_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *d
Window win = window;
win.set(Window::DimX, Window::Dimension(0, 1, 1));
- constexpr int window_step_x = 16;
- const auto window_start_x = window.x().start();
- const auto window_end_x = window.x().end();
- const auto is_broadcast_across_x = in0_shape.x() != in1_shape.x();
-
- const auto iq0_info = in0_info->quantization_info().uniform();
- const auto iq1_info = in1_info->quantization_info().uniform();
- const auto oq_info = dst->info()->quantization_info().uniform();
+ constexpr int window_step_x = 16;
+ const auto window_start_x = window.x().start();
+ const auto window_end_x = window.x().end();
+ const auto is_broadcast_across_x = in0_shape.x() != in1_shape.x();
+ const auto iq0_info = in0_info->quantization_info().uniform();
+ const auto iq1_info = in1_info->quantization_info().uniform();
+ const auto oq_info = dst->info()->quantization_info().uniform();
const auto in0_scale = iq0_info.scale / oq_info.scale;
- const auto in1_scale = iq1_info.scale / oq_info.scale;
- const auto offset = float(oq_info.offset) - in0_scale * float(iq0_info.offset) - in1_scale * float(iq1_info.offset);
+ const auto in1_scale = is_addition ? (iq1_info.scale / oq_info.scale) : (-(iq1_info.scale / oq_info.scale));
+ const auto offset = float(oq_info.offset) - in0_scale * float(iq0_info.offset) - in1_scale * float(iq1_info.offset);
+
+ constexpr float _2pow11 = 2048;
+ const auto in0_scale_5p11 = static_cast<int16_t>(support::cpp11::lround(in0_scale * _2pow11));
+ const auto in1_scale_5p11 = static_cast<int16_t>(support::cpp11::lround(in1_scale * _2pow11));
+ const auto offset_21p11 = static_cast<int32_t>(support::cpp11::lround(offset * _2pow11));
- const auto in0_scale_6p10 = static_cast<int16_t>(support::cpp11::lround(in0_scale * 1024.f));
- const auto in1_scale_6p10 = static_cast<int16_t>(support::cpp11::lround(in1_scale * 1024.f));
- const auto offset_22p10 = static_cast<int32_t>(support::cpp11::lround(offset * 1024.f));
+ constexpr uint8_t shift_amount_remainder = 3;
if(is_broadcast_across_x)
{
// Prefix: a = non-broadcast, b = broadcast.
const auto is_broadcast_input_1 = in1_win.x().step() == 0;
- auto a_win = is_broadcast_input_1 ? in0_win : in1_win;
- auto b_win = is_broadcast_input_1 ? in1_win : in0_win;
- const auto a_tensor = is_broadcast_input_1 ? src0 : src1;
- const auto b_tensor = is_broadcast_input_1 ? src1 : src0;
+ auto a_win = is_broadcast_input_1 ? in0_win : in1_win;
+ auto b_win = is_broadcast_input_1 ? in1_win : in0_win;
+ const auto a_tensor = is_broadcast_input_1 ? src0 : src1;
+ const auto b_tensor = is_broadcast_input_1 ? src1 : src0;
- const auto a_scale_6p10 = is_broadcast_input_1 ? in0_scale_6p10 : in1_scale_6p10;
- const auto b_scale = is_broadcast_input_1 ? in1_scale : in0_scale;
- const auto a_vscale_6p10 = wrapper::vdup_n(a_scale_6p10, wrapper::traits::vector_64_tag());
+ const auto a_scale_5p11 = is_broadcast_input_1 ? in0_scale_5p11 : in1_scale_5p11;
+ const auto b_scale = is_broadcast_input_1 ? in1_scale : in0_scale;
+ const auto a_vscale_5p11 = wrapper::vdup_n(a_scale_5p11, wrapper::traits::vector_64_tag());
#ifndef __aarch64__
const auto a_scale = is_broadcast_input_1 ? in0_scale : in1_scale;
@@ -216,17 +237,18 @@ void add_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *d
Iterator b_input_it(b_tensor, b_win);
Iterator out_it(dst, win);
- execute_window_loop(win, [&](const Coordinates &)
+ execute_window_loop(
+ win, [&](const Coordinates &)
{
- const auto a_ptr = reinterpret_cast<const ScalarType *>(a_input_it.ptr());
- const auto b_ptr = reinterpret_cast<const ScalarType *>(b_input_it.ptr());
+ const auto a_ptr = reinterpret_cast<const ScalarType *>(a_input_it.ptr());
+ const auto b_ptr = reinterpret_cast<const ScalarType *>(b_input_it.ptr());
const auto out_ptr = reinterpret_cast<ScalarType *>(out_it.ptr());
- const auto b_val = *b_ptr;
- const auto b_scaled = b_scale * b_val;
- const auto b_scaled_22p10 = static_cast<int32_t>(support::cpp11::lround(b_scaled * 1024.f));
- const auto b_scaled_offseted_22p10 = b_scaled_22p10 + offset_22p10;
- const auto b_vscaled_offseted_22p10 = wrapper::vdup_n(b_scaled_offseted_22p10, wrapper::traits::vector_128_tag());
+ const auto b_val = *b_ptr;
+ const auto b_scaled = b_scale * b_val;
+ const auto b_scaled_21p11 = static_cast<int32_t>(support::cpp11::lround(b_scaled * _2pow11));
+ const auto b_scaled_offseted_21p11 = b_scaled_21p11 + offset_21p11;
+ const auto b_vscaled_offseted_21p11 = wrapper::vdup_n(b_scaled_offseted_21p11, wrapper::traits::vector_128_tag());
#ifndef __aarch64__
const auto b_scaled_offseted = b_scaled + offset;
@@ -245,26 +267,23 @@ void add_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *d
// Multiply the non-broadcast elements by the scale factor, add the scaled broadcast elements and the offset.
// Widen and store the result in 32-bit integer.
- const auto vout_22p10_00 = wrapper::vmlal(b_vscaled_offseted_22p10, wrapper::vgetlow(a_vin_16p0_0), a_vscale_6p10);
- const auto vout_22p10_01 = wrapper::vmlal(b_vscaled_offseted_22p10, wrapper::vgethigh(a_vin_16p0_0), a_vscale_6p10);
- const auto vout_22p10_10 = wrapper::vmlal(b_vscaled_offseted_22p10, wrapper::vgetlow(a_vin_16p0_1), a_vscale_6p10);
- const auto vout_22p10_11 = wrapper::vmlal(b_vscaled_offseted_22p10, wrapper::vgethigh(a_vin_16p0_1), a_vscale_6p10);
+ const auto vout_21p11_00 = wrapper::vmlal(b_vscaled_offseted_21p11, wrapper::vgetlow(a_vin_16p0_0), a_vscale_5p11);
+ const auto vout_21p11_01 = wrapper::vmlal(b_vscaled_offseted_21p11, wrapper::vgethigh(a_vin_16p0_0), a_vscale_5p11);
+ const auto vout_21p11_10 = wrapper::vmlal(b_vscaled_offseted_21p11, wrapper::vgetlow(a_vin_16p0_1), a_vscale_5p11);
+ const auto vout_21p11_11 = wrapper::vmlal(b_vscaled_offseted_21p11, wrapper::vgethigh(a_vin_16p0_1), a_vscale_5p11);
- // Remove 2 bits of the fractional part, round, narrow to 16-bit and saturate the result.
+ // Remove 3 bits of the fractional part, round, narrow to 16-bit and saturate the result.
const auto vout_8p8_0 = wrapper::vcombine(
- wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_00),
- wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_01)
- );
+ wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_00),
+ wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_01));
const auto vout_8p8_1 = wrapper::vcombine(
- wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_10),
- wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_11)
- );
+ wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_10),
+ wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_11));
// Remove 8 bits of the fractional part, round, narrow to 8-bit and saturate the result.
const auto vout_8p0 = wrapper::vcombine(
- wrapper::vqrshrn<8>(vout_8p8_0),
- wrapper::vqrshrn<8>(vout_8p8_1)
- );
+ wrapper::vqrshrn<8>(vout_8p8_0),
+ wrapper::vqrshrn<8>(vout_8p8_1));
// Store the result.
wrapper::vstore(out_ptr + x, vout_8p0);
@@ -274,8 +293,8 @@ void add_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *d
for(; x < window_end_x; ++x)
{
#ifdef __aarch64__
- out_ptr[x] = wrapper::vqrshrn<8>(wrapper::vqrshrn_ex<2, ScalarType>(int32_t(a_ptr[x]) * a_scale_6p10 + b_scaled_offseted_22p10));
-#else // __aarch64__
+ out_ptr[x] = wrapper::vqrshrn<8>(wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(int32_t(a_ptr[x]) * a_scale_5p11 + b_scaled_offseted_21p11));
+#else // __aarch64__
out_ptr[x] = utility::clamp<int, ScalarType>(support::cpp11::lround(float(a_ptr[x]) * a_scale + b_scaled_offseted));
#endif // __aarch64__
}
@@ -284,9 +303,9 @@ void add_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *d
}
else
{
- const auto vscale0_6p10 = wrapper::vdup_n(in0_scale_6p10, wrapper::traits::vector_64_tag());
- const auto vscale1_6p10 = wrapper::vdup_n(in1_scale_6p10, wrapper::traits::vector_64_tag());
- const auto voffset_22p10 = wrapper::vdup_n(offset_22p10, wrapper::traits::vector_128_tag());
+ const auto vscale0_5p11 = wrapper::vdup_n(in0_scale_5p11, wrapper::traits::vector_64_tag());
+ const auto vscale1_5p11 = wrapper::vdup_n(in1_scale_5p11, wrapper::traits::vector_64_tag());
+ const auto voffset_21p11 = wrapper::vdup_n(offset_21p11, wrapper::traits::vector_128_tag());
// Clear the x dimension on the execution window as we process the whole row each iteration.
in0_win.set(Window::DimX, Window::Dimension(0, 1, 1));
@@ -296,7 +315,8 @@ void add_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *d
Iterator in1_it(src1, in1_win);
Iterator out_it(dst, win);
- execute_window_loop(win, [&](const Coordinates &)
+ execute_window_loop(
+ win, [&](const Coordinates &)
{
const auto in0_ptr = reinterpret_cast<const ScalarType *>(in0_it.ptr());
const auto in1_ptr = reinterpret_cast<const ScalarType *>(in1_it.ptr());
@@ -318,31 +338,28 @@ void add_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *d
// Multiply the input elements by the scale factor and add the offset.
// Widen and store the result in 32-bit integer.
- const auto vscaled0_offseted_22p10_00 = wrapper::vmlal(voffset_22p10, wrapper::vgetlow(vin0_16p0_0), vscale0_6p10);
- const auto vscaled0_offseted_22p10_01 = wrapper::vmlal(voffset_22p10, wrapper::vgethigh(vin0_16p0_0), vscale0_6p10);
- const auto vscaled0_offseted_22p10_10 = wrapper::vmlal(voffset_22p10, wrapper::vgetlow(vin0_16p0_1), vscale0_6p10);
- const auto vscaled0_offseted_22p10_11 = wrapper::vmlal(voffset_22p10, wrapper::vgethigh(vin0_16p0_1), vscale0_6p10);
+ const auto vscaled0_offseted_21p11_00 = wrapper::vmlal(voffset_21p11, wrapper::vgetlow(vin0_16p0_0), vscale0_5p11);
+ const auto vscaled0_offseted_21p11_01 = wrapper::vmlal(voffset_21p11, wrapper::vgethigh(vin0_16p0_0), vscale0_5p11);
+ const auto vscaled0_offseted_21p11_10 = wrapper::vmlal(voffset_21p11, wrapper::vgetlow(vin0_16p0_1), vscale0_5p11);
+ const auto vscaled0_offseted_21p11_11 = wrapper::vmlal(voffset_21p11, wrapper::vgethigh(vin0_16p0_1), vscale0_5p11);
- const auto vout_22p10_00 = wrapper::vmlal(vscaled0_offseted_22p10_00, wrapper::vgetlow(vin1_16p0_0), vscale1_6p10);
- const auto vout_22p10_01 = wrapper::vmlal(vscaled0_offseted_22p10_01, wrapper::vgethigh(vin1_16p0_0), vscale1_6p10);
- const auto vout_22p10_10 = wrapper::vmlal(vscaled0_offseted_22p10_10, wrapper::vgetlow(vin1_16p0_1), vscale1_6p10);
- const auto vout_22p10_11 = wrapper::vmlal(vscaled0_offseted_22p10_11, wrapper::vgethigh(vin1_16p0_1), vscale1_6p10);
+ const auto vout_21p11_00 = wrapper::vmlal(vscaled0_offseted_21p11_00, wrapper::vgetlow(vin1_16p0_0), vscale1_5p11);
+ const auto vout_21p11_01 = wrapper::vmlal(vscaled0_offseted_21p11_01, wrapper::vgethigh(vin1_16p0_0), vscale1_5p11);
+ const auto vout_21p11_10 = wrapper::vmlal(vscaled0_offseted_21p11_10, wrapper::vgetlow(vin1_16p0_1), vscale1_5p11);
+ const auto vout_21p11_11 = wrapper::vmlal(vscaled0_offseted_21p11_11, wrapper::vgethigh(vin1_16p0_1), vscale1_5p11);
- // Remove 2 bits of the fractional part, round, narrow to 16-bit and saturate the result.
+ // Remove 3 bits of the fractional part, round, narrow to 16-bit and saturate the result.
const auto vout_8p8_0 = wrapper::vcombine(
- wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_00),
- wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_01)
- );
+ wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_00),
+ wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_01));
const auto vout_8p8_1 = wrapper::vcombine(
- wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_10),
- wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_11)
- );
+ wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_10),
+ wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_11));
// Remove 8 bits of the fractional part, round, narrow to 8-bit and saturate the result.
const auto vout_8p0 = wrapper::vcombine(
- wrapper::vqrshrn<8>(vout_8p8_0),
- wrapper::vqrshrn<8>(vout_8p8_1)
- );
+ wrapper::vqrshrn<8>(vout_8p8_0),
+ wrapper::vqrshrn<8>(vout_8p8_1));
// Store the result.
wrapper::vstore(out_ptr + x, vout_8p0);
@@ -352,8 +369,8 @@ void add_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *d
for(; x < window_end_x; ++x)
{
#ifdef __aarch64__
- out_ptr[x] = wrapper::vqrshrn<8>(wrapper::vqrshrn_ex<2, ScalarType>(int32_t(in0_ptr[x]) * in0_scale_6p10 + int32_t(in1_ptr[x]) * in1_scale_6p10 + offset_22p10));
-#else // __aarch64__
+ out_ptr[x] = wrapper::vqrshrn<8>(wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(int32_t(in0_ptr[x]) * in0_scale_5p11 + int32_t(in1_ptr[x]) * in1_scale_5p11 + offset_21p11));
+#else // __aarch64__
out_ptr[x] = utility::clamp<int, ScalarType>(support::cpp11::lround(float(in0_ptr[x]) * in0_scale + float(in1_ptr[x]) * in1_scale + offset));
#endif // __aarch64__
}
@@ -362,6 +379,372 @@ void add_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *d
}
}
+void add_sub_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window, bool is_addition)
+{
+ ARM_COMPUTE_UNUSED(policy);
+
+ // Create input windows
+ Window input1_win = window.broadcast_if_dimension_le_one(src0->info()->tensor_shape());
+ Window input2_win = window.broadcast_if_dimension_le_one(src1->info()->tensor_shape());
+
+ // Clear X Dimension on execution window as we handle manually
+ Window win = window;
+ win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ constexpr int window_step_x = 16;
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+ const bool is_broadcast_across_x = src0->info()->tensor_shape().x() != src1->info()->tensor_shape().x();
+
+ const UniformQuantizationInfo iq1_info = src0->info()->quantization_info().uniform();
+ const UniformQuantizationInfo iq2_info = src1->info()->quantization_info().uniform();
+ const UniformQuantizationInfo oq_info = dst->info()->quantization_info().uniform();
+
+ const auto scale1 = iq1_info.scale / oq_info.scale;
+ const auto scale2 = is_addition ? (iq2_info.scale / oq_info.scale) : (-(iq2_info.scale / oq_info.scale));
+ const auto offset = float(oq_info.offset) - scale1 * float(iq1_info.offset) - scale2 * float(iq2_info.offset);
+
+ if(is_broadcast_across_x)
+ {
+ const bool is_broadcast_input_2 = input2_win.x().step() == 0;
+ Window broadcast_win = is_broadcast_input_2 ? input2_win : input1_win;
+ Window non_broadcast_win = !is_broadcast_input_2 ? input2_win : input1_win;
+ const ITensor *broadcast_tensor = is_broadcast_input_2 ? src1 : src0;
+ const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? src1 : src0;
+
+ const auto af_scale = is_broadcast_input_2 ? scale1 : scale2;
+ const auto bf_scale = is_broadcast_input_2 ? scale2 : scale1;
+ const auto vscale1 = vdupq_n_f32(af_scale);
+
+ // Clear X Dimension on execution window as we handle manually
+ non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ Iterator broadcast_input(broadcast_tensor, broadcast_win);
+ Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
+ Iterator output(dst, win);
+
+ execute_window_loop(
+ win, [&](const Coordinates &)
+ {
+ const auto non_broadcast_input_ptr = non_broadcast_input.ptr();
+ const auto output_ptr = output.ptr();
+
+ const auto broadcast_value = *broadcast_input.ptr();
+ const auto bf = vdupq_n_f32(float(broadcast_value) * scale2 + offset);
+ const auto bfs = float(broadcast_value) * bf_scale + offset;
+
+ // Compute S elements per iteration
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const uint8x16_t a = vld1q_u8(non_broadcast_input_ptr + x);
+
+ const auto a_u16_0 = vmovl_u8(vget_low_u8(a));
+ const auto a_u16_1 = vmovl_u8(vget_high_u8(a));
+
+ const auto af_0 = vmlaq_f32(bf, vcvtq_f32_u32(vmovl_u16(vget_low_u16(a_u16_0))), vscale1);
+ const auto af_1 = vmlaq_f32(bf, vcvtq_f32_u32(vmovl_u16(vget_high_u16(a_u16_0))), vscale1);
+ const auto af_2 = vmlaq_f32(bf, vcvtq_f32_u32(vmovl_u16(vget_low_u16(a_u16_1))), vscale1);
+ const auto af_3 = vmlaq_f32(bf, vcvtq_f32_u32(vmovl_u16(vget_high_u16(a_u16_1))), vscale1);
+
+ int32x4_t rf_0{};
+ int32x4_t rf_1{};
+ int32x4_t rf_2{};
+ int32x4_t rf_3{};
+
+#ifdef __aarch64__
+ rf_0 = vcvtnq_s32_f32(af_0);
+ rf_1 = vcvtnq_s32_f32(af_1);
+ rf_2 = vcvtnq_s32_f32(af_2);
+ rf_3 = vcvtnq_s32_f32(af_3);
+#else //__aarch64__
+ rf_0 = vcvtq_s32_f32(af_0);
+ rf_1 = vcvtq_s32_f32(af_1);
+ rf_2 = vcvtq_s32_f32(af_2);
+ rf_3 = vcvtq_s32_f32(af_3);
+#endif //__aarch64__
+
+ const uint8x8_t pa = vqmovun_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
+ const uint8x8_t pb = vqmovun_s16(vcombine_s16(vqmovn_s32(rf_2), vqmovn_s32(rf_3)));
+ vst1q_u8(output_ptr + x, vcombine_u8(pa, pb));
+ }
+
+ // Compute left-over elements
+ for(; x < window_end_x; ++x)
+ {
+ const auto result = float(non_broadcast_input_ptr[x]) * af_scale + bfs;
+#ifdef __aarch64__
+ output_ptr[x] = utility::clamp<int, uint8_t>(support::cpp11::lround(result));
+#else // __aarch64__
+ output_ptr[x] = utility::clamp<int, uint8_t>(support::cpp11::trunc(result));
+#endif // __aarch64__
+ }
+ },
+ broadcast_input, non_broadcast_input, output);
+ }
+ else
+ {
+ // Clear X Dimension on execution window as we handle manually
+ input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
+ input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ Iterator input1(src0, input1_win);
+ Iterator input2(src1, input2_win);
+ Iterator output(dst, win);
+
+ const auto vscale1 = vdupq_n_f32(scale1);
+ const auto vscale2 = vdupq_n_f32(scale2);
+ const auto voffset = vdupq_n_f32(offset);
+
+ execute_window_loop(
+ win, [&](const Coordinates &)
+ {
+ const auto input1_ptr = input1.ptr();
+ const auto input2_ptr = input2.ptr();
+ const auto output_ptr = output.ptr();
+
+ // Compute S elements per iteration
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const uint8x16_t a = vld1q_u8(input1_ptr + x);
+ const uint8x16_t b = vld1q_u8(input2_ptr + x);
+
+ const auto a_u16_0 = vmovl_u8(vget_low_u8(a));
+ const auto a_u16_1 = vmovl_u8(vget_high_u8(a));
+ const auto b_u16_0 = vmovl_u8(vget_low_u8(b));
+ const auto b_u16_1 = vmovl_u8(vget_high_u8(b));
+
+ const auto af_0 = vmlaq_f32(voffset, vcvtq_f32_u32(vmovl_u16(vget_low_u16(a_u16_0))), vscale1);
+ const auto af_1 = vmlaq_f32(voffset, vcvtq_f32_u32(vmovl_u16(vget_high_u16(a_u16_0))), vscale1);
+ const auto af_2 = vmlaq_f32(voffset, vcvtq_f32_u32(vmovl_u16(vget_low_u16(a_u16_1))), vscale1);
+ const auto af_3 = vmlaq_f32(voffset, vcvtq_f32_u32(vmovl_u16(vget_high_u16(a_u16_1))), vscale1);
+
+ const auto bf_0 = vmlaq_f32(af_0, vcvtq_f32_u32(vmovl_u16(vget_low_u16(b_u16_0))), vscale2);
+ const auto bf_1 = vmlaq_f32(af_1, vcvtq_f32_u32(vmovl_u16(vget_high_u16(b_u16_0))), vscale2);
+ const auto bf_2 = vmlaq_f32(af_2, vcvtq_f32_u32(vmovl_u16(vget_low_u16(b_u16_1))), vscale2);
+ const auto bf_3 = vmlaq_f32(af_3, vcvtq_f32_u32(vmovl_u16(vget_high_u16(b_u16_1))), vscale2);
+
+ int32x4_t rf_0{};
+ int32x4_t rf_1{};
+ int32x4_t rf_2{};
+ int32x4_t rf_3{};
+
+#ifdef __aarch64__
+ rf_0 = vcvtnq_s32_f32(bf_0);
+ rf_1 = vcvtnq_s32_f32(bf_1);
+ rf_2 = vcvtnq_s32_f32(bf_2);
+ rf_3 = vcvtnq_s32_f32(bf_3);
+#else //__aarch64__
+ rf_0 = vcvtq_s32_f32(bf_0);
+ rf_1 = vcvtq_s32_f32(bf_1);
+ rf_2 = vcvtq_s32_f32(bf_2);
+ rf_3 = vcvtq_s32_f32(bf_3);
+#endif //__aarch64__
+
+ const uint8x8_t pa = vqmovun_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
+ const uint8x8_t pb = vqmovun_s16(vcombine_s16(vqmovn_s32(rf_2), vqmovn_s32(rf_3)));
+ vst1q_u8(output_ptr + x, vcombine_u8(pa, pb));
+ }
+
+ // Compute left-over elements
+ for(; x < window_end_x; ++x)
+ {
+ const auto result = float(input1_ptr[x]) * scale1 + float(input2_ptr[x]) * scale2 + offset;
+#ifdef __aarch64__
+ output_ptr[x] = utility::clamp<int, uint8_t>(support::cpp11::lround(result));
+#else // __aarch64__
+ output_ptr[x] = utility::clamp<int, uint8_t>(support::cpp11::trunc(result));
+#endif // __aarch64__
+ }
+ },
+ input1, input2, output);
+ }
+}
+
+void add_sub_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window, bool is_addition)
+{
+ ARM_COMPUTE_UNUSED(policy);
+
+ // Create input windows
+ Window input1_win = window.broadcast_if_dimension_le_one(src0->info()->tensor_shape());
+ Window input2_win = window.broadcast_if_dimension_le_one(src1->info()->tensor_shape());
+
+ // Clear X Dimension on execution window as we handle manually
+ Window win = window;
+ win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ constexpr int window_step_x = 16;
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+ const bool is_broadcast_across_x = src0->info()->tensor_shape().x() != src1->info()->tensor_shape().x();
+
+ const UniformQuantizationInfo iq1_info = src0->info()->quantization_info().uniform();
+ const UniformQuantizationInfo iq2_info = src1->info()->quantization_info().uniform();
+ const UniformQuantizationInfo oq_info = dst->info()->quantization_info().uniform();
+
+ const auto scale1 = iq1_info.scale / oq_info.scale;
+ const auto scale2 = is_addition ? (iq2_info.scale / oq_info.scale) : (-(iq2_info.scale / oq_info.scale));
+ const auto offset = float(oq_info.offset) - scale1 * float(iq1_info.offset) - scale2 * float(iq2_info.offset);
+
+ if(is_broadcast_across_x)
+ {
+ const bool is_broadcast_input_2 = input2_win.x().step() == 0;
+ Window broadcast_win = is_broadcast_input_2 ? input2_win : input1_win;
+ Window non_broadcast_win = !is_broadcast_input_2 ? input2_win : input1_win;
+ const ITensor *broadcast_tensor = is_broadcast_input_2 ? src1 : src0;
+ const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? src1 : src0;
+
+ const auto af_scale = is_broadcast_input_2 ? scale1 : scale2;
+ const auto bf_scale = is_broadcast_input_2 ? scale2 : scale1;
+ const auto vscale1 = vdupq_n_f32(af_scale);
+
+ // Clear X Dimension on execution window as we handle manually
+ non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ Iterator broadcast_input(broadcast_tensor, broadcast_win);
+ Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
+ Iterator output(dst, win);
+
+ execute_window_loop(
+ win, [&](const Coordinates &)
+ {
+ const auto non_broadcast_input_ptr = reinterpret_cast<const int8_t *>(non_broadcast_input.ptr());
+ const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
+
+ const auto broadcast_value = *reinterpret_cast<const int8_t *>(broadcast_input.ptr());
+ const auto bf = vdupq_n_f32(float(broadcast_value) * scale2 + offset);
+ const auto bfs = float(broadcast_value) * bf_scale + offset;
+
+ // Compute S elements per iteration
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const int8x16_t a = vld1q_s8(non_broadcast_input_ptr + x);
+
+ const auto a_s16_0 = vmovl_s8(vget_low_s8(a));
+ const auto a_s16_1 = vmovl_s8(vget_high_s8(a));
+
+ const auto af_0 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_0))), vscale1);
+ const auto af_1 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_0))), vscale1);
+ const auto af_2 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_1))), vscale1);
+ const auto af_3 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_1))), vscale1);
+
+ int32x4_t rf_0{};
+ int32x4_t rf_1{};
+ int32x4_t rf_2{};
+ int32x4_t rf_3{};
+
+#ifdef __aarch64__
+ rf_0 = vcvtnq_s32_f32(af_0);
+ rf_1 = vcvtnq_s32_f32(af_1);
+ rf_2 = vcvtnq_s32_f32(af_2);
+ rf_3 = vcvtnq_s32_f32(af_3);
+#else //__aarch64__
+ rf_0 = vcvtq_s32_f32(af_0);
+ rf_1 = vcvtq_s32_f32(af_1);
+ rf_2 = vcvtq_s32_f32(af_2);
+ rf_3 = vcvtq_s32_f32(af_3);
+#endif //__aarch64__
+
+ const int8x8_t pa = vqmovn_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
+ const int8x8_t pb = vqmovn_s16(vcombine_s16(vqmovn_s32(rf_2), vqmovn_s32(rf_3)));
+ vst1q_s8(output_ptr + x, vcombine_s8(pa, pb));
+ }
+
+ // Compute left-over elements
+ for(; x < window_end_x; ++x)
+ {
+ const auto result = float(non_broadcast_input_ptr[x]) * af_scale + bfs;
+#ifdef __aarch64__
+ output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::lround(result));
+#else // __aarch64__
+ output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::trunc(result));
+#endif // __aarch64__
+ }
+ },
+ broadcast_input, non_broadcast_input, output);
+ }
+ else
+ {
+ // Clear X Dimension on execution window as we handle manually
+ input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
+ input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ Iterator input1(src0, input1_win);
+ Iterator input2(src1, input2_win);
+ Iterator output(dst, win);
+
+ const auto vscale1 = vdupq_n_f32(scale1);
+ const auto vscale2 = vdupq_n_f32(scale2);
+ const auto voffset = vdupq_n_f32(offset);
+
+ execute_window_loop(
+ win, [&](const Coordinates &)
+ {
+ const auto input1_ptr = reinterpret_cast<const int8_t *>(input1.ptr());
+ const auto input2_ptr = reinterpret_cast<const int8_t *>(input2.ptr());
+ const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
+
+ // Compute S elements per iteration
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const int8x16_t a = vld1q_s8(input1_ptr + x);
+ const int8x16_t b = vld1q_s8(input2_ptr + x);
+
+ const auto a_s16_0 = vmovl_s8(vget_low_s8(a));
+ const auto a_s16_1 = vmovl_s8(vget_high_s8(a));
+ const auto b_s16_0 = vmovl_s8(vget_low_s8(b));
+ const auto b_s16_1 = vmovl_s8(vget_high_s8(b));
+
+ const auto af_0 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_0))), vscale1);
+ const auto af_1 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_0))), vscale1);
+ const auto af_2 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_1))), vscale1);
+ const auto af_3 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_1))), vscale1);
+
+ const auto bf_0 = vmlaq_f32(af_0, vcvtq_f32_s32(vmovl_s16(vget_low_s16(b_s16_0))), vscale2);
+ const auto bf_1 = vmlaq_f32(af_1, vcvtq_f32_s32(vmovl_s16(vget_high_s16(b_s16_0))), vscale2);
+ const auto bf_2 = vmlaq_f32(af_2, vcvtq_f32_s32(vmovl_s16(vget_low_s16(b_s16_1))), vscale2);
+ const auto bf_3 = vmlaq_f32(af_3, vcvtq_f32_s32(vmovl_s16(vget_high_s16(b_s16_1))), vscale2);
+
+ int32x4_t rf_0{};
+ int32x4_t rf_1{};
+ int32x4_t rf_2{};
+ int32x4_t rf_3{};
+
+#ifdef __aarch64__
+ rf_0 = vcvtnq_s32_f32(bf_0);
+ rf_1 = vcvtnq_s32_f32(bf_1);
+ rf_2 = vcvtnq_s32_f32(bf_2);
+ rf_3 = vcvtnq_s32_f32(bf_3);
+#else //__aarch64__
+ rf_0 = vcvtq_s32_f32(bf_0);
+ rf_1 = vcvtq_s32_f32(bf_1);
+ rf_2 = vcvtq_s32_f32(bf_2);
+ rf_3 = vcvtq_s32_f32(bf_3);
+#endif //__aarch64__
+
+ const int8x8_t pa = vqmovn_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
+ const int8x8_t pb = vqmovn_s16(vcombine_s16(vqmovn_s32(rf_2), vqmovn_s32(rf_3)));
+ vst1q_s8(output_ptr + x, vcombine_s8(pa, pb));
+ }
+
+ // Compute left-over elements
+ for(; x < window_end_x; ++x)
+ {
+ const auto result = float(input1_ptr[x]) * scale1 + float(input2_ptr[x]) * scale2 + offset;
+#ifdef __aarch64__
+ output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::lround(result));
+#else // __aarch64__
+ output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::trunc(result));
+#endif // __aarch64__
+ }
+ },
+ input1, input2, output);
+ }
+}
+
template void add_same_neon<float>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
template void add_same_neon<uint8_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
template void add_same_neon<int32_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
@@ -374,5 +757,11 @@ template void add_same_neon<float16_t>(const ITensor *src0, const ITensor *src1,
template void add_q8_neon_fixedpoint<int8_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
template void add_q8_neon_fixedpoint<uint8_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
+template void add_sub_q8_neon_fixedpoint<int8_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window, bool is_addition);
+template void add_sub_q8_neon_fixedpoint<uint8_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window, bool is_addition);
+
+void add_sub_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window, bool is_addition);
+void add_sub_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window, bool is_addition);
+
} // namespace cpu
} // namespace arm_compute
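
The fixed-point path above moves from a Q6.10 scale with a Q22.10 accumulator to Q5.11/Q21.11: one integer bit is traded for an extra fractional bit, which is why the representable scale range shrinks to roughly ±15 (from ±31), the accumulator bound to 2^20 - 1 (from 2^21 - 1), and shift_amount_remainder grows from 2 to 3. A scalar sketch of the same arithmetic, assuming it mirrors the vector code above (illustrative only, not library API):

#include <algorithm>
#include <cmath>
#include <cstdint>

// Sketch: one QASYMM8_SIGNED element through the Q5.11 / Q21.11 fixed-point path.
int8_t fixedpoint_add_sub(int8_t in0, int8_t in1,
                          float scale0, float scale1, float offset)
{
    constexpr float two_pow_11 = 2048.f; // 2^11 fractional steps

    // Scales only fit in an int16_t Q5.11 value when |scale| < 16,
    // hence the [-15, 15] guard in add_sub_q8_neon_fixedpoint_possible().
    const int16_t s0_5p11   = int16_t(std::lround(scale0 * two_pow_11));
    const int16_t s1_5p11   = int16_t(std::lround(scale1 * two_pow_11));
    const int32_t off_21p11 = int32_t(std::lround(offset * two_pow_11));

    // 8-bit input times Q5.11 scale accumulates into a Q21.11 int32_t.
    const int32_t acc_21p11 = int32_t(in0) * s0_5p11 + int32_t(in1) * s1_5p11 + off_21p11;

    // Drop 3 fractional bits with rounding (the vqrshrn_ex<3> step), then the
    // remaining 8 (the vqrshrn<8> step), and saturate to 8 bits.
    const int32_t acc_8p8 = (acc_21p11 + (1 << 2)) >> 3;
    const int32_t out     = (acc_8p8 + (1 << 7)) >> 8;
    return int8_t(std::clamp<int32_t>(out, -128, 127));
}
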
diff --git a/src/cpu/kernels/add/generic/neon/impl.h b/src/cpu/kernels/add/generic/neon/impl.h
index 91f347ff9c..d544ef5728 100644
--- a/src/cpu/kernels/add/generic/neon/impl.h
+++ b/src/cpu/kernels/add/generic/neon/impl.h
@@ -35,8 +35,19 @@ void add_same_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const
bool add_q8_neon_fixedpoint_possible(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *dst);
+bool sub_q8_neon_fixedpoint_possible(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *dst);
+
+bool add_sub_q8_neon_fixedpoint_possible(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *dst, bool is_addition);
+
+void add_sub_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window, bool is_addition);
+
+void add_sub_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window, bool is_addition);
+
template <typename ScalarType>
void add_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
+
+template <typename ScalarType>
+void add_sub_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window, bool is_addition);
} // namespace cpu
} // namespace arm_compute
#endif // SRC_CORE_NEON_KERNELS_ADD_IMPL_H
\ No newline at end of file
diff --git a/src/cpu/kernels/add/generic/neon/qasymm8.cpp b/src/cpu/kernels/add/generic/neon/qasymm8.cpp
index d8b4bca292..69cca956c8 100644
--- a/src/cpu/kernels/add/generic/neon/qasymm8.cpp
+++ b/src/cpu/kernels/add/generic/neon/qasymm8.cpp
@@ -21,12 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Types.h"
-#include "arm_compute/core/utils/misc/Traits.h"
-#include "src/core/NEON/wrapper/intrinsics/intrinsics.h"
-#include "src/core/helpers/WindowHelpers.h"
#include "src/cpu/kernels/add/generic/neon/impl.h"
namespace arm_compute
@@ -35,183 +31,7 @@ namespace cpu
{
void add_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
- ARM_COMPUTE_UNUSED(policy);
-
- // Create input windows
- Window input1_win = window.broadcast_if_dimension_le_one(src0->info()->tensor_shape());
- Window input2_win = window.broadcast_if_dimension_le_one(src1->info()->tensor_shape());
-
- // Clear X Dimension on execution window as we handle manually
- Window win = window;
- win.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- constexpr int window_step_x = 16;
- const auto window_start_x = static_cast<int>(window.x().start());
- const auto window_end_x = static_cast<int>(window.x().end());
- const bool is_broadcast_across_x = src0->info()->tensor_shape().x() != src1->info()->tensor_shape().x();
-
- const UniformQuantizationInfo iq1_info = src0->info()->quantization_info().uniform();
- const UniformQuantizationInfo iq2_info = src1->info()->quantization_info().uniform();
- const UniformQuantizationInfo oq_info = dst->info()->quantization_info().uniform();
-
- const auto scale1 = iq1_info.scale / oq_info.scale;
- const auto scale2 = iq2_info.scale / oq_info.scale;
- const auto offset = float(oq_info.offset) - scale1 * float(iq1_info.offset) - scale2 * float(iq2_info.offset);
-
- if(is_broadcast_across_x)
- {
- const bool is_broadcast_input_2 = input2_win.x().step() == 0;
- Window broadcast_win = is_broadcast_input_2 ? input2_win : input1_win;
- Window non_broadcast_win = !is_broadcast_input_2 ? input2_win : input1_win;
- const ITensor *broadcast_tensor = is_broadcast_input_2 ? src1 : src0;
- const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? src1 : src0;
-
- const auto af_scale = is_broadcast_input_2 ? scale1 : scale2;
- const auto bf_scale = is_broadcast_input_2 ? scale2 : scale1;
- const auto vscale1 = vdupq_n_f32(af_scale);
-
- // Clear X Dimension on execution window as we handle manually
- non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- Iterator broadcast_input(broadcast_tensor, broadcast_win);
- Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
- Iterator output(dst, win);
-
- execute_window_loop(win, [&](const Coordinates &)
- {
- const auto non_broadcast_input_ptr = non_broadcast_input.ptr();
- const auto output_ptr = output.ptr();
-
- const auto broadcast_value = *broadcast_input.ptr();
- const auto bf = vdupq_n_f32(float(broadcast_value) * scale2 + offset);
- const auto bfs = float(broadcast_value) * bf_scale + offset;
-
- // Compute S elements per iteration
- int x = window_start_x;
- for(; x <= (window_end_x - window_step_x); x += window_step_x)
- {
- const uint8x16_t a = vld1q_u8(non_broadcast_input_ptr + x);
-
- const auto a_u16_0 = vmovl_u8(vget_low_u8(a));
- const auto a_u16_1 = vmovl_u8(vget_high_u8(a));
-
- const auto af_0 = vmlaq_f32(bf, vcvtq_f32_u32(vmovl_u16(vget_low_u16(a_u16_0))), vscale1);
- const auto af_1 = vmlaq_f32(bf, vcvtq_f32_u32(vmovl_u16(vget_high_u16(a_u16_0))), vscale1);
- const auto af_2 = vmlaq_f32(bf, vcvtq_f32_u32(vmovl_u16(vget_low_u16(a_u16_1))), vscale1);
- const auto af_3 = vmlaq_f32(bf, vcvtq_f32_u32(vmovl_u16(vget_high_u16(a_u16_1))), vscale1);
-
- int32x4_t rf_0{};
- int32x4_t rf_1{};
- int32x4_t rf_2{};
- int32x4_t rf_3{};
-
-#ifdef __aarch64__
- rf_0 = vcvtnq_s32_f32(af_0);
- rf_1 = vcvtnq_s32_f32(af_1);
- rf_2 = vcvtnq_s32_f32(af_2);
- rf_3 = vcvtnq_s32_f32(af_3);
-#else //__aarch64__
- rf_0 = vcvtq_s32_f32(af_0);
- rf_1 = vcvtq_s32_f32(af_1);
- rf_2 = vcvtq_s32_f32(af_2);
- rf_3 = vcvtq_s32_f32(af_3);
-#endif //__aarch64__
-
- const uint8x8_t pa = vqmovun_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
- const uint8x8_t pb = vqmovun_s16(vcombine_s16(vqmovn_s32(rf_2), vqmovn_s32(rf_3)));
- vst1q_u8(output_ptr + x, vcombine_u8(pa, pb));
- }
-
- // Compute left-over elements
- for(; x < window_end_x; ++x)
- {
- const auto result = float(non_broadcast_input_ptr[x]) * af_scale + bfs;
-#ifdef __aarch64__
- output_ptr[x] = utility::clamp<int, uint8_t>(support::cpp11::lround(result));
-#else // __aarch64__
- output_ptr[x] = utility::clamp<int, uint8_t>(support::cpp11::trunc(result));
-#endif // __aarch64__
- }
- },
- broadcast_input, non_broadcast_input, output);
- }
- else
- {
- // Clear X Dimension on execution window as we handle manually
- input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
- input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- Iterator input1(src0, input1_win);
- Iterator input2(src1, input2_win);
- Iterator output(dst, win);
-
- const auto vscale1 = vdupq_n_f32(scale1);
- const auto vscale2 = vdupq_n_f32(scale2);
- const auto voffset = vdupq_n_f32(offset);
-
- execute_window_loop(win, [&](const Coordinates &)
- {
- const auto input1_ptr = input1.ptr();
- const auto input2_ptr = input2.ptr();
- const auto output_ptr = output.ptr();
-
- // Compute S elements per iteration
- int x = window_start_x;
- for(; x <= (window_end_x - window_step_x); x += window_step_x)
- {
- const uint8x16_t a = vld1q_u8(input1_ptr + x);
- const uint8x16_t b = vld1q_u8(input2_ptr + x);
-
- const auto a_u16_0 = vmovl_u8(vget_low_u8(a));
- const auto a_u16_1 = vmovl_u8(vget_high_u8(a));
- const auto b_u16_0 = vmovl_u8(vget_low_u8(b));
- const auto b_u16_1 = vmovl_u8(vget_high_u8(b));
-
- const auto af_0 = vmlaq_f32(voffset, vcvtq_f32_u32(vmovl_u16(vget_low_u16(a_u16_0))), vscale1);
- const auto af_1 = vmlaq_f32(voffset, vcvtq_f32_u32(vmovl_u16(vget_high_u16(a_u16_0))), vscale1);
- const auto af_2 = vmlaq_f32(voffset, vcvtq_f32_u32(vmovl_u16(vget_low_u16(a_u16_1))), vscale1);
- const auto af_3 = vmlaq_f32(voffset, vcvtq_f32_u32(vmovl_u16(vget_high_u16(a_u16_1))), vscale1);
-
- const auto bf_0 = vmlaq_f32(af_0, vcvtq_f32_u32(vmovl_u16(vget_low_u16(b_u16_0))), vscale2);
- const auto bf_1 = vmlaq_f32(af_1, vcvtq_f32_u32(vmovl_u16(vget_high_u16(b_u16_0))), vscale2);
- const auto bf_2 = vmlaq_f32(af_2, vcvtq_f32_u32(vmovl_u16(vget_low_u16(b_u16_1))), vscale2);
- const auto bf_3 = vmlaq_f32(af_3, vcvtq_f32_u32(vmovl_u16(vget_high_u16(b_u16_1))), vscale2);
-
- int32x4_t rf_0{};
- int32x4_t rf_1{};
- int32x4_t rf_2{};
- int32x4_t rf_3{};
-
-#ifdef __aarch64__
- rf_0 = vcvtnq_s32_f32(bf_0);
- rf_1 = vcvtnq_s32_f32(bf_1);
- rf_2 = vcvtnq_s32_f32(bf_2);
- rf_3 = vcvtnq_s32_f32(bf_3);
-#else //__aarch64__
- rf_0 = vcvtq_s32_f32(bf_0);
- rf_1 = vcvtq_s32_f32(bf_1);
- rf_2 = vcvtq_s32_f32(bf_2);
- rf_3 = vcvtq_s32_f32(bf_3);
-#endif //__aarch64__
-
- const uint8x8_t pa = vqmovun_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
- const uint8x8_t pb = vqmovun_s16(vcombine_s16(vqmovn_s32(rf_2), vqmovn_s32(rf_3)));
- vst1q_u8(output_ptr + x, vcombine_u8(pa, pb));
- }
-
- // Compute left-over elements
- for(; x < window_end_x; ++x)
- {
- const auto result = float(input1_ptr[x]) * scale1 + float(input2_ptr[x]) * scale2 + offset;
-#ifdef __aarch64__
- output_ptr[x] = utility::clamp<int, uint8_t>(support::cpp11::lround(result));
-#else // __aarch64__
- output_ptr[x] = utility::clamp<int, uint8_t>(support::cpp11::trunc(result));
-#endif // __aarch64__
- }
- },
- input1, input2, output);
- }
+ add_sub_qasymm8_neon(src0, src1, dst, policy, window, true /*is_addition*/);
}
} // namespace cpu
} // namespace arm_compute
\ No newline at end of file
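
With the body moved into add_sub_qasymm8_neon(), the addition entry point above becomes a one-line wrapper. Presumably the new subtraction kernels listed in the diffstat (src/cpu/kernels/sub/neon/qasymm8.cpp and qasymm8_signed.cpp, whose hunks are truncated at the end of this diff) are the mirror-image wrappers; a sketch of what such a wrapper would look like, not the patch's literal text:

void sub_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
    // Same shared implementation, with the second scale negated internally.
    add_sub_qasymm8_neon(src0, src1, dst, policy, window, false /*is_addition*/);
}
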
diff --git a/src/cpu/kernels/add/generic/neon/qasymm8_signed.cpp b/src/cpu/kernels/add/generic/neon/qasymm8_signed.cpp
index a285e483ed..dfdf8fe85b 100644
--- a/src/cpu/kernels/add/generic/neon/qasymm8_signed.cpp
+++ b/src/cpu/kernels/add/generic/neon/qasymm8_signed.cpp
@@ -21,12 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Types.h"
-#include "arm_compute/core/utils/misc/Traits.h"
-#include "src/core/NEON/wrapper/intrinsics/intrinsics.h"
-#include "src/core/helpers/WindowHelpers.h"
#include "src/cpu/kernels/add/generic/neon/impl.h"
namespace arm_compute
@@ -35,183 +31,7 @@ namespace cpu
{
void add_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
- ARM_COMPUTE_UNUSED(policy);
-
- // Create input windows
- Window input1_win = window.broadcast_if_dimension_le_one(src0->info()->tensor_shape());
- Window input2_win = window.broadcast_if_dimension_le_one(src1->info()->tensor_shape());
-
- // Clear X Dimension on execution window as we handle manually
- Window win = window;
- win.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- constexpr int window_step_x = 16;
- const auto window_start_x = static_cast<int>(window.x().start());
- const auto window_end_x = static_cast<int>(window.x().end());
- const bool is_broadcast_across_x = src0->info()->tensor_shape().x() != src1->info()->tensor_shape().x();
-
- const UniformQuantizationInfo iq1_info = src0->info()->quantization_info().uniform();
- const UniformQuantizationInfo iq2_info = src1->info()->quantization_info().uniform();
- const UniformQuantizationInfo oq_info = dst->info()->quantization_info().uniform();
-
- const auto scale1 = iq1_info.scale / oq_info.scale;
- const auto scale2 = iq2_info.scale / oq_info.scale;
- const auto offset = float(oq_info.offset) - scale1 * float(iq1_info.offset) - scale2 * float(iq2_info.offset);
-
- if(is_broadcast_across_x)
- {
- const bool is_broadcast_input_2 = input2_win.x().step() == 0;
- Window broadcast_win = is_broadcast_input_2 ? input2_win : input1_win;
- Window non_broadcast_win = !is_broadcast_input_2 ? input2_win : input1_win;
- const ITensor *broadcast_tensor = is_broadcast_input_2 ? src1 : src0;
- const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? src1 : src0;
-
- const auto af_scale = is_broadcast_input_2 ? scale1 : scale2;
- const auto bf_scale = is_broadcast_input_2 ? scale2 : scale1;
- const auto vscale1 = vdupq_n_f32(af_scale);
-
- // Clear X Dimension on execution window as we handle manually
- non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- Iterator broadcast_input(broadcast_tensor, broadcast_win);
- Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
- Iterator output(dst, win);
-
- execute_window_loop(win, [&](const Coordinates &)
- {
- const auto non_broadcast_input_ptr = reinterpret_cast<const int8_t *>(non_broadcast_input.ptr());
- const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
-
- const auto broadcast_value = *reinterpret_cast<const int8_t *>(broadcast_input.ptr());
- const auto bf = vdupq_n_f32(float(broadcast_value) * scale2 + offset);
- const auto bfs = float(broadcast_value) * bf_scale + offset;
-
- // Compute S elements per iteration
- int x = window_start_x;
- for(; x <= (window_end_x - window_step_x); x += window_step_x)
- {
- const int8x16_t a = vld1q_s8(non_broadcast_input_ptr + x);
-
- const auto a_s16_0 = vmovl_s8(vget_low_s8(a));
- const auto a_s16_1 = vmovl_s8(vget_high_s8(a));
-
- const auto af_0 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_0))), vscale1);
- const auto af_1 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_0))), vscale1);
- const auto af_2 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_1))), vscale1);
- const auto af_3 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_1))), vscale1);
-
- int32x4_t rf_0{};
- int32x4_t rf_1{};
- int32x4_t rf_2{};
- int32x4_t rf_3{};
-
-#ifdef __aarch64__
- rf_0 = vcvtnq_s32_f32(af_0);
- rf_1 = vcvtnq_s32_f32(af_1);
- rf_2 = vcvtnq_s32_f32(af_2);
- rf_3 = vcvtnq_s32_f32(af_3);
-#else //__aarch64__
- rf_0 = vcvtq_s32_f32(af_0);
- rf_1 = vcvtq_s32_f32(af_1);
- rf_2 = vcvtq_s32_f32(af_2);
- rf_3 = vcvtq_s32_f32(af_3);
-#endif //__aarch64__
-
- const int8x8_t pa = vqmovn_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
- const int8x8_t pb = vqmovn_s16(vcombine_s16(vqmovn_s32(rf_2), vqmovn_s32(rf_3)));
- vst1q_s8(output_ptr + x, vcombine_s8(pa, pb));
- }
-
- // Compute left-over elements
- for(; x < window_end_x; ++x)
- {
- const auto result = float(non_broadcast_input_ptr[x]) * af_scale + bfs;
-#ifdef __aarch64__
- output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::lround(result));
-#else // __aarch64__
- output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::trunc(result));
-#endif // __aarch64__
- }
- },
- broadcast_input, non_broadcast_input, output);
- }
- else
- {
- // Clear X Dimension on execution window as we handle manually
- input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
- input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- Iterator input1(src0, input1_win);
- Iterator input2(src1, input2_win);
- Iterator output(dst, win);
-
- const auto vscale1 = vdupq_n_f32(scale1);
- const auto vscale2 = vdupq_n_f32(scale2);
- const auto voffset = vdupq_n_f32(offset);
-
- execute_window_loop(win, [&](const Coordinates &)
- {
- const auto input1_ptr = reinterpret_cast<const int8_t *>(input1.ptr());
- const auto input2_ptr = reinterpret_cast<const int8_t *>(input2.ptr());
- const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
-
- // Compute S elements per iteration
- int x = window_start_x;
- for(; x <= (window_end_x - window_step_x); x += window_step_x)
- {
- const int8x16_t a = vld1q_s8(input1_ptr + x);
- const int8x16_t b = vld1q_s8(input2_ptr + x);
-
- const auto a_s16_0 = vmovl_s8(vget_low_s8(a));
- const auto a_s16_1 = vmovl_s8(vget_high_s8(a));
- const auto b_s16_0 = vmovl_s8(vget_low_s8(b));
- const auto b_s16_1 = vmovl_s8(vget_high_s8(b));
-
- const auto af_0 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_0))), vscale1);
- const auto af_1 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_0))), vscale1);
- const auto af_2 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_1))), vscale1);
- const auto af_3 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_1))), vscale1);
-
- const auto bf_0 = vmlaq_f32(af_0, vcvtq_f32_s32(vmovl_s16(vget_low_s16(b_s16_0))), vscale2);
- const auto bf_1 = vmlaq_f32(af_1, vcvtq_f32_s32(vmovl_s16(vget_high_s16(b_s16_0))), vscale2);
- const auto bf_2 = vmlaq_f32(af_2, vcvtq_f32_s32(vmovl_s16(vget_low_s16(b_s16_1))), vscale2);
- const auto bf_3 = vmlaq_f32(af_3, vcvtq_f32_s32(vmovl_s16(vget_high_s16(b_s16_1))), vscale2);
-
- int32x4_t rf_0{};
- int32x4_t rf_1{};
- int32x4_t rf_2{};
- int32x4_t rf_3{};
-
-#ifdef __aarch64__
- rf_0 = vcvtnq_s32_f32(bf_0);
- rf_1 = vcvtnq_s32_f32(bf_1);
- rf_2 = vcvtnq_s32_f32(bf_2);
- rf_3 = vcvtnq_s32_f32(bf_3);
-#else //__aarch64__
- rf_0 = vcvtq_s32_f32(bf_0);
- rf_1 = vcvtq_s32_f32(bf_1);
- rf_2 = vcvtq_s32_f32(bf_2);
- rf_3 = vcvtq_s32_f32(bf_3);
-#endif //__aarch64__
-
- const int8x8_t pa = vqmovn_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
- const int8x8_t pb = vqmovn_s16(vcombine_s16(vqmovn_s32(rf_2), vqmovn_s32(rf_3)));
- vst1q_s8(output_ptr + x, vcombine_s8(pa, pb));
- }
-
- // Compute left-over elements
- for(; x < window_end_x; ++x)
- {
- const auto result = float(input1_ptr[x]) * scale1 + float(input2_ptr[x]) * scale2 + offset;
-#ifdef __aarch64__
- output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::lround(result));
-#else // __aarch64__
- output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::trunc(result));
-#endif // __aarch64__
- }
- },
- input1, input2, output);
- }
+ add_sub_qasymm8_signed_neon(src0, src1, dst, policy, window, true /*is_addition*/);
}
} // namespace cpu
} // namespace arm_compute
\ No newline at end of file
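
[Editor's note] Both quantized subtraction paths in this patch now delegate to the shared add/sub helpers with an is_addition flag instead of keeping separate hand-rolled loops. The snippet below is only a minimal scalar sketch of that idea (dequantize, add or subtract, requantize with round-to-nearest, then saturate), mirroring the left-over-element tails the patch removes. The helper name and signature are illustrative, and negating the second operand's contribution when is_addition is false is an assumption about the shared helper, not the library's code.

// Illustrative only: one scalar routine serving both ops via is_addition.
#include <algorithm>
#include <cmath>
#include <cstdint>

static inline int8_t add_sub_one_qasymm8_signed(int8_t a, int8_t b,
                                                float scale1, float scale2, float offset,
                                                bool is_addition)
{
    // The removed scalar tails computed: result = a*scale1 + b*scale2 + offset,
    // with the scales/offset pre-folded from the input/output quantization info.
    const float bf     = is_addition ? float(b) * scale2 : -float(b) * scale2;
    const float result = float(a) * scale1 + bf + offset;
    // Round-to-nearest, as in the AArch64 branch of the removed code
    // (the 32-bit Arm branch fell back to truncation), then saturate to int8.
    return static_cast<int8_t>(std::clamp<long>(std::lround(result), -128, 127));
}
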
diff --git a/src/cpu/kernels/sub/neon/list.h b/src/cpu/kernels/sub/neon/list.h
index ac1346001a..f7e1a040bd 100644
--- a/src/cpu/kernels/sub/neon/list.h
+++ b/src/cpu/kernels/sub/neon/list.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -35,6 +35,8 @@ namespace cpu
#define DECLARE_SUB_KERNEL(func_name) \
void func_name(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
+DECLARE_SUB_KERNEL(sub_qasymm8_neon_fixedpoint);
+DECLARE_SUB_KERNEL(sub_qasymm8_signed_neon_fixedpoint);
DECLARE_SUB_KERNEL(sub_qasymm8_neon);
DECLARE_SUB_KERNEL(sub_qasymm8_signed_neon);
DECLARE_SUB_KERNEL(sub_qsymm16_neon);
@@ -81,7 +83,8 @@ void sub_same_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const
Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
Iterator output(dst, win);
- execute_window_loop(win, [&](const Coordinates &)
+ execute_window_loop(
+ win, [&](const Coordinates &)
{
const auto non_broadcast_input_ptr = reinterpret_cast<const T *>(non_broadcast_input.ptr());
const auto output_ptr = reinterpret_cast<T *>(output.ptr());
@@ -127,7 +130,8 @@ void sub_same_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const
Iterator input2(src1, input2_win);
Iterator output(dst, win);
- execute_window_loop(win, [&](const Coordinates &)
+ execute_window_loop(
+ win, [&](const Coordinates &)
{
const auto input1_ptr = reinterpret_cast<const T *>(input1.ptr());
const auto input2_ptr = reinterpret_cast<const T *>(input2.ptr());
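
[Editor's note] For readability, the two declarations added to list.h expand through the DECLARE_SUB_KERNEL macro shown above into plain function prototypes:

void sub_qasymm8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *dst,
                                 const ConvertPolicy &policy, const Window &window);
void sub_qasymm8_signed_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *dst,
                                        const ConvertPolicy &policy, const Window &window);
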
diff --git a/src/cpu/kernels/sub/neon/qasymm8.cpp b/src/cpu/kernels/sub/neon/qasymm8.cpp
index 8f4cd8bdbb..ea6e5826dd 100644
--- a/src/cpu/kernels/sub/neon/qasymm8.cpp
+++ b/src/cpu/kernels/sub/neon/qasymm8.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,209 +21,22 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Types.h"
-#include "arm_compute/core/utils/misc/Traits.h"
-#include "src/core/NEON/wrapper/intrinsics/intrinsics.h"
-#include "src/core/helpers/WindowHelpers.h"
+#include "src/cpu/kernels/add/generic/neon/impl.h"
namespace arm_compute
{
namespace cpu
{
-void sub_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
+void sub_qasymm8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
- ARM_COMPUTE_UNUSED(policy);
-
- // Create input windows
- Window input1_win = window.broadcast_if_dimension_le_one(src0->info()->tensor_shape());
- Window input2_win = window.broadcast_if_dimension_le_one(src1->info()->tensor_shape());
-
- // Clear X Dimension on execution window as we handle manually
- Window win = window;
- win.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- const int window_step_x = 16;
- const auto window_start_x = static_cast<int>(window.x().start());
- const auto window_end_x = static_cast<int>(window.x().end());
- const bool is_broadcast_across_x = src0->info()->tensor_shape().x() != src1->info()->tensor_shape().x();
-
- const UniformQuantizationInfo iq1_info = src0->info()->quantization_info().uniform();
- const UniformQuantizationInfo iq2_info = src1->info()->quantization_info().uniform();
- const UniformQuantizationInfo oq_info = dst->info()->quantization_info().uniform();
-
- const float32x4_t invvscaleo = vdupq_n_f32(1.f / oq_info.scale);
- const float32x4_t voffseto = vdupq_n_f32(oq_info.offset);
-
- if(is_broadcast_across_x)
- {
- const bool is_broadcast_input_2 = input2_win.x().step() == 0;
- Window broadcast_win = is_broadcast_input_2 ? input2_win : input1_win;
- Window non_broadcast_win = !is_broadcast_input_2 ? input2_win : input1_win;
- const ITensor *broadcast_tensor = is_broadcast_input_2 ? src1 : src0;
- const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? src1 : src0;
- const UniformQuantizationInfo broadcast_qinfo = broadcast_tensor->info()->quantization_info().uniform();
- const UniformQuantizationInfo non_broadcast_qinfo = non_broadcast_tensor->info()->quantization_info().uniform();
- const float32x4_t vscale1 = is_broadcast_input_2 ? vdupq_n_f32(iq1_info.scale) : vdupq_n_f32(iq2_info.scale);
- const float32x4_t vscale2 = is_broadcast_input_2 ? vdupq_n_f32(iq2_info.scale) : vdupq_n_f32(iq1_info.scale);
- const int32x4_t voffset1 = is_broadcast_input_2 ? vdupq_n_s32(iq1_info.offset) : vdupq_n_s32(iq2_info.offset);
- const int32x4_t voffset2 = is_broadcast_input_2 ? vdupq_n_s32(iq2_info.offset) : vdupq_n_s32(iq1_info.offset);
-
- // Clear X Dimension on execution window as we handle manually
- non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- Iterator broadcast_input(broadcast_tensor, broadcast_win);
- Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
- Iterator output(dst, win);
-
- execute_window_loop(win, [&](const Coordinates &)
- {
- const auto non_broadcast_input_ptr = reinterpret_cast<const uint8_t *>(non_broadcast_input.ptr());
- const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
-
- const auto broadcast_value = *reinterpret_cast<const uint8_t *>(broadcast_input.ptr());
- const auto broadcast_value_vec = wrapper::vdup_n(static_cast<uint8_t>(broadcast_value), wrapper::traits::vector_128_tag{});
-
- const float32x4x4_t bf =
- {
- {
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(wrapper::vmovl(wrapper::vgetlow(broadcast_value_vec))))), voffset2)), vscale2),
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(wrapper::vmovl(wrapper::vgetlow(broadcast_value_vec))))), voffset2)), vscale2),
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(wrapper::vmovl(wrapper::vgethigh(broadcast_value_vec))))), voffset2)), vscale2),
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(wrapper::vmovl(wrapper::vgethigh(broadcast_value_vec))))), voffset2)), vscale2),
- }
- };
-
- // Compute S elements per iteration
- int x = window_start_x;
- for(; x <= (window_end_x - window_step_x); x += window_step_x)
- {
- const auto a = wrapper::vloadq(non_broadcast_input_ptr + x);
-
- const float32x4x4_t af =
- {
- {
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(wrapper::vmovl(wrapper::vgetlow(a))))), voffset1)), vscale1),
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(wrapper::vmovl(wrapper::vgetlow(a))))), voffset1)), vscale1),
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(wrapper::vmovl(wrapper::vgethigh(a))))), voffset1)), vscale1),
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(wrapper::vmovl(wrapper::vgethigh(a))))), voffset1)), vscale1),
- }
- };
-
- const int32x4x4_t rf =
- {
- {
-#ifdef __aarch64_
- vcvtnq_s32_f32(vmlaq_f32(voffseto, !is_broadcast_input_2 ? vsubq_f32(bf.val[0], af.val[0]) : vsubq_f32(af.val[0], bf.val[0]), invvscaleo)),
- vcvtnq_s32_f32(vmlaq_f32(voffseto, !is_broadcast_input_2 ? vsubq_f32(bf.val[1], af.val[1]) : vsubq_f32(af.val[1], bf.val[1]), invvscaleo)),
- vcvtnq_s32_f32(vmlaq_f32(voffseto, !is_broadcast_input_2 ? vsubq_f32(bf.val[2], af.val[2]) : vsubq_f32(af.val[2], bf.val[2]), invvscaleo)),
- vcvtnq_s32_f32(vmlaq_f32(voffseto, !is_broadcast_input_2 ? vsubq_f32(bf.val[3], af.val[3]) : vsubq_f32(af.val[3], bf.val[3]), invvscaleo)),
-#else //__aarch64__
- vcvtq_s32_f32(vmlaq_f32(voffseto, !is_broadcast_input_2 ? vsubq_f32(bf.val[0], af.val[0]) : vsubq_f32(af.val[0], bf.val[0]), invvscaleo)),
- vcvtq_s32_f32(vmlaq_f32(voffseto, !is_broadcast_input_2 ? vsubq_f32(bf.val[1], af.val[1]) : vsubq_f32(af.val[1], bf.val[1]), invvscaleo)),
- vcvtq_s32_f32(vmlaq_f32(voffseto, !is_broadcast_input_2 ? vsubq_f32(bf.val[2], af.val[2]) : vsubq_f32(af.val[2], bf.val[2]), invvscaleo)),
- vcvtq_s32_f32(vmlaq_f32(voffseto, !is_broadcast_input_2 ? vsubq_f32(bf.val[3], af.val[3]) : vsubq_f32(af.val[3], bf.val[3]), invvscaleo)),
-#endif //__aarch64__
- }
- };
-
- const auto pa = vqmovun_s16(vcombine_s16(vqmovn_s32(rf.val[0]), vqmovn_s32(rf.val[1])));
- const auto pb = vqmovun_s16(vcombine_s16(vqmovn_s32(rf.val[2]), vqmovn_s32(rf.val[3])));
- wrapper::vstore(output_ptr + x, wrapper::vcombine(pa, pb));
- }
-
- // Compute left-over elements
- for(; x < window_end_x; ++x)
- {
- const float afs = static_cast<int32_t>(*(non_broadcast_input_ptr + x) - non_broadcast_qinfo.offset) * non_broadcast_qinfo.scale;
- const float bfs = static_cast<int32_t>(broadcast_value - broadcast_qinfo.offset) * broadcast_qinfo.scale;
- *(output_ptr + x) = quantize_qasymm8(is_broadcast_input_2 ? afs - bfs : bfs - afs, dst->info()->quantization_info());
- }
- },
- broadcast_input, non_broadcast_input, output);
- }
- else
- {
- const float32x4_t vscale1 = vdupq_n_f32(iq1_info.scale);
- const float32x4_t vscale2 = vdupq_n_f32(iq2_info.scale);
- const int32x4_t voffset1 = vdupq_n_s32(iq1_info.offset);
- const int32x4_t voffset2 = vdupq_n_s32(iq2_info.offset);
-
- // Clear X Dimension on execution window as we handle manually
- input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
- input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- Iterator input1(src0, input1_win);
- Iterator input2(src1, input2_win);
- Iterator output(dst, win);
-
- execute_window_loop(win, [&](const Coordinates &)
- {
- const auto input1_ptr = reinterpret_cast<const uint8_t *>(input1.ptr());
- const auto input2_ptr = reinterpret_cast<const uint8_t *>(input2.ptr());
- const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
-
- // Compute S elements per iteration
- int x = window_start_x;
- for(; x <= (window_end_x - window_step_x); x += window_step_x)
- {
- const auto a = wrapper::vloadq(input1_ptr + x);
- const auto b = wrapper::vloadq(input2_ptr + x);
-
- const float32x4x4_t af =
- {
- {
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(wrapper::vmovl(wrapper::vgetlow(a))))), voffset1)), vscale1),
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(wrapper::vmovl(wrapper::vgetlow(a))))), voffset1)), vscale1),
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(wrapper::vmovl(wrapper::vgethigh(a))))), voffset1)), vscale1),
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(wrapper::vmovl(wrapper::vgethigh(a))))), voffset1)), vscale1),
- }
- };
-
- const float32x4x4_t bf =
- {
- {
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(wrapper::vmovl(wrapper::vgetlow(b))))), voffset2)), vscale2),
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(wrapper::vmovl(wrapper::vgetlow(b))))), voffset2)), vscale2),
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(wrapper::vmovl(wrapper::vgethigh(b))))), voffset2)), vscale2),
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(wrapper::vmovl(wrapper::vgethigh(b))))), voffset2)), vscale2),
- }
- };
-
- const int32x4x4_t rf =
- {
- {
-#ifdef __aarch64__
- vcvtnq_s32_f32(vmlaq_f32(voffseto, vsubq_f32(af.val[0], bf.val[0]), invvscaleo)),
- vcvtnq_s32_f32(vmlaq_f32(voffseto, vsubq_f32(af.val[1], bf.val[1]), invvscaleo)),
- vcvtnq_s32_f32(vmlaq_f32(voffseto, vsubq_f32(af.val[2], bf.val[2]), invvscaleo)),
- vcvtnq_s32_f32(vmlaq_f32(voffseto, vsubq_f32(af.val[3], bf.val[3]), invvscaleo)),
-#else //__aarch64__
- vcvtq_s32_f32(vmlaq_f32(voffseto, vsubq_f32(af.val[0], bf.val[0]), invvscaleo)),
- vcvtq_s32_f32(vmlaq_f32(voffseto, vsubq_f32(af.val[1], bf.val[1]), invvscaleo)),
- vcvtq_s32_f32(vmlaq_f32(voffseto, vsubq_f32(af.val[2], bf.val[2]), invvscaleo)),
- vcvtq_s32_f32(vmlaq_f32(voffseto, vsubq_f32(af.val[3], bf.val[3]), invvscaleo)),
-#endif //__aarch64__
- }
- };
-
- const auto pa = vqmovun_s16(vcombine_s16(vqmovn_s32(rf.val[0]), vqmovn_s32(rf.val[1])));
- const auto pb = vqmovun_s16(vcombine_s16(vqmovn_s32(rf.val[2]), vqmovn_s32(rf.val[3])));
- wrapper::vstore(output_ptr + x, wrapper::vcombine(pa, pb));
- }
-
- // Compute left-over elements
- for(; x < window_end_x; ++x)
- {
- const float afs = static_cast<int32_t>((*(input1_ptr + x)) - iq1_info.offset) * iq1_info.scale;
- const float bfs = static_cast<int32_t>((*(input2_ptr + x)) - iq2_info.offset) * iq2_info.scale;
+ add_sub_q8_neon_fixedpoint<uint8_t>(src0, src1, dst, policy, window, false /*is_addition*/);
+}
- *(output_ptr + x) = quantize_qasymm8((afs - bfs), dst->info()->quantization_info());
- }
- },
- input1, input2, output);
- }
+void sub_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
+{
+ add_sub_qasymm8_neon(src0, src1, dst, policy, window, false /*is_addition*/);
}
} // namespace cpu
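
[Editor's note] The new sub_qasymm8_neon_fixedpoint entry point above is a thin forwarder to add_sub_q8_neon_fixedpoint<uint8_t> with is_addition = false. As background only: fixed-point kernels of this kind typically fold the floating-point rescale factor into an integer multiplier plus a shift, so the inner loop stays in integer arithmetic. The helper below sketches that general idea under that assumption; it is not ComputeLibrary's implementation and the function name is hypothetical.

// Hypothetical helper: approximate 0 < scale < 1 as (x * multiplier) >> shift
// using a Q15 multiplier, so per-element rescaling needs no float operations.
#include <cmath>
#include <cstdint>

static void float_scale_to_q15(float scale, int32_t &multiplier, int &shift)
{
    int exp = 0;
    const float mant = std::frexp(scale, &exp); // scale = mant * 2^exp, mant in [0.5, 1)
    multiplier       = static_cast<int32_t>(std::lround(mant * (1 << 15)));
    shift            = 15 - exp;                // right shift applied after the integer multiply
    if(multiplier == (1 << 15))                 // mantissa rounded up to 1.0: renormalise
    {
        multiplier >>= 1;
        --shift;
    }
}

With such a multiplier/shift pair, the per-element rescale becomes an integer multiply-and-shift instead of a float multiply.
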
diff --git a/src/cpu/kernels/sub/neon/qasymm8_signed.cpp b/src/cpu/kernels/sub/neon/qasymm8_signed.cpp
index 2c9e411743..a86c7f22f6 100644
--- a/src/cpu/kernels/sub/neon/qasymm8_signed.cpp
+++ b/src/cpu/kernels/sub/neon/qasymm8_signed.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,209 +21,24 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/core/Helpers.h"
+
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Types.h"
-#include "arm_compute/core/utils/misc/Traits.h"
-#include "src/core/NEON/wrapper/intrinsics/intrinsics.h"
-#include "src/core/helpers/WindowHelpers.h"
+#include "src/cpu/kernels/add/generic/neon/impl.h"
namespace arm_compute
{
namespace cpu
{
-void sub_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
+void sub_qasymm8_signed_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
- ARM_COMPUTE_UNUSED(policy);
-
- // Create input windows
- Window input1_win = window.broadcast_if_dimension_le_one(src0->info()->tensor_shape());
- Window input2_win = window.broadcast_if_dimension_le_one(src1->info()->tensor_shape());
-
- // Clear X Dimension on execution window as we handle manually
- Window win = window;
- win.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- const int window_step_x = 16;
- const auto window_start_x = static_cast<int>(window.x().start());
- const auto window_end_x = static_cast<int>(window.x().end());
- const bool is_broadcast_across_x = src0->info()->tensor_shape().x() != src1->info()->tensor_shape().x();
-
- const UniformQuantizationInfo iq1_info = src0->info()->quantization_info().uniform();
- const UniformQuantizationInfo iq2_info = src1->info()->quantization_info().uniform();
- const UniformQuantizationInfo oq_info = dst->info()->quantization_info().uniform();
-
- const float32x4_t invvscaleo = vdupq_n_f32(1.f / oq_info.scale);
- const float32x4_t voffseto = vdupq_n_f32(oq_info.offset);
-
- if(is_broadcast_across_x)
- {
- const bool is_broadcast_input_2 = input2_win.x().step() == 0;
- Window broadcast_win = is_broadcast_input_2 ? input2_win : input1_win;
- Window non_broadcast_win = !is_broadcast_input_2 ? input2_win : input1_win;
- const ITensor *broadcast_tensor = is_broadcast_input_2 ? src1 : src0;
- const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? src1 : src0;
- const UniformQuantizationInfo broadcast_qinfo = broadcast_tensor->info()->quantization_info().uniform();
- const UniformQuantizationInfo non_broadcast_qinfo = non_broadcast_tensor->info()->quantization_info().uniform();
- const float32x4_t vscale1 = is_broadcast_input_2 ? vdupq_n_f32(iq1_info.scale) : vdupq_n_f32(iq2_info.scale);
- const float32x4_t vscale2 = is_broadcast_input_2 ? vdupq_n_f32(iq2_info.scale) : vdupq_n_f32(iq1_info.scale);
- const int32x4_t voffset1 = is_broadcast_input_2 ? vdupq_n_s32(iq1_info.offset) : vdupq_n_s32(iq2_info.offset);
- const int32x4_t voffset2 = is_broadcast_input_2 ? vdupq_n_s32(iq2_info.offset) : vdupq_n_s32(iq1_info.offset);
-
- // Clear X Dimension on execution window as we handle manually
- non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- Iterator broadcast_input(broadcast_tensor, broadcast_win);
- Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
- Iterator output(dst, win);
-
- execute_window_loop(win, [&](const Coordinates &)
- {
- const auto non_broadcast_input_ptr = reinterpret_cast<const int8_t *>(non_broadcast_input.ptr());
- const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
-
- const auto broadcast_value = *reinterpret_cast<const int8_t *>(broadcast_input.ptr());
- const auto broadcast_value_vec = wrapper::vdup_n(static_cast<int8_t>(broadcast_value), wrapper::traits::vector_128_tag{});
-
- const float32x4x4_t bf =
- {
- {
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(wrapper::vmovl(wrapper::vgetlow(broadcast_value_vec))))), voffset2)), vscale2),
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(wrapper::vmovl(wrapper::vgetlow(broadcast_value_vec))))), voffset2)), vscale2),
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(wrapper::vmovl(wrapper::vgethigh(broadcast_value_vec))))), voffset2)), vscale2),
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(wrapper::vmovl(wrapper::vgethigh(broadcast_value_vec))))), voffset2)), vscale2),
- }
- };
-
- // Compute S elements per iteration
- int x = window_start_x;
- for(; x <= (window_end_x - window_step_x); x += window_step_x)
- {
- const auto a = wrapper::vloadq(non_broadcast_input_ptr + x);
-
- const float32x4x4_t af =
- {
- {
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(wrapper::vmovl(wrapper::vgetlow(a))))), voffset1)), vscale1),
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(wrapper::vmovl(wrapper::vgetlow(a))))), voffset1)), vscale1),
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(wrapper::vmovl(wrapper::vgethigh(a))))), voffset1)), vscale1),
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(wrapper::vmovl(wrapper::vgethigh(a))))), voffset1)), vscale1),
- }
- };
-
- const int32x4x4_t rf =
- {
- {
-#ifdef __aarch64_
- vcvtnq_s32_f32(vmlaq_f32(voffseto, !is_broadcast_input_2 ? vsubq_f32(bf.val[0], af.val[0]) : vsubq_f32(af.val[0], bf.val[0]), invvscaleo)),
- vcvtnq_s32_f32(vmlaq_f32(voffseto, !is_broadcast_input_2 ? vsubq_f32(bf.val[1], af.val[1]) : vsubq_f32(af.val[1], bf.val[1]), invvscaleo)),
- vcvtnq_s32_f32(vmlaq_f32(voffseto, !is_broadcast_input_2 ? vsubq_f32(bf.val[2], af.val[2]) : vsubq_f32(af.val[2], bf.val[2]), invvscaleo)),
- vcvtnq_s32_f32(vmlaq_f32(voffseto, !is_broadcast_input_2 ? vsubq_f32(bf.val[3], af.val[3]) : vsubq_f32(af.val[3], bf.val[3]), invvscaleo)),
-#else //__aarch64__
- vcvtq_s32_f32(vmlaq_f32(voffseto, !is_broadcast_input_2 ? vsubq_f32(bf.val[0], af.val[0]) : vsubq_f32(af.val[0], bf.val[0]), invvscaleo)),
- vcvtq_s32_f32(vmlaq_f32(voffseto, !is_broadcast_input_2 ? vsubq_f32(bf.val[1], af.val[1]) : vsubq_f32(af.val[1], bf.val[1]), invvscaleo)),
- vcvtq_s32_f32(vmlaq_f32(voffseto, !is_broadcast_input_2 ? vsubq_f32(bf.val[2], af.val[2]) : vsubq_f32(af.val[2], bf.val[2]), invvscaleo)),
- vcvtq_s32_f32(vmlaq_f32(voffseto, !is_broadcast_input_2 ? vsubq_f32(bf.val[3], af.val[3]) : vsubq_f32(af.val[3], bf.val[3]), invvscaleo)),
-#endif //__aarch64__
- }
- };
-
- const auto pa = vqmovn_s16(vcombine_s16(vqmovn_s32(rf.val[0]), vqmovn_s32(rf.val[1])));
- const auto pb = vqmovn_s16(vcombine_s16(vqmovn_s32(rf.val[2]), vqmovn_s32(rf.val[3])));
- wrapper::vstore(output_ptr + x, wrapper::vcombine(pa, pb));
- }
-
- // Compute left-over elements
- for(; x < window_end_x; ++x)
- {
- const float afs = static_cast<int32_t>(*(non_broadcast_input_ptr + x) - non_broadcast_qinfo.offset) * non_broadcast_qinfo.scale;
- const float bfs = static_cast<int32_t>(broadcast_value - broadcast_qinfo.offset) * broadcast_qinfo.scale;
- *(output_ptr + x) = quantize_qasymm8_signed(is_broadcast_input_2 ? afs - bfs : bfs - afs, dst->info()->quantization_info());
- }
- },
- broadcast_input, non_broadcast_input, output);
- }
- else
- {
- const float32x4_t vscale1 = vdupq_n_f32(iq1_info.scale);
- const float32x4_t vscale2 = vdupq_n_f32(iq2_info.scale);
- const int32x4_t voffset1 = vdupq_n_s32(iq1_info.offset);
- const int32x4_t voffset2 = vdupq_n_s32(iq2_info.offset);
-
- // Clear X Dimension on execution window as we handle manually
- input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
- input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- Iterator input1(src0, input1_win);
- Iterator input2(src1, input2_win);
- Iterator output(dst, win);
-
- execute_window_loop(win, [&](const Coordinates &)
- {
- const auto input1_ptr = reinterpret_cast<const int8_t *>(input1.ptr());
- const auto input2_ptr = reinterpret_cast<const int8_t *>(input2.ptr());
- const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
-
- // Compute S elements per iteration
- int x = window_start_x;
- for(; x <= (window_end_x - window_step_x); x += window_step_x)
- {
- const auto a = wrapper::vloadq(input1_ptr + x);
- const auto b = wrapper::vloadq(input2_ptr + x);
-
- const float32x4x4_t af =
- {
- {
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(wrapper::vmovl(wrapper::vgetlow(a))))), voffset1)), vscale1),
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(wrapper::vmovl(wrapper::vgetlow(a))))), voffset1)), vscale1),
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(wrapper::vmovl(wrapper::vgethigh(a))))), voffset1)), vscale1),
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(wrapper::vmovl(wrapper::vgethigh(a))))), voffset1)), vscale1),
- }
- };
-
- const float32x4x4_t bf =
- {
- {
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(wrapper::vmovl(wrapper::vgetlow(b))))), voffset2)), vscale2),
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(wrapper::vmovl(wrapper::vgetlow(b))))), voffset2)), vscale2),
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(wrapper::vmovl(wrapper::vgethigh(b))))), voffset2)), vscale2),
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(wrapper::vmovl(wrapper::vgethigh(b))))), voffset2)), vscale2),
- }
- };
-
- const int32x4x4_t rf =
- {
- {
-#ifdef __aarch64__
- vcvtnq_s32_f32(vmlaq_f32(voffseto, vsubq_f32(af.val[0], bf.val[0]), invvscaleo)),
- vcvtnq_s32_f32(vmlaq_f32(voffseto, vsubq_f32(af.val[1], bf.val[1]), invvscaleo)),
- vcvtnq_s32_f32(vmlaq_f32(voffseto, vsubq_f32(af.val[2], bf.val[2]), invvscaleo)),
- vcvtnq_s32_f32(vmlaq_f32(voffseto, vsubq_f32(af.val[3], bf.val[3]), invvscaleo)),
-#else //__aarch64__
- vcvtq_s32_f32(vmlaq_f32(voffseto, vsubq_f32(af.val[0], bf.val[0]), invvscaleo)),
- vcvtq_s32_f32(vmlaq_f32(voffseto, vsubq_f32(af.val[1], bf.val[1]), invvscaleo)),
- vcvtq_s32_f32(vmlaq_f32(voffseto, vsubq_f32(af.val[2], bf.val[2]), invvscaleo)),
- vcvtq_s32_f32(vmlaq_f32(voffseto, vsubq_f32(af.val[3], bf.val[3]), invvscaleo)),
-#endif //__aarch64__
- }
- };
-
- const auto pa = vqmovn_s16(vcombine_s16(vqmovn_s32(rf.val[0]), vqmovn_s32(rf.val[1])));
- const auto pb = vqmovn_s16(vcombine_s16(vqmovn_s32(rf.val[2]), vqmovn_s32(rf.val[3])));
- wrapper::vstore(output_ptr + x, wrapper::vcombine(pa, pb));
- }
-
- // Compute left-over elements
- for(; x < window_end_x; ++x)
- {
- const float afs = static_cast<int32_t>((*(input1_ptr + x)) - iq1_info.offset) * iq1_info.scale;
- const float bfs = static_cast<int32_t>((*(input2_ptr + x)) - iq2_info.offset) * iq2_info.scale;
+ add_sub_q8_neon_fixedpoint<int8_t>(src0, src1, dst, policy, window, false /*is_addition*/);
+}
- *(output_ptr + x) = quantize_qasymm8_signed((afs - bfs), dst->info()->quantization_info());
- }
- },
- input1, input2, output);
- }
+void sub_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
+{
+ add_sub_qasymm8_signed_neon(src0, src1, dst, policy, window, false /*is_addition*/);
}
+
} // namespace cpu
} // namespace arm_compute
\ No newline at end of file