author    Viet-Hoa Do <viet-hoa.do@arm.com>  2022-09-22 10:24:23 +0100
committer Viet-Hoa Do <viet-hoa.do@arm.com>  2022-10-03 08:57:23 +0000
commit    40b441905760846e9fdaca283a4a4de038a6ef0d (patch)
tree      38a4f6b5122bfaf44a2a33e90b331a2e1a30b113
parent    ff81de5a9a0f6b9331c3b112cc2aed552f0482a9 (diff)
Optimize CPU add layer on quantized data
* Use fixed-point arithmetic where possible.
* Various optimizations to the FP32-based implementation. This
  implementation is kept as the fallback solution for unrealistic
  quantization parameters that exceed the range of the fixed-point
  solution.

Resolves: COMPMID-5458
Signed-off-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Change-Id: I221d2d3801ecaae4fe0b7cf6ae8ef00ca3743665
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8317
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--  src/core/NEON/wrapper/intrinsics/intrinsics.h        |   1
-rw-r--r--  src/core/NEON/wrapper/intrinsics/shr.h               |  78
-rw-r--r--  src/cpu/kernels/CpuAddKernel.cpp                     |  22
-rw-r--r--  src/cpu/kernels/CpuKernelSelectionTypes.h            |   1
-rw-r--r--  src/cpu/kernels/add/generic/neon/impl.cpp            | 220
-rw-r--r--  src/cpu/kernels/add/generic/neon/impl.h              |   5
-rw-r--r--  src/cpu/kernels/add/generic/neon/qasymm8.cpp         | 130
-rw-r--r--  src/cpu/kernels/add/generic/neon/qasymm8_signed.cpp  | 119
-rw-r--r--  tests/validation/NEON/ArithmeticAddition.cpp         |  22
9 files changed, 473 insertions(+), 125 deletions(-)
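
The change replaces the float dequantize-add-requantize pipeline with integer arithmetic: each per-input scale factor (input scale divided by output scale) is stored as a Q6.10 signed 16-bit fixed-point number, the folded offset (output zero point minus the scaled input zero points) as a Q22.10 signed 32-bit number, and the accumulated sum is shifted back down by 10 bits. Below is a minimal scalar sketch of that scheme for QASYMM8_SIGNED; the function name and plain-C++ helpers are hypothetical, not ACL API, and it mirrors the kernel's leftover-element path (truncating shift) rather than the rounding vector path.

    // Minimal scalar sketch of the Q6.10 / Q22.10 fixed-point scheme
    // (illustrative only; not part of the library).
    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    int8_t add_q8_fixedpoint_scalar(int8_t in0, int8_t in1,
                                    float scale0, float scale1, float offset)
    {
        // Scale factors as Q6.10 signed 16-bit, offset as Q22.10 signed 32-bit.
        const auto scale0_6p10  = static_cast<int16_t>(std::lround(scale0 * 1024.f));
        const auto scale1_6p10  = static_cast<int16_t>(std::lround(scale1 * 1024.f));
        const auto offset_22p10 = static_cast<int32_t>(std::lround(offset * 1024.f));

        // 8-bit input * Q6.10 scale accumulates in a Q22.10 32-bit value;
        // shifting out the 10 fractional bits recovers the integer result.
        const int32_t acc_22p10 = int32_t(in0) * scale0_6p10
                                + int32_t(in1) * scale1_6p10
                                + offset_22p10;
        return static_cast<int8_t>(std::clamp(acc_22p10 >> 10, -128, 127));
    }
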
diff --git a/src/core/NEON/wrapper/intrinsics/intrinsics.h b/src/core/NEON/wrapper/intrinsics/intrinsics.h
index 0256e0a8c8..97975ebe7c 100644
--- a/src/core/NEON/wrapper/intrinsics/intrinsics.h
+++ b/src/core/NEON/wrapper/intrinsics/intrinsics.h
@@ -67,6 +67,7 @@
#include "src/core/NEON/wrapper/intrinsics/rev64.h"
#include "src/core/NEON/wrapper/intrinsics/round.h"
#include "src/core/NEON/wrapper/intrinsics/setlane.h"
+#include "src/core/NEON/wrapper/intrinsics/shr.h"
#include "src/core/NEON/wrapper/intrinsics/sin.h"
#include "src/core/NEON/wrapper/intrinsics/sqrt.h"
#include "src/core/NEON/wrapper/intrinsics/store.h"
diff --git a/src/core/NEON/wrapper/intrinsics/shr.h b/src/core/NEON/wrapper/intrinsics/shr.h
new file mode 100644
index 0000000000..69fc254b61
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/shr.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef ARM_COMPUTE_WRAPPER_SHR_H
+#define ARM_COMPUTE_WRAPPER_SHR_H
+
+#include <type_traits>
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+
+#define VQRSHRN_IMPL(half_vtype, vtype, prefix, postfix) \
+ template <int b> \
+ inline half_vtype vqrshrn(const vtype &a) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VQRSHRN_IMPL(int8x8_t, int16x8_t, vqrshrn_n, s16)
+VQRSHRN_IMPL(uint8x8_t, uint16x8_t, vqrshrn_n, u16)
+VQRSHRN_IMPL(int16x4_t, int32x4_t, vqrshrn_n, s32)
+VQRSHRN_IMPL(uint16x4_t, uint32x4_t, vqrshrn_n, u32)
+VQRSHRN_IMPL(int32x2_t, int64x2_t, vqrshrn_n, s64)
+VQRSHRN_IMPL(uint32x2_t, uint64x2_t, vqrshrn_n, u64)
+
+#undef VQRSHRN_IMPL
+
+// This function is the mixed version of VQRSHRN and VQRSHRUN.
+// The input vector is always signed integer, while the returned vector
+// can be either signed or unsigned depending on the signedness of scalar type T.
+#define VQRSHRN_EX_IMPL(half_vtype, vtype, prefix_signed, prefix_unsigned, postfix) \
+ template <int b, typename T> \
+ inline typename std::enable_if<std::is_integral<T>::value && std::is_signed<T>::value, half_vtype>::type \
+ vqrshrn_ex(const vtype &a) \
+ { \
+ return prefix_signed##_##postfix(a, b); \
+ } \
+ \
+ template <int b, typename T> \
+ inline typename std::enable_if<std::is_integral<T>::value && !std::is_signed<T>::value, u##half_vtype>::type \
+ vqrshrn_ex(const vtype &a) \
+ { \
+ return prefix_unsigned##_##postfix(a, b); \
+ }
+
+VQRSHRN_EX_IMPL(int8x8_t, int16x8_t, vqrshrn_n, vqrshrun_n, s16)
+VQRSHRN_EX_IMPL(int16x4_t, int32x4_t, vqrshrn_n, vqrshrun_n, s32)
+VQRSHRN_EX_IMPL(int32x2_t, int64x2_t, vqrshrn_n, vqrshrun_n, s64)
+
+#undef VQRSHRN_EX_IMPL
+
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_SHR_H */
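
For reference, a short sketch of how these generated wrappers resolve to NEON intrinsics, assuming shr.h is on the include path and a NEON-capable target; the function below is a hypothetical illustration, not part of the library.

    #include <arm_neon.h>
    #include "src/core/NEON/wrapper/intrinsics/shr.h"

    void narrowing_examples(int16x8_t acc_s16, int32x4_t acc_s32)
    {
        using namespace arm_compute::wrapper;

        // vqrshrn<2> on int16x8_t expands to vqrshrn_n_s16(acc_s16, 2):
        // rounding right shift by 2, then saturating narrow to int8x8_t.
        const int8x8_t narrowed_s8 = vqrshrn<2>(acc_s16);

        // vqrshrn_ex<2, uint8_t> on int32x4_t picks the unsigned variant
        // vqrshrun_n_s32: signed input, unsigned saturated uint16x4_t output.
        const uint16x4_t narrowed_u16 = vqrshrn_ex<2, uint8_t>(acc_s32);

        (void)narrowed_s8;
        (void)narrowed_u16;
    }
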
diff --git a/src/cpu/kernels/CpuAddKernel.cpp b/src/cpu/kernels/CpuAddKernel.cpp
index 8fc82ec184..47ff6abf17 100644
--- a/src/cpu/kernels/CpuAddKernel.cpp
+++ b/src/cpu/kernels/CpuAddKernel.cpp
@@ -49,6 +49,22 @@ namespace
static const std::vector<CpuAddKernel::AddKernel> available_kernels =
{
{
+ "neon_qu8_add_fixedpoint",
+ [](const CpuAddKernelDataTypeISASelectorData & data)
+ {
+ return (data.dt == DataType::QASYMM8) && data.can_use_fixedpoint;
+ },
+ REGISTER_FP32_NEON(arm_compute::cpu::add_q8_neon_fixedpoint<uint8_t>)
+ },
+ {
+ "neon_qs8_add_fixedpoint",
+ [](const CpuAddKernelDataTypeISASelectorData & data)
+ {
+ return (data.dt == DataType::QASYMM8_SIGNED) && data.can_use_fixedpoint;
+ },
+ REGISTER_FP32_NEON(arm_compute::cpu::add_q8_neon_fixedpoint<int8_t>)
+ },
+ {
"neon_fp32_add_as_1d_array",
[](const CpuAddKernelDataTypeISASelectorData & data)
{
@@ -222,8 +238,9 @@ Status validate_arguments(const ITensorInfo &src0, const ITensorInfo &src1, cons
"Wrong shape for dst");
}
+ const auto can_use_fixedpoint = add_q8_neon_fixedpoint_possible(&src0, &src1, &dst);
const auto uk = CpuAddKernel::get_implementation<CpuAddKernelDataTypeISASelectorData>(CpuAddKernelDataTypeISASelectorData{ src0.data_type(),
- CPUInfo::get().get_isa(), can_interpret_inputs_as_1d_array(src0, src1) });
+ CPUInfo::get().get_isa(), can_interpret_inputs_as_1d_array(src0, src1), can_use_fixedpoint });
ARM_COMPUTE_RETURN_ERROR_ON(uk == nullptr || uk->ukernel == nullptr);
return Status{};
@@ -259,8 +276,9 @@ void CpuAddKernel::configure(const ITensorInfo *src0, const ITensorInfo *src1, I
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*src0, *src1, *dst, policy));
_can_interpret_inputs_as_1d_array = can_interpret_inputs_as_1d_array(*src0, *src1);
+ const auto can_use_fixedpoint = add_q8_neon_fixedpoint_possible(src0, src1, dst);
const auto uk = CpuAddKernel::get_implementation<CpuAddKernelDataTypeISASelectorData>(CpuAddKernelDataTypeISASelectorData{ src0->data_type(),
- CPUInfo::get().get_isa(), _can_interpret_inputs_as_1d_array });
+ CPUInfo::get().get_isa(), _can_interpret_inputs_as_1d_array, can_use_fixedpoint });
ARM_COMPUTE_ERROR_ON_NULLPTR(uk);
diff --git a/src/cpu/kernels/CpuKernelSelectionTypes.h b/src/cpu/kernels/CpuKernelSelectionTypes.h
index e3ecc4e709..87edb15192 100644
--- a/src/cpu/kernels/CpuKernelSelectionTypes.h
+++ b/src/cpu/kernels/CpuKernelSelectionTypes.h
@@ -88,6 +88,7 @@ struct CpuAddKernelDataTypeISASelectorData
DataType dt;
cpuinfo::CpuIsaInfo isa;
bool can_interpret_inputs_as_1d_array;
+ bool can_use_fixedpoint;
};
struct ScaleKernelDataTypeISASelectorData
diff --git a/src/cpu/kernels/add/generic/neon/impl.cpp b/src/cpu/kernels/add/generic/neon/impl.cpp
index 67985c985e..0f7b31c754 100644
--- a/src/cpu/kernels/add/generic/neon/impl.cpp
+++ b/src/cpu/kernels/add/generic/neon/impl.cpp
@@ -157,6 +157,223 @@ void add_same_neon_as_1d_array(const ITensor *src0, const ITensor *src1, ITensor
}
}
+bool add_q8_neon_fixedpoint_possible(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *dst)
+{
+ const auto iq0 = src0->quantization_info().uniform();
+ const auto iq1 = src1->quantization_info().uniform();
+ const auto oq = dst->quantization_info().uniform();
+
+ const auto scale0 = iq0.scale / oq.scale;
+ const auto scale1 = iq1.scale / oq.scale;
+
+ if(scale0 < -31.f || scale0 > 31.f || scale1 < -31.f || scale1 > 31.f)
+ {
+ // The scale factor cannot be stored as a 6.10 signed fixed-point number.
+ return false;
+ }
+
+ const auto offset = float(oq.offset) - scale0 * float(iq0.offset) - scale1 * float(iq1.offset);
+ const auto max_acc = (std::abs(scale0) + std::abs(scale1)) * 1024.f + std::abs(offset);
+
+ if(max_acc > 2097151.f) // 2^21 - 1
+ {
+ // It might not be possible to store the result as a 22.10 signed fixed-point number.
+ return false;
+ }
+
+ return true;
+}
+
+template <typename ScalarType>
+void add_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
+{
+ ARM_COMPUTE_UNUSED(policy);
+
+ const auto in0_info = src0->info();
+ const auto in1_info = src1->info();
+
+ const auto &in0_shape = in0_info->tensor_shape();
+ const auto &in1_shape = in1_info->tensor_shape();
+
+ // Create input windows.
+ Window in0_win = window.broadcast_if_dimension_le_one(in0_shape);
+ Window in1_win = window.broadcast_if_dimension_le_one(in1_shape);
+
+ // Clear the x dimension on the execution window as we process the whole row each iteration.
+ Window win = window;
+ win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ constexpr int window_step_x = 16;
+ const auto window_start_x = window.x().start();
+ const auto window_end_x = window.x().end();
+ const auto is_broadcast_across_x = in0_shape.x() != in1_shape.x();
+
+ const auto iq0_info = in0_info->quantization_info().uniform();
+ const auto iq1_info = in1_info->quantization_info().uniform();
+ const auto oq_info = dst->info()->quantization_info().uniform();
+
+ const auto in0_scale = iq0_info.scale / oq_info.scale;
+ const auto in1_scale = iq1_info.scale / oq_info.scale;
+ const auto offset = float(oq_info.offset) - in0_scale * float(iq0_info.offset) - in1_scale * float(iq1_info.offset);
+
+ const auto in0_scale_6p10 = static_cast<int16_t>(support::cpp11::lround(in0_scale * 1024.f));
+ const auto in1_scale_6p10 = static_cast<int16_t>(support::cpp11::lround(in1_scale * 1024.f));
+ const auto offset_22p10 = static_cast<int32_t>(support::cpp11::lround(offset * 1024.f));
+
+ if(is_broadcast_across_x)
+ {
+ // Prefix: a = non-broadcast, b = broadcast.
+
+ const auto is_broadcast_input_1 = in1_win.x().step() == 0;
+ auto a_win = is_broadcast_input_1 ? in0_win : in1_win;
+ auto b_win = is_broadcast_input_1 ? in1_win : in0_win;
+ const auto a_tensor = is_broadcast_input_1 ? src0 : src1;
+ const auto b_tensor = is_broadcast_input_1 ? src1 : src0;
+
+ const auto a_scale_6p10 = is_broadcast_input_1 ? in0_scale_6p10 : in1_scale_6p10;
+ const auto b_scale = is_broadcast_input_1 ? in1_scale : in0_scale;
+ const auto a_vscale_6p10 = wrapper::vdup_n(a_scale_6p10, wrapper::traits::vector_64_tag());
+
+ // Clear the x dimension on the execution window as we process the whole row each iteration.
+ a_win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ Iterator a_input_it(a_tensor, a_win);
+ Iterator b_input_it(b_tensor, b_win);
+ Iterator out_it(dst, win);
+
+ execute_window_loop(win, [&](const Coordinates &)
+ {
+ const auto a_ptr = reinterpret_cast<const ScalarType *>(a_input_it.ptr());
+ const auto b_ptr = reinterpret_cast<const ScalarType *>(b_input_it.ptr());
+ const auto out_ptr = reinterpret_cast<ScalarType *>(out_it.ptr());
+
+ const auto b_val = *b_ptr;
+ const auto b_scaled_22p10 = static_cast<int32_t>(support::cpp11::lround(b_scale * b_val * 1024.f));
+ const auto b_vscaled_offseted_22p10 = wrapper::vdup_n(b_scaled_22p10 + offset_22p10, wrapper::traits::vector_128_tag());
+
+ int x = window_start_x;
+
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ // Load the input.
+ const auto a_vin_8p0 = wrapper::vloadq(a_ptr + x);
+
+ // Widen the non-broadcast elements to signed 16-bit regardless of the input signedness.
+ const auto a_vin_16p0_0 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(a_vin_8p0)));
+ const auto a_vin_16p0_1 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(a_vin_8p0)));
+
+ // Multiply the non-broadcast elements by the scale factor, add the scaled broadcast elements and the offset.
+ // Widen and store the result in 32-bit integer.
+ const auto vout_22p10_00 = wrapper::vmlal(b_vscaled_offseted_22p10, wrapper::vgetlow(a_vin_16p0_0), a_vscale_6p10);
+ const auto vout_22p10_01 = wrapper::vmlal(b_vscaled_offseted_22p10, wrapper::vgethigh(a_vin_16p0_0), a_vscale_6p10);
+ const auto vout_22p10_10 = wrapper::vmlal(b_vscaled_offseted_22p10, wrapper::vgetlow(a_vin_16p0_1), a_vscale_6p10);
+ const auto vout_22p10_11 = wrapper::vmlal(b_vscaled_offseted_22p10, wrapper::vgethigh(a_vin_16p0_1), a_vscale_6p10);
+
+ // Remove 2 bits of the fractional part, round, narrow to 16-bit and saturate the result.
+ const auto vout_8p8_0 = wrapper::vcombine(
+ wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_00),
+ wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_01)
+ );
+ const auto vout_8p8_1 = wrapper::vcombine(
+ wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_10),
+ wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_11)
+ );
+
+ // Remove 8 bits of the fractional part, round, narrow to 8-bit and saturate the result.
+ const auto vout_8p0 = wrapper::vcombine(
+ wrapper::vqrshrn<8>(vout_8p8_0),
+ wrapper::vqrshrn<8>(vout_8p8_1)
+ );
+
+ // Store the result.
+ wrapper::vstore(out_ptr + x, vout_8p0);
+ }
+
+ // Process the left-over elements.
+ for(; x < window_end_x; ++x)
+ {
+ out_ptr[x] = utility::clamp<int32_t, ScalarType>((int32_t(a_ptr[x]) * a_scale_6p10 + b_scaled_22p10 + offset_22p10) >> 10);
+ }
+ },
+ b_input_it, a_input_it, out_it);
+ }
+ else
+ {
+ const auto vscale0_6p10 = wrapper::vdup_n(in0_scale_6p10, wrapper::traits::vector_64_tag());
+ const auto vscale1_6p10 = wrapper::vdup_n(in1_scale_6p10, wrapper::traits::vector_64_tag());
+ const auto voffset_22p10 = wrapper::vdup_n(offset_22p10, wrapper::traits::vector_128_tag());
+
+ // Clear the x dimension on the execution window as we process the whole row each iteration.
+ in0_win.set(Window::DimX, Window::Dimension(0, 1, 1));
+ in1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ Iterator in0_it(src0, in0_win);
+ Iterator in1_it(src1, in1_win);
+ Iterator out_it(dst, win);
+
+ execute_window_loop(win, [&](const Coordinates &)
+ {
+ const auto in0_ptr = reinterpret_cast<const ScalarType *>(in0_it.ptr());
+ const auto in1_ptr = reinterpret_cast<const ScalarType *>(in1_it.ptr());
+ const auto out_ptr = reinterpret_cast<ScalarType *>(out_it.ptr());
+
+ int x = window_start_x;
+
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ // Load the inputs.
+ const auto vin0_8p0 = wrapper::vloadq(in0_ptr + x);
+ const auto vin1_8p0 = wrapper::vloadq(in1_ptr + x);
+
+ // Widen the input elements to signed 16-bit regardless of the input signedness.
+ const auto vin0_16p0_0 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(vin0_8p0)));
+ const auto vin0_16p0_1 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(vin0_8p0)));
+ const auto vin1_16p0_0 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(vin1_8p0)));
+ const auto vin1_16p0_1 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(vin1_8p0)));
+
+ // Multiply the input elements by the scale factor and add the offset.
+ // Widen and store the result in 32-bit integer.
+ const auto vscaled0_offseted_22p10_00 = wrapper::vmlal(voffset_22p10, wrapper::vgetlow(vin0_16p0_0), vscale0_6p10);
+ const auto vscaled0_offseted_22p10_01 = wrapper::vmlal(voffset_22p10, wrapper::vgethigh(vin0_16p0_0), vscale0_6p10);
+ const auto vscaled0_offseted_22p10_10 = wrapper::vmlal(voffset_22p10, wrapper::vgetlow(vin0_16p0_1), vscale0_6p10);
+ const auto vscaled0_offseted_22p10_11 = wrapper::vmlal(voffset_22p10, wrapper::vgethigh(vin0_16p0_1), vscale0_6p10);
+
+ const auto vout_22p10_00 = wrapper::vmlal(vscaled0_offseted_22p10_00, wrapper::vgetlow(vin1_16p0_0), vscale1_6p10);
+ const auto vout_22p10_01 = wrapper::vmlal(vscaled0_offseted_22p10_01, wrapper::vgethigh(vin1_16p0_0), vscale1_6p10);
+ const auto vout_22p10_10 = wrapper::vmlal(vscaled0_offseted_22p10_10, wrapper::vgetlow(vin1_16p0_1), vscale1_6p10);
+ const auto vout_22p10_11 = wrapper::vmlal(vscaled0_offseted_22p10_11, wrapper::vgethigh(vin1_16p0_1), vscale1_6p10);
+
+ // Remove 2 bits of the fractional part, round, narrow to 16-bit and saturate the result.
+ const auto vout_8p8_0 = wrapper::vcombine(
+ wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_00),
+ wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_01)
+ );
+ const auto vout_8p8_1 = wrapper::vcombine(
+ wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_10),
+ wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_11)
+ );
+
+ // Remove 8 bits of the fractional part, round, narrow to 8-bit and saturate the result.
+ const auto vout_8p0 = wrapper::vcombine(
+ wrapper::vqrshrn<8>(vout_8p8_0),
+ wrapper::vqrshrn<8>(vout_8p8_1)
+ );
+
+ // Store the result.
+ wrapper::vstore(out_ptr + x, vout_8p0);
+ }
+
+ // Process the left-over elements.
+ for(; x < window_end_x; ++x)
+ {
+ out_ptr[x] = utility::clamp<int32_t, ScalarType>(
+ (int32_t(in0_ptr[x]) * in0_scale_6p10 + int32_t(in1_ptr[x]) * in1_scale_6p10 + offset_22p10) >> 10);
+ }
+ },
+ in0_it, in1_it, out_it);
+ }
+}
+
template void add_same_neon<float>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
template void add_same_neon<uint8_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
template void add_same_neon<int32_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
@@ -175,5 +392,8 @@ template void add_same_neon_as_1d_array<int16_t>(const ITensor *src0, const ITen
template void add_same_neon_as_1d_array<float16_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
#endif /* (__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */
+template void add_q8_neon_fixedpoint<int8_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
+template void add_q8_neon_fixedpoint<uint8_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
+
} // namespace cpu
} // namespace arm_compute
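
The eligibility check above bounds both fixed-point formats: an int16 Q6.10 value covers roughly +/-31.999 (32767/1024), hence the +/-31 limit on each scale factor, and the int32 Q22.10 accumulator holds integer parts up to 2^21 - 1 = 2097151. A standalone replica of the check with one worked example follows (a sketch, not the ACL function itself):

    #include <cmath>
    #include <cstdio>

    bool fixedpoint_possible(float iq0_scale, float iq1_scale, float oq_scale,
                             int iq0_offset, int iq1_offset, int oq_offset)
    {
        const float scale0 = iq0_scale / oq_scale;   // must fit Q6.10 int16
        const float scale1 = iq1_scale / oq_scale;
        if(scale0 < -31.f || scale0 > 31.f || scale1 < -31.f || scale1 > 31.f)
            return false;
        const float offset = float(oq_offset)
                           - scale0 * float(iq0_offset)
                           - scale1 * float(iq1_offset);
        // Conservative bound on the Q22.10 accumulator (2^21 - 1 integer part).
        const float max_acc = (std::fabs(scale0) + std::fabs(scale1)) * 1024.f
                            + std::fabs(offset);
        return max_acc <= 2097151.f;
    }

    int main()
    {
        // scale0 = 4, scale1 = 2, offset = 3 - 4*10 - 2*5 = -47 -> eligible.
        std::printf("%d\n", fixedpoint_possible(0.5f, 0.25f, 0.125f, 10, 5, 3)); // 1
    }
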
diff --git a/src/cpu/kernels/add/generic/neon/impl.h b/src/cpu/kernels/add/generic/neon/impl.h
index f8f0f517b0..e6a12fb4c0 100644
--- a/src/cpu/kernels/add/generic/neon/impl.h
+++ b/src/cpu/kernels/add/generic/neon/impl.h
@@ -35,6 +35,11 @@ void add_same_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const
template <typename ScalarType>
void add_same_neon_as_1d_array(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
+
+bool add_q8_neon_fixedpoint_possible(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *dst);
+
+template <typename ScalarType>
+void add_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
} // namespace cpu
} // namespace arm_compute
#endif // SRC_CORE_NEON_KERNELS_ADD_IMPL_H
\ No newline at end of file
diff --git a/src/cpu/kernels/add/generic/neon/qasymm8.cpp b/src/cpu/kernels/add/generic/neon/qasymm8.cpp
index e357a7ef7f..d8b4bca292 100644
--- a/src/cpu/kernels/add/generic/neon/qasymm8.cpp
+++ b/src/cpu/kernels/add/generic/neon/qasymm8.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2021 Arm Limited.
+ * Copyright (c) 2020-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -27,6 +27,7 @@
#include "arm_compute/core/utils/misc/Traits.h"
#include "src/core/NEON/wrapper/intrinsics/intrinsics.h"
#include "src/core/helpers/WindowHelpers.h"
+#include "src/cpu/kernels/add/generic/neon/impl.h"
namespace arm_compute
{
@@ -44,7 +45,7 @@ void add_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, co
Window win = window;
win.set(Window::DimX, Window::Dimension(0, 1, 1));
- const int window_step_x = 16;
+ constexpr int window_step_x = 16;
const auto window_start_x = static_cast<int>(window.x().start());
const auto window_end_x = static_cast<int>(window.x().end());
const bool is_broadcast_across_x = src0->info()->tensor_shape().x() != src1->info()->tensor_shape().x();
@@ -53,8 +54,9 @@ void add_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, co
const UniformQuantizationInfo iq2_info = src1->info()->quantization_info().uniform();
const UniformQuantizationInfo oq_info = dst->info()->quantization_info().uniform();
- const float32x4_t invvscaleo = vdupq_n_f32(1.f / oq_info.scale);
- const float32x4_t voffseto = vdupq_n_f32(oq_info.offset);
+ const auto scale1 = iq1_info.scale / oq_info.scale;
+ const auto scale2 = iq2_info.scale / oq_info.scale;
+ const auto offset = float(oq_info.offset) - scale1 * float(iq1_info.offset) - scale2 * float(iq2_info.offset);
if(is_broadcast_across_x)
{
@@ -63,13 +65,10 @@ void add_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, co
Window non_broadcast_win = !is_broadcast_input_2 ? input2_win : input1_win;
const ITensor *broadcast_tensor = is_broadcast_input_2 ? src1 : src0;
const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? src1 : src0;
- const UniformQuantizationInfo broadcast_qinfo = broadcast_tensor->info()->quantization_info().uniform();
- const UniformQuantizationInfo non_broadcast_qinfo = non_broadcast_tensor->info()->quantization_info().uniform();
- const float32x4_t vscale1 = is_broadcast_input_2 ? vdupq_n_f32(iq1_info.scale) : vdupq_n_f32(iq2_info.scale);
- const float32x4_t vscale2 = is_broadcast_input_2 ? vdupq_n_f32(iq2_info.scale) : vdupq_n_f32(iq1_info.scale);
- const int32x4_t voffset1 = is_broadcast_input_2 ? vdupq_n_s32(iq1_info.offset) : vdupq_n_s32(iq2_info.offset);
- const int32x4_t voffset2 = is_broadcast_input_2 ? vdupq_n_s32(iq2_info.offset) : vdupq_n_s32(iq1_info.offset);
+ const auto af_scale = is_broadcast_input_2 ? scale1 : scale2;
+ const auto bf_scale = is_broadcast_input_2 ? scale2 : scale1;
+ const auto vscale1 = vdupq_n_f32(af_scale);
// Clear X Dimension on execution window as we handle manually
non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));
@@ -80,28 +79,26 @@ void add_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, co
execute_window_loop(win, [&](const Coordinates &)
{
- const auto non_broadcast_input_ptr = reinterpret_cast<const uint8_t *>(non_broadcast_input.ptr());
- const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
+ const auto non_broadcast_input_ptr = non_broadcast_input.ptr();
+ const auto output_ptr = output.ptr();
- const uint8_t broadcast_value = *reinterpret_cast<const uint8_t *>(broadcast_input.ptr());
- const uint8x16_t broadcast_value_vec = vdupq_n_u8(broadcast_value);
-
- const auto bf_0 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(broadcast_value_vec))))), voffset2)), vscale2);
- const auto bf_1 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_low_u8(broadcast_value_vec))))), voffset2)), vscale2);
- const auto bf_2 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_high_u8(broadcast_value_vec))))), voffset2)), vscale2);
- const auto bf_3 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_high_u8(broadcast_value_vec))))), voffset2)), vscale2);
-
- const float bfs = static_cast<int32_t>(broadcast_value - broadcast_qinfo.offset) * broadcast_qinfo.scale;
+ const auto broadcast_value = *broadcast_input.ptr();
+ const auto bf = vdupq_n_f32(float(broadcast_value) * scale2 + offset);
+ const auto bfs = float(broadcast_value) * bf_scale + offset;
// Compute S elements per iteration
int x = window_start_x;
for(; x <= (window_end_x - window_step_x); x += window_step_x)
{
const uint8x16_t a = vld1q_u8(non_broadcast_input_ptr + x);
- const auto af_0 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(a))))), voffset1)), vscale1);
- const auto af_1 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_low_u8(a))))), voffset1)), vscale1);
- const auto af_2 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_high_u8(a))))), voffset1)), vscale1);
- const auto af_3 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_high_u8(a))))), voffset1)), vscale1);
+
+ const auto a_u16_0 = vmovl_u8(vget_low_u8(a));
+ const auto a_u16_1 = vmovl_u8(vget_high_u8(a));
+
+ const auto af_0 = vmlaq_f32(bf, vcvtq_f32_u32(vmovl_u16(vget_low_u16(a_u16_0))), vscale1);
+ const auto af_1 = vmlaq_f32(bf, vcvtq_f32_u32(vmovl_u16(vget_high_u16(a_u16_0))), vscale1);
+ const auto af_2 = vmlaq_f32(bf, vcvtq_f32_u32(vmovl_u16(vget_low_u16(a_u16_1))), vscale1);
+ const auto af_3 = vmlaq_f32(bf, vcvtq_f32_u32(vmovl_u16(vget_high_u16(a_u16_1))), vscale1);
int32x4_t rf_0{};
int32x4_t rf_1{};
@@ -109,15 +106,15 @@ void add_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, co
int32x4_t rf_3{};
#ifdef __aarch64__
- rf_0 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_0, bf_0), invvscaleo));
- rf_1 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_1, bf_1), invvscaleo));
- rf_2 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_2, bf_2), invvscaleo));
- rf_3 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_3, bf_3), invvscaleo));
+ rf_0 = vcvtnq_s32_f32(af_0);
+ rf_1 = vcvtnq_s32_f32(af_1);
+ rf_2 = vcvtnq_s32_f32(af_2);
+ rf_3 = vcvtnq_s32_f32(af_3);
#else //__aarch64__
- rf_0 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_0, bf_0), invvscaleo));
- rf_1 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_1, bf_1), invvscaleo));
- rf_2 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_2, bf_2), invvscaleo));
- rf_3 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_3, bf_3), invvscaleo));
+ rf_0 = vcvtq_s32_f32(af_0);
+ rf_1 = vcvtq_s32_f32(af_1);
+ rf_2 = vcvtq_s32_f32(af_2);
+ rf_3 = vcvtq_s32_f32(af_3);
#endif //__aarch64__
const uint8x8_t pa = vqmovun_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
@@ -128,8 +125,12 @@ void add_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, co
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- const float afs = static_cast<int32_t>(*(non_broadcast_input_ptr + x) - non_broadcast_qinfo.offset) * non_broadcast_qinfo.scale;
- *(output_ptr + x) = quantize_qasymm8((afs + bfs), oq_info);
+ const auto result = float(non_broadcast_input_ptr[x]) * af_scale + bfs;
+#ifdef __aarch64__
+ output_ptr[x] = utility::clamp<int, uint8_t>(support::cpp11::lround(result));
+#else // __aarch64__
+ output_ptr[x] = utility::clamp<int, uint8_t>(support::cpp11::trunc(result));
+#endif // __aarch64__
}
},
broadcast_input, non_broadcast_input, output);
@@ -144,16 +145,15 @@ void add_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, co
Iterator input2(src1, input2_win);
Iterator output(dst, win);
- const float32x4_t vscale1 = vdupq_n_f32(iq1_info.scale);
- const float32x4_t vscale2 = vdupq_n_f32(iq2_info.scale);
- const int32x4_t voffset1 = vdupq_n_s32(iq1_info.offset);
- const int32x4_t voffset2 = vdupq_n_s32(iq2_info.offset);
+ const auto vscale1 = vdupq_n_f32(scale1);
+ const auto vscale2 = vdupq_n_f32(scale2);
+ const auto voffset = vdupq_n_f32(offset);
execute_window_loop(win, [&](const Coordinates &)
{
- const auto input1_ptr = reinterpret_cast<const uint8_t *>(input1.ptr());
- const auto input2_ptr = reinterpret_cast<const uint8_t *>(input2.ptr());
- const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
+ const auto input1_ptr = input1.ptr();
+ const auto input2_ptr = input2.ptr();
+ const auto output_ptr = output.ptr();
// Compute S elements per iteration
int x = window_start_x;
@@ -162,15 +162,20 @@ void add_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, co
const uint8x16_t a = vld1q_u8(input1_ptr + x);
const uint8x16_t b = vld1q_u8(input2_ptr + x);
- const auto af_0 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(a))))), voffset1)), vscale1);
- const auto af_1 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_low_u8(a))))), voffset1)), vscale1);
- const auto af_2 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_high_u8(a))))), voffset1)), vscale1);
- const auto af_3 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_high_u8(a))))), voffset1)), vscale1);
+ const auto a_u16_0 = vmovl_u8(vget_low_u8(a));
+ const auto a_u16_1 = vmovl_u8(vget_high_u8(a));
+ const auto b_u16_0 = vmovl_u8(vget_low_u8(b));
+ const auto b_u16_1 = vmovl_u8(vget_high_u8(b));
+
+ const auto af_0 = vmlaq_f32(voffset, vcvtq_f32_u32(vmovl_u16(vget_low_u16(a_u16_0))), vscale1);
+ const auto af_1 = vmlaq_f32(voffset, vcvtq_f32_u32(vmovl_u16(vget_high_u16(a_u16_0))), vscale1);
+ const auto af_2 = vmlaq_f32(voffset, vcvtq_f32_u32(vmovl_u16(vget_low_u16(a_u16_1))), vscale1);
+ const auto af_3 = vmlaq_f32(voffset, vcvtq_f32_u32(vmovl_u16(vget_high_u16(a_u16_1))), vscale1);
- const auto bf_0 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(b))))), voffset2)), vscale2);
- const auto bf_1 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_low_u8(b))))), voffset2)), vscale2);
- const auto bf_2 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_high_u8(b))))), voffset2)), vscale2);
- const auto bf_3 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_high_u8(b))))), voffset2)), vscale2);
+ const auto bf_0 = vmlaq_f32(af_0, vcvtq_f32_u32(vmovl_u16(vget_low_u16(b_u16_0))), vscale2);
+ const auto bf_1 = vmlaq_f32(af_1, vcvtq_f32_u32(vmovl_u16(vget_high_u16(b_u16_0))), vscale2);
+ const auto bf_2 = vmlaq_f32(af_2, vcvtq_f32_u32(vmovl_u16(vget_low_u16(b_u16_1))), vscale2);
+ const auto bf_3 = vmlaq_f32(af_3, vcvtq_f32_u32(vmovl_u16(vget_high_u16(b_u16_1))), vscale2);
int32x4_t rf_0{};
int32x4_t rf_1{};
@@ -178,15 +183,15 @@ void add_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, co
int32x4_t rf_3{};
#ifdef __aarch64__
- rf_0 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_0, bf_0), invvscaleo));
- rf_1 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_1, bf_1), invvscaleo));
- rf_2 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_2, bf_2), invvscaleo));
- rf_3 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_3, bf_3), invvscaleo));
+ rf_0 = vcvtnq_s32_f32(bf_0);
+ rf_1 = vcvtnq_s32_f32(bf_1);
+ rf_2 = vcvtnq_s32_f32(bf_2);
+ rf_3 = vcvtnq_s32_f32(bf_3);
#else //__aarch64__
- rf_0 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_0, bf_0), invvscaleo));
- rf_1 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_1, bf_1), invvscaleo));
- rf_2 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_2, bf_2), invvscaleo));
- rf_3 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_3, bf_3), invvscaleo));
+ rf_0 = vcvtq_s32_f32(bf_0);
+ rf_1 = vcvtq_s32_f32(bf_1);
+ rf_2 = vcvtq_s32_f32(bf_2);
+ rf_3 = vcvtq_s32_f32(bf_3);
#endif //__aarch64__
const uint8x8_t pa = vqmovun_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
@@ -197,9 +202,12 @@ void add_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, co
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- const float afs = static_cast<int32_t>((*(input1_ptr + x)) - iq1_info.offset) * iq1_info.scale;
- const float bfs = static_cast<int32_t>((*(input2_ptr + x)) - iq2_info.offset) * iq2_info.scale;
- *(output_ptr + x) = quantize_qasymm8((afs + bfs), oq_info);
+ const auto result = float(input1_ptr[x]) * scale1 + float(input2_ptr[x]) * scale2 + offset;
+#ifdef __aarch64__
+ output_ptr[x] = utility::clamp<int, uint8_t>(support::cpp11::lround(result));
+#else // __aarch64__
+ output_ptr[x] = utility::clamp<int, uint8_t>(support::cpp11::trunc(result));
+#endif // __aarch64__
}
},
input1, input2, output);
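
The optimized FP32 fallback folds the three quantization parameter sets into two scales and one offset computed once outside the loop, so each element costs one fused multiply-add per input instead of a subtract/scale/add/requantize chain. A scalar sketch of the folded math (hypothetical helper, using round-to-nearest as on the AArch64 path):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    uint8_t add_qasymm8_scalar(uint8_t in0, uint8_t in1,
                               float iq1_scale, int iq1_offset,
                               float iq2_scale, int iq2_offset,
                               float oq_scale, int oq_offset)
    {
        // Computed once per tensor, outside the element loop.
        const float scale1 = iq1_scale / oq_scale;
        const float scale2 = iq2_scale / oq_scale;
        const float offset = float(oq_offset)
                           - scale1 * float(iq1_offset)
                           - scale2 * float(iq2_offset);

        // One fused multiply-add per input in the hot loop.
        const float result = float(in0) * scale1 + float(in1) * scale2 + offset;
        return static_cast<uint8_t>(std::clamp<long>(std::lround(result), 0, 255));
    }
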
diff --git a/src/cpu/kernels/add/generic/neon/qasymm8_signed.cpp b/src/cpu/kernels/add/generic/neon/qasymm8_signed.cpp
index d62d0739f5..a285e483ed 100644
--- a/src/cpu/kernels/add/generic/neon/qasymm8_signed.cpp
+++ b/src/cpu/kernels/add/generic/neon/qasymm8_signed.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2021 Arm Limited.
+ * Copyright (c) 2020-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -27,6 +27,7 @@
#include "arm_compute/core/utils/misc/Traits.h"
#include "src/core/NEON/wrapper/intrinsics/intrinsics.h"
#include "src/core/helpers/WindowHelpers.h"
+#include "src/cpu/kernels/add/generic/neon/impl.h"
namespace arm_compute
{
@@ -44,7 +45,7 @@ void add_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *
Window win = window;
win.set(Window::DimX, Window::Dimension(0, 1, 1));
- const int window_step_x = 16;
+ constexpr int window_step_x = 16;
const auto window_start_x = static_cast<int>(window.x().start());
const auto window_end_x = static_cast<int>(window.x().end());
const bool is_broadcast_across_x = src0->info()->tensor_shape().x() != src1->info()->tensor_shape().x();
@@ -53,8 +54,9 @@ void add_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *
const UniformQuantizationInfo iq2_info = src1->info()->quantization_info().uniform();
const UniformQuantizationInfo oq_info = dst->info()->quantization_info().uniform();
- const float32x4_t invvscaleo = vdupq_n_f32(1.f / oq_info.scale);
- const float32x4_t voffseto = vdupq_n_f32(oq_info.offset);
+ const auto scale1 = iq1_info.scale / oq_info.scale;
+ const auto scale2 = iq2_info.scale / oq_info.scale;
+ const auto offset = float(oq_info.offset) - scale1 * float(iq1_info.offset) - scale2 * float(iq2_info.offset);
if(is_broadcast_across_x)
{
@@ -63,13 +65,10 @@ void add_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *
Window non_broadcast_win = !is_broadcast_input_2 ? input2_win : input1_win;
const ITensor *broadcast_tensor = is_broadcast_input_2 ? src1 : src0;
const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? src1 : src0;
- const UniformQuantizationInfo broadcast_qinfo = broadcast_tensor->info()->quantization_info().uniform();
- const UniformQuantizationInfo non_broadcast_qinfo = non_broadcast_tensor->info()->quantization_info().uniform();
- const float32x4_t vscale1 = is_broadcast_input_2 ? vdupq_n_f32(iq1_info.scale) : vdupq_n_f32(iq2_info.scale);
- const float32x4_t vscale2 = is_broadcast_input_2 ? vdupq_n_f32(iq2_info.scale) : vdupq_n_f32(iq1_info.scale);
- const int32x4_t voffset1 = is_broadcast_input_2 ? vdupq_n_s32(iq1_info.offset) : vdupq_n_s32(iq2_info.offset);
- const int32x4_t voffset2 = is_broadcast_input_2 ? vdupq_n_s32(iq2_info.offset) : vdupq_n_s32(iq1_info.offset);
+ const auto af_scale = is_broadcast_input_2 ? scale1 : scale2;
+ const auto bf_scale = is_broadcast_input_2 ? scale2 : scale1;
+ const auto vscale1 = vdupq_n_f32(af_scale);
// Clear X Dimension on execution window as we handle manually
non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));
@@ -83,14 +82,9 @@ void add_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *
const auto non_broadcast_input_ptr = reinterpret_cast<const int8_t *>(non_broadcast_input.ptr());
const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
- const int8_t broadcast_value = *reinterpret_cast<const int8_t *>(broadcast_input.ptr());
- const int8x16_t broadcast_value_vec = vdupq_n_s8(broadcast_value);
-
- const auto bf_0 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_low_s8(broadcast_value_vec)))), voffset2)), vscale2);
- const auto bf_1 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_low_s8(broadcast_value_vec)))), voffset2)), vscale2);
- const auto bf_2 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_high_s8(broadcast_value_vec)))), voffset2)), vscale2);
- const auto bf_3 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_high_s8(broadcast_value_vec)))), voffset2)), vscale2);
- const float bfs = static_cast<int32_t>(broadcast_value - broadcast_qinfo.offset) * broadcast_qinfo.scale;
+ const auto broadcast_value = *reinterpret_cast<const int8_t *>(broadcast_input.ptr());
+ const auto bf = vdupq_n_f32(float(broadcast_value) * scale2 + offset);
+ const auto bfs = float(broadcast_value) * bf_scale + offset;
// Compute S elements per iteration
int x = window_start_x;
@@ -98,10 +92,13 @@ void add_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *
{
const int8x16_t a = vld1q_s8(non_broadcast_input_ptr + x);
- const auto af_0 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_low_s8(a)))), voffset1)), vscale1);
- const auto af_1 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_low_s8(a)))), voffset1)), vscale1);
- const auto af_2 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_high_s8(a)))), voffset1)), vscale1);
- const auto af_3 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_high_s8(a)))), voffset1)), vscale1);
+ const auto a_s16_0 = vmovl_s8(vget_low_s8(a));
+ const auto a_s16_1 = vmovl_s8(vget_high_s8(a));
+
+ const auto af_0 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_0))), vscale1);
+ const auto af_1 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_0))), vscale1);
+ const auto af_2 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_1))), vscale1);
+ const auto af_3 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_1))), vscale1);
int32x4_t rf_0{};
int32x4_t rf_1{};
@@ -109,15 +106,15 @@ void add_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *
int32x4_t rf_3{};
#ifdef __aarch64__
- rf_0 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_0, bf_0), invvscaleo));
- rf_1 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_1, bf_1), invvscaleo));
- rf_2 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_2, bf_2), invvscaleo));
- rf_3 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_3, bf_3), invvscaleo));
+ rf_0 = vcvtnq_s32_f32(af_0);
+ rf_1 = vcvtnq_s32_f32(af_1);
+ rf_2 = vcvtnq_s32_f32(af_2);
+ rf_3 = vcvtnq_s32_f32(af_3);
#else //__aarch64__
- rf_0 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_0, bf_0), invvscaleo));
- rf_1 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_1, bf_1), invvscaleo));
- rf_2 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_2, bf_2), invvscaleo));
- rf_3 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_3, bf_3), invvscaleo));
+ rf_0 = vcvtq_s32_f32(af_0);
+ rf_1 = vcvtq_s32_f32(af_1);
+ rf_2 = vcvtq_s32_f32(af_2);
+ rf_3 = vcvtq_s32_f32(af_3);
#endif //__aarch64__
const int8x8_t pa = vqmovn_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
@@ -128,8 +125,12 @@ void add_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- const float afs = static_cast<int32_t>(*(non_broadcast_input_ptr + x) - non_broadcast_qinfo.offset) * non_broadcast_qinfo.scale;
- *(output_ptr + x) = quantize_qasymm8_signed((afs + bfs), oq_info);
+ const auto result = float(non_broadcast_input_ptr[x]) * af_scale + bfs;
+#ifdef __aarch64__
+ output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::lround(result));
+#else // __aarch64__
+ output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::trunc(result));
+#endif // __aarch64__
}
},
broadcast_input, non_broadcast_input, output);
@@ -144,10 +145,10 @@ void add_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *
Iterator input2(src1, input2_win);
Iterator output(dst, win);
- const float32x4_t vscale1 = vdupq_n_f32(iq1_info.scale);
- const float32x4_t vscale2 = vdupq_n_f32(iq2_info.scale);
- const int32x4_t voffset1 = vdupq_n_s32(iq1_info.offset);
- const int32x4_t voffset2 = vdupq_n_s32(iq2_info.offset);
+ const auto vscale1 = vdupq_n_f32(scale1);
+ const auto vscale2 = vdupq_n_f32(scale2);
+ const auto voffset = vdupq_n_f32(offset);
+
execute_window_loop(win, [&](const Coordinates &)
{
const auto input1_ptr = reinterpret_cast<const int8_t *>(input1.ptr());
@@ -161,15 +162,20 @@ void add_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *
const int8x16_t a = vld1q_s8(input1_ptr + x);
const int8x16_t b = vld1q_s8(input2_ptr + x);
- const auto af_0 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_low_s8(a)))), voffset1)), vscale1);
- const auto af_1 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_low_s8(a)))), voffset1)), vscale1);
- const auto af_2 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_high_s8(a)))), voffset1)), vscale1);
- const auto af_3 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_high_s8(a)))), voffset1)), vscale1);
+ const auto a_s16_0 = vmovl_s8(vget_low_s8(a));
+ const auto a_s16_1 = vmovl_s8(vget_high_s8(a));
+ const auto b_s16_0 = vmovl_s8(vget_low_s8(b));
+ const auto b_s16_1 = vmovl_s8(vget_high_s8(b));
+
+ const auto af_0 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_0))), vscale1);
+ const auto af_1 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_0))), vscale1);
+ const auto af_2 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_1))), vscale1);
+ const auto af_3 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_1))), vscale1);
- const auto bf_0 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_low_s8(b)))), voffset2)), vscale2);
- const auto bf_1 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_low_s8(b)))), voffset2)), vscale2);
- const auto bf_2 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_high_s8(b)))), voffset2)), vscale2);
- const auto bf_3 = vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_high_s8(b)))), voffset2)), vscale2);
+ const auto bf_0 = vmlaq_f32(af_0, vcvtq_f32_s32(vmovl_s16(vget_low_s16(b_s16_0))), vscale2);
+ const auto bf_1 = vmlaq_f32(af_1, vcvtq_f32_s32(vmovl_s16(vget_high_s16(b_s16_0))), vscale2);
+ const auto bf_2 = vmlaq_f32(af_2, vcvtq_f32_s32(vmovl_s16(vget_low_s16(b_s16_1))), vscale2);
+ const auto bf_3 = vmlaq_f32(af_3, vcvtq_f32_s32(vmovl_s16(vget_high_s16(b_s16_1))), vscale2);
int32x4_t rf_0{};
int32x4_t rf_1{};
@@ -177,15 +183,15 @@ void add_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *
int32x4_t rf_3{};
#ifdef __aarch64__
- rf_0 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_0, bf_0), invvscaleo));
- rf_1 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_1, bf_1), invvscaleo));
- rf_2 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_2, bf_2), invvscaleo));
- rf_3 = vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_3, bf_3), invvscaleo));
+ rf_0 = vcvtnq_s32_f32(bf_0);
+ rf_1 = vcvtnq_s32_f32(bf_1);
+ rf_2 = vcvtnq_s32_f32(bf_2);
+ rf_3 = vcvtnq_s32_f32(bf_3);
#else //__aarch64__
- rf_0 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_0, bf_0), invvscaleo));
- rf_1 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_1, bf_1), invvscaleo));
- rf_2 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_2, bf_2), invvscaleo));
- rf_3 = vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af_3, bf_3), invvscaleo));
+ rf_0 = vcvtq_s32_f32(bf_0);
+ rf_1 = vcvtq_s32_f32(bf_1);
+ rf_2 = vcvtq_s32_f32(bf_2);
+ rf_3 = vcvtq_s32_f32(bf_3);
#endif //__aarch64__
const int8x8_t pa = vqmovn_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
@@ -196,9 +202,12 @@ void add_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *
// Compute left-over elements
for(; x < window_end_x; ++x)
{
- const float afs = static_cast<int32_t>((*(input1_ptr + x)) - iq1_info.offset) * iq1_info.scale;
- const float bfs = static_cast<int32_t>((*(input2_ptr + x)) - iq2_info.offset) * iq2_info.scale;
- *(output_ptr + x) = quantize_qasymm8_signed((afs + bfs), dst->info()->quantization_info());
+ const auto result = float(input1_ptr[x]) * scale1 + float(input2_ptr[x]) * scale2 + offset;
+#ifdef __aarch64__
+ output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::lround(result));
+#else // __aarch64__
+ output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::trunc(result));
+#endif // __aarch64__
}
},
input1, input2, output);
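
The #ifdef __aarch64__ split in the leftover loops mirrors the vector path: vcvtnq_s32_f32 (round to nearest) is only available on AArch64, so the AArch32 build falls back to the truncating vcvtq_s32_f32, and the scalar tail matches each with lround and trunc respectively. A hypothetical one-liner approximating the two conversions (lround rounds ties away from zero, so the match to vcvtnq is approximate):

    #include <cmath>
    #include <cstdint>

    // Round-to-nearest (AArch64 path) vs. truncation (AArch32 path).
    int32_t float_to_acc(float v, bool round_to_nearest)
    {
        return round_to_nearest ? static_cast<int32_t>(std::lround(v))  // ~vcvtnq_s32_f32
                                : static_cast<int32_t>(std::trunc(v));  // ~vcvtq_s32_f32
    }
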
diff --git a/tests/validation/NEON/ArithmeticAddition.cpp b/tests/validation/NEON/ArithmeticAddition.cpp
index f94e329c9c..322cd7bea5 100644
--- a/tests/validation/NEON/ArithmeticAddition.cpp
+++ b/tests/validation/NEON/ArithmeticAddition.cpp
@@ -89,7 +89,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
}
DATA_TEST_CASE(KernelSelection, framework::DatasetMode::ALL, concat(concat(
- combine(combine(framework::dataset::make("CpuExt", std::string("NEON")),
+ combine(combine(combine(framework::dataset::make("CpuExt", std::string("NEON")),
framework::dataset::make("DataType", { DataType::F32,
DataType::F16,
DataType::U8,
@@ -100,21 +100,24 @@ DATA_TEST_CASE(KernelSelection, framework::DatasetMode::ALL, concat(concat(
DataType::QSYMM16
})),
framework::dataset::make("CanInterpretAs1D", {true, false})),
- combine(combine(framework::dataset::make("CpuExt", std::string("SVE")),
+ framework::dataset::make("CanUseFixedpoint", {true, false})),
+ combine(combine(combine(framework::dataset::make("CpuExt", std::string("SVE")),
framework::dataset::make("DataType", { DataType::F32,
DataType::F16,
DataType::U8,
DataType::S16,
DataType::S32
})),
- framework::dataset::make("CanInterpretAs1D", {true, false}))),
- combine(combine(framework::dataset::make("CpuExt", std::string("SVE2")),
+ framework::dataset::make("CanInterpretAs1D", {true, false})),
+ framework::dataset::make("CanUseFixedpoint", {true, false}))),
+ combine(combine(combine(framework::dataset::make("CpuExt", std::string("SVE2")),
framework::dataset::make("DataType", { DataType::QASYMM8,
DataType::QASYMM8_SIGNED,
DataType::QSYMM16
})),
- framework::dataset::make("CanInterpretAs1D", {false}))),
- cpu_ext, data_type, can_interpret_inputs_as_1d_array)
+ framework::dataset::make("CanInterpretAs1D", {false})),
+ framework::dataset::make("CanUseFixedpoint", {true, false}))),
+ cpu_ext, data_type, can_interpret_inputs_as_1d_array, can_use_fixedpoint)
{
using namespace cpu::kernels;
@@ -124,18 +127,23 @@ DATA_TEST_CASE(KernelSelection, framework::DatasetMode::ALL, concat(concat(
cpu_isa.sve2 = (cpu_ext == "SVE2");
cpu_isa.fp16 = (data_type == DataType::F16);
- const auto *selected_impl = CpuAddKernel::get_implementation(CpuAddKernelDataTypeISASelectorData{data_type, cpu_isa, can_interpret_inputs_as_1d_array}, cpu::KernelSelectionType::Preferred);
+ const auto *selected_impl = CpuAddKernel::get_implementation(CpuAddKernelDataTypeISASelectorData{data_type, cpu_isa, can_interpret_inputs_as_1d_array, can_use_fixedpoint}, cpu::KernelSelectionType::Preferred);
ARM_COMPUTE_ERROR_ON_NULLPTR(selected_impl);
bool float_or_integer = (data_type == DataType::F32 || data_type == DataType::F16 || data_type == DataType::U8 ||
data_type == DataType::S16 || data_type == DataType::S32);
+ bool qasymm8_any = (data_type == DataType::QASYMM8 || data_type == DataType::QASYMM8_SIGNED);
std::string expected;
if(can_interpret_inputs_as_1d_array && float_or_integer)
{
expected = "neon_" + cpu_impl_dt(data_type) + "_add_as_1d_array";
}
+ else if(qasymm8_any && can_use_fixedpoint)
+ {
+ expected = "neon_" + cpu_impl_dt(data_type) + "_add_fixedpoint";
+ }
else
{
expected = lower_string(cpu_ext) + "_" + cpu_impl_dt(data_type) + "_add";