author    Omar Al Khatib <omar.alkhatib@arm.com>  2022-11-01 17:01:24 +0000
committer Omar Al Khatib <omar.alkhatib@arm.com>  2022-11-07 15:45:21 +0000
commit    605a928960c0c36384925a9064d12addc8f43a41 (patch)
tree      71896f7d4683751c7c87c5606b95e0c9c421384a
parent    a2b131bf4680f83b10a7e3544b6183279d8c2691 (diff)
Optimize CPU mul layer on quantized data
Resolves: [COMPMID-5461]

Signed-off-by: Omar Al Khatib <omar.alkhatib@arm.com>
Change-Id: I89b99d267c32b00ef44f9bb6e7c714dfe4a0d29d
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8420
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--  src/core/NEON/wrapper/intrinsics/shr.h   |  28
-rw-r--r--  src/core/NEON/wrapper/intrinsics/store.h |   6
-rw-r--r--  src/core/NEON/wrapper/intrinsics/sub.h   |  17
-rw-r--r--  src/cpu/kernels/CpuMulKernel.cpp         | 328
4 files changed, 345 insertions(+), 34 deletions(-)
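
The patch replaces the float dequantize/multiply/requantize round trip in the quantized mul kernel with signed 14.18 fixed-point arithmetic: offsets are removed in integer, the combined rescale factor is pre-multiplied by 2^18, and the 18 fractional bits are shifted out with rounding at the end. As a rough scalar model of what the vector code below computes per element (the function name and the 64-bit accumulator are illustrative; the kernel itself stays in 32 bits and relies on the representability check in mul_q8_neon_fixedpoint_possible):

    #include <algorithm>
    #include <cstdint>

    // Hedged sketch of one output element of the fixed-point path.
    // multiplier_14p18 ~= ((iq0.scale * iq1.scale) / oq.scale) * scale * 2^18
    // out_offset_14p18  = oq.offset * 2^18
    int8_t mul_q8_fixedpoint_scalar(int8_t in0, int8_t in1,
                                    int32_t in0_offset, int32_t in1_offset,
                                    int32_t multiplier_14p18, int32_t out_offset_14p18)
    {
        const int32_t prod      = (int32_t(in0) - in0_offset) * (int32_t(in1) - in1_offset);
        const int64_t acc_14p18 = int64_t(prod) * multiplier_14p18 + out_offset_14p18;
        // Drop the 18 fractional bits with round-to-nearest, then saturate to 8 bits.
        const int32_t rounded   = int32_t((acc_14p18 + (int64_t(1) << 17)) >> 18);
        return int8_t(std::clamp(rounded, -128, 127));
    }
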
diff --git a/src/core/NEON/wrapper/intrinsics/shr.h b/src/core/NEON/wrapper/intrinsics/shr.h
index d740091464..e41e9b8b31 100644
--- a/src/core/NEON/wrapper/intrinsics/shr.h
+++ b/src/core/NEON/wrapper/intrinsics/shr.h
@@ -25,21 +25,19 @@
#ifndef ARM_COMPUTE_WRAPPER_SHR_H
#define ARM_COMPUTE_WRAPPER_SHR_H
-#include <type_traits>
#include <arm_neon.h>
+#include <type_traits>
namespace arm_compute
{
namespace wrapper
{
-
#define VQRSHRN_IMPL(half_vtype, vtype, prefix, postfix) \
template <int b> \
inline half_vtype vqrshrn(const vtype &a) \
{ \
return prefix##_##postfix(a, b); \
}
-
VQRSHRN_IMPL(int8x8_t, int16x8_t, vqrshrn_n, s16)
VQRSHRN_IMPL(uint8x8_t, uint16x8_t, vqrshrn_n, u16)
VQRSHRN_IMPL(int16x4_t, int32x4_t, vqrshrn_n, s32)
@@ -77,20 +75,38 @@ VQRSHRN_SCALAR_IMPL(uint32_t, uint64_t, vqrshrnd_n, u64)
{ \
return prefix_signed##_##postfix(a, b); \
} \
- \
+ \
template <int b, typename T> \
inline typename std::enable_if<std::is_integral<T>::value && !std::is_signed<T>::value, u##half_vtype>::type \
vqrshrn_ex(const vtype &a) \
{ \
return prefix_unsigned##_##postfix(a, b); \
}
-
VQRSHRN_EX_IMPL(int8x8_t, int16x8_t, vqrshrn_n, vqrshrun_n, s16)
VQRSHRN_EX_IMPL(int16x4_t, int32x4_t, vqrshrn_n, vqrshrun_n, s32)
VQRSHRN_EX_IMPL(int32x2_t, int64x2_t, vqrshrn_n, vqrshrun_n, s64)
-
#undef VQRSHRN_EX_IMPL
+#define VSHR_IMPL(vtype, prefix, postfix) \
+ template <int b> \
+ inline vtype vshr_n(const vtype &a) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+VSHR_IMPL(uint8x8_t, vshr_n, u8)
+VSHR_IMPL(int8x8_t, vshr_n, s8)
+#undef VSHR_IMPL
+
+#define VSHRQ_IMPL(vtype, prefix, postfix) \
+ template <int b> \
+ inline vtype vshrq_n(const vtype &a) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+VSHRQ_IMPL(uint32x4_t, vshrq_n, u32)
+VSHRQ_IMPL(int32x4_t, vshrq_n, s32)
+#undef VSHRQ_IMPL
+
#ifdef __aarch64__
#define VQRSHRN_EX_SCALAR_IMPL(half_vtype, vtype, prefix_signed, prefix_unsigned, postfix) \
template <int b, typename T> \
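
The wrapper macros above only stamp out thin templated forwards to the underlying NEON intrinsics. As a minimal sketch, VSHRQ_IMPL(int32x4_t, vshrq_n, s32) expands to roughly the following, which the kernel later uses to peel 8 of the 18 fractional bits off a 14.18 accumulator (the usage variable names are illustrative):

    #include <arm_neon.h>

    // Rough expansion of VSHRQ_IMPL(int32x4_t, vshrq_n, s32):
    template <int b>
    inline int32x4_t vshrq_n(const int32x4_t &a)
    {
        return vshrq_n_s32(a, b); // arithmetic shift right by the immediate b
    }

    // Usage sketch: int32x4_t partial_14p10 = wrapper::vshrq_n<8>(acc_14p18);
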
diff --git a/src/core/NEON/wrapper/intrinsics/store.h b/src/core/NEON/wrapper/intrinsics/store.h
index 6dda432ea9..ce1b9a554e 100644
--- a/src/core/NEON/wrapper/intrinsics/store.h
+++ b/src/core/NEON/wrapper/intrinsics/store.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2020, 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,8 +44,6 @@ VSTORE_IMPL(uint16_t, uint16x4_t, vst1, u16)
VSTORE_IMPL(int16_t, int16x4_t, vst1, s16)
VSTORE_IMPL(uint32_t, uint32x2_t, vst1, u32)
VSTORE_IMPL(int32_t, int32x2_t, vst1, s32)
-//VSTORE_IMPL(uint64_t, 1, vst1, u64)
-//VSTORE_IMPL(int64_t, 1, vst1, s64)
VSTORE_IMPL(float, float32x2_t, vst1, f32)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
VSTORE_IMPL(float16_t, float16x4_t, vst1, f16)
@@ -57,8 +55,6 @@ VSTORE_IMPL(uint16_t, uint16x8_t, vst1q, u16)
VSTORE_IMPL(int16_t, int16x8_t, vst1q, s16)
VSTORE_IMPL(uint32_t, uint32x4_t, vst1q, u32)
VSTORE_IMPL(int32_t, int32x4_t, vst1q, s32)
-//VSTORE_IMPL(uint64_t, 2, vst1q, u64)
-//VSTORE_IMPL(int64_t, 2, vst1q, s64)
VSTORE_IMPL(float, float32x4_t, vst1q, f32)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
VSTORE_IMPL(float16_t, float16x8_t, vst1q, f16)
diff --git a/src/core/NEON/wrapper/intrinsics/sub.h b/src/core/NEON/wrapper/intrinsics/sub.h
index 475986d0f6..20436714ef 100644
--- a/src/core/NEON/wrapper/intrinsics/sub.h
+++ b/src/core/NEON/wrapper/intrinsics/sub.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2020, 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -98,6 +98,21 @@ VQSUB_IMPL(float16x8_t, float16x8_t, vsubq, f16)
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
#undef VQSUB_IMPL
+#define VSUBL_IMPL(rtype, vtype, prefix, postfix) \
+ inline rtype vsubl(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VSUBL_IMPL(int16x8_t, int8x8_t, vsubl, s8)
+VSUBL_IMPL(int32x4_t, int16x4_t, vsubl, s16)
+VSUBL_IMPL(int64x2_t, int32x2_t, vsubl, s32)
+VSUBL_IMPL(uint16x8_t, uint8x8_t, vsubl, u8)
+VSUBL_IMPL(uint32x4_t, uint16x4_t, vsubl, u16)
+VSUBL_IMPL(uint64x2_t, uint32x2_t, vsubl, u32)
+
+#undef VSUBL_IMPL
+
} // namespace wrapper
} // namespace arm_compute
#endif /* ARM_COMPUTE_WRAPPER_SUB_H */
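
The new vsubl overloads wrap NEON's widening subtract: two narrow vectors go in, a vector twice the width comes out, so subtracting the quantization offset cannot wrap in the narrow type. A minimal usage sketch under that assumption (the function name is mine):

    #include <arm_neon.h>

    // Each lane computes int32(values[i]) - int32(offset[i]);
    // this is what wrapper::vsubl resolves to for signed 16-bit inputs.
    int32x4_t subtract_offset_widening(int16x4_t values, int16x4_t offset)
    {
        return vsubl_s16(values, offset);
    }
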
diff --git a/src/cpu/kernels/CpuMulKernel.cpp b/src/cpu/kernels/CpuMulKernel.cpp
index 2f04bf9f26..487954b889 100644
--- a/src/cpu/kernels/CpuMulKernel.cpp
+++ b/src/cpu/kernels/CpuMulKernel.cpp
@@ -241,7 +241,8 @@ void mul_saturate_quantized_8(const ITensor *src1, const ITensor *src2, ITensor
Iterator input2(src2, input2_win);
Iterator dst(out, win);
- execute_window_loop(win, [&](const Coordinates &)
+ execute_window_loop(
+ win, [&](const Coordinates &)
{
const auto input1_ptr = reinterpret_cast<const T *>(input1.ptr());
const auto input2_ptr = reinterpret_cast<const T *>(input2.ptr());
@@ -290,6 +291,262 @@ void mul_saturate_quantized_8(const ITensor *src1, const ITensor *src2, ITensor
}
}
+bool mul_q8_neon_fixedpoint_possible(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *dst, float scale)
+{
+ const auto iq0 = src0->quantization_info().uniform();
+ const auto iq1 = src1->quantization_info().uniform();
+ const auto oq = dst->quantization_info().uniform();
+
+ const auto multiplier = ((iq0.scale * iq1.scale) / oq.scale) * scale;
+
+ if(multiplier < -8191.f || multiplier > 8191.f)
+ {
+ // The multiplier cannot be stored as a 14.18 signed fixed-point number.
+ return false;
+ }
+
+ const auto offset_out = float(oq.offset);
+
+ const auto max_result = multiplier * (256) * (256) + offset_out;
+
+ if(max_result > 8191.f)
+ {
+ // It might not be possible to store the result as a 14.18 signed fixed-point number.
+ return false;
+ }
+
+ return true;
+}
+
+template <typename ScalarType>
+void mul_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *dst, const Window &window, float scale)
+{
+ const auto in0_info = src0->info();
+ const auto in1_info = src1->info();
+
+ const auto &in0_shape = in0_info->tensor_shape();
+ const auto &in1_shape = in1_info->tensor_shape();
+
+ // Create input windows.
+ Window in0_win = window.broadcast_if_dimension_le_one(in0_shape);
+ Window in1_win = window.broadcast_if_dimension_le_one(in1_shape);
+
+ // Clear the x dimension on the execution window as we process the whole row each iteration.
+ Window win = window;
+ win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ constexpr int window_step_x = 16;
+ const auto window_start_x = window.x().start();
+ const auto window_end_x = window.x().end();
+ const auto is_broadcast_across_x = in0_shape.x() != in1_shape.x();
+
+ const auto iq0_info = in0_info->quantization_info().uniform();
+ const auto iq1_info = in1_info->quantization_info().uniform();
+ const auto oq_info = dst->info()->quantization_info().uniform();
+
+ const auto in0_offset = iq0_info.offset;
+ const auto in1_offset = iq1_info.offset;
+ const auto out_offset = oq_info.offset;
+ const auto multiplier = ((iq0_info.scale * iq1_info.scale) / oq_info.scale) * scale;
+
+ constexpr int32_t two_pwr18i = 262144;
+ constexpr float two_pwr18f = 262144.f;
+
+ const auto in0_offset_16p0 = static_cast<int16_t>(in0_offset);
+ const auto in1_offset_16p0 = static_cast<int16_t>(in1_offset);
+ const auto out_offset_14p18 = static_cast<int32_t>(out_offset * two_pwr18i);
+ const auto multiplier_14p18 = static_cast<int32_t>(multiplier * two_pwr18f);
+
+ if(is_broadcast_across_x)
+ {
+ // Prefix: a = non-broadcast, b = broadcast.
+
+ const auto is_broadcast_input_1 = in1_win.x().step() == 0;
+ auto a_win = is_broadcast_input_1 ? in0_win : in1_win;
+ auto b_win = is_broadcast_input_1 ? in1_win : in0_win;
+ const auto a_tensor = is_broadcast_input_1 ? src0 : src1;
+ const auto b_tensor = is_broadcast_input_1 ? src1 : src0;
+
+ const auto a_offset_16p0 = is_broadcast_input_1 ? in0_offset_16p0 : in1_offset_16p0;
+ const auto b_offset_16p0 = is_broadcast_input_1 ? in1_offset_16p0 : in0_offset_16p0;
+#ifndef __aarch64__
+ const auto a_offset = is_broadcast_input_1 ? in0_offset : in1_offset;
+ const auto b_offset = is_broadcast_input_1 ? in1_offset : in0_offset;
+#endif //__aarch64__
+ const auto a_voffset_16p0 = wrapper::vdup_n(a_offset_16p0, wrapper::traits::vector_64_tag());
+
+ // Clear the x dimension on the execution window as we process the whole row each iteration.
+ a_win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ Iterator a_input_it(a_tensor, a_win);
+ Iterator b_input_it(b_tensor, b_win);
+ Iterator out_it(dst, win);
+
+ execute_window_loop(
+ win, [&](const Coordinates &)
+ {
+ const auto a_ptr = reinterpret_cast<const ScalarType *>(a_input_it.ptr());
+ const auto b_ptr = reinterpret_cast<const ScalarType *>(b_input_it.ptr());
+ const auto out_ptr = reinterpret_cast<ScalarType *>(out_it.ptr());
+
+ const auto b_val = *b_ptr;
+ const auto b_offseted_32p0 = static_cast<int32_t>(b_val - b_offset_16p0);
+ const auto b_voffseted_32p0 = wrapper::vdup_n(b_offseted_32p0, wrapper::traits::vector_128_tag());
+
+ const auto vmultiplier_14p18 = wrapper::vdup_n(multiplier_14p18, wrapper::traits::vector_128_tag());
+ const auto voffsetout_14p18 = wrapper::vdup_n(out_offset_14p18, wrapper::traits::vector_128_tag());
+
+ int x = window_start_x;
+
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ // Load the inputs.
+ const auto a_vin_8p0 = wrapper::vloadq(a_ptr + x);
+
+ // Widen the non-broadcast elements to signed 16-bit regardless of the input signedness.
+ const auto a_vin_16p0_0 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(a_vin_8p0)));
+ const auto a_vin_16p0_1 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(a_vin_8p0)));
+
+ const auto voffseted_32p0_00 = wrapper::vsubl(wrapper::vgetlow(a_vin_16p0_0), a_voffset_16p0);
+ const auto voffseted_32p0_01 = wrapper::vsubl(wrapper::vgethigh(a_vin_16p0_0), a_voffset_16p0);
+ const auto voffseted_32p0_10 = wrapper::vsubl(wrapper::vgetlow(a_vin_16p0_1), a_voffset_16p0);
+ const auto voffseted_32p0_11 = wrapper::vsubl(wrapper::vgethigh(a_vin_16p0_1), a_voffset_16p0);
+
+ const auto vinnermul_32p0_00 = wrapper::vmul(voffseted_32p0_00, b_voffseted_32p0);
+ const auto vinnermul_32p0_01 = wrapper::vmul(voffseted_32p0_01, b_voffseted_32p0);
+ const auto vinnermul_32p0_10 = wrapper::vmul(voffseted_32p0_10, b_voffseted_32p0);
+ const auto vinnermul_32p0_11 = wrapper::vmul(voffseted_32p0_11, b_voffseted_32p0);
+
+ const auto vout_14p18_00 = wrapper::vmla(voffsetout_14p18, vinnermul_32p0_00, vmultiplier_14p18);
+ const auto vout_14p18_01 = wrapper::vmla(voffsetout_14p18, vinnermul_32p0_01, vmultiplier_14p18);
+ const auto vout_14p18_10 = wrapper::vmla(voffsetout_14p18, vinnermul_32p0_10, vmultiplier_14p18);
+ const auto vout_14p18_11 = wrapper::vmla(voffsetout_14p18, vinnermul_32p0_11, vmultiplier_14p18);
+
+ // These right shifts undo the earlier multiplication by 2^18. Each shift instruction here is limited to an immediate of at most 8, so the total shift of 18 is split across multiple instructions.
+ const auto vout_14p2_00 = wrapper::vqrshrn_ex<8, ScalarType>(wrapper::vshrq_n<8>(vout_14p18_00));
+ const auto vout_14p2_01 = wrapper::vqrshrn_ex<8, ScalarType>(wrapper::vshrq_n<8>(vout_14p18_01));
+ const auto vout_14p2_10 = wrapper::vqrshrn_ex<8, ScalarType>(wrapper::vshrq_n<8>(vout_14p18_10));
+ const auto vout_14p2_11 = wrapper::vqrshrn_ex<8, ScalarType>(wrapper::vshrq_n<8>(vout_14p18_11));
+
+ const auto vout_14p2_0 = wrapper::vcombine(
+ vout_14p2_00,
+ vout_14p2_01);
+
+ const auto vout_14p2_1 = wrapper::vcombine(
+ vout_14p2_10,
+ vout_14p2_11);
+
+ const auto vout_8p0 = wrapper::vcombine(
+ wrapper::vqrshrn<2>(vout_14p2_0),
+ wrapper::vqrshrn<2>(vout_14p2_1));
+ wrapper::vstore(out_ptr + x, vout_8p0);
+ }
+
+ // Process the left-over elements.
+ for(; x < window_end_x; ++x)
+ {
+#ifdef __aarch64__
+ out_ptr[x] = wrapper::vqrshrn<2>(wrapper::vqrshrn<8>(wrapper::vqrshrn_ex<8, ScalarType>((multiplier_14p18 * (int32_t(a_ptr[x]) - a_offset_16p0) * (int32_t(
+ b_val) - b_offset_16p0)) + out_offset_14p18)));
+#else //__aarch64__
+ out_ptr[x] = utility::clamp<int32_t, ScalarType>(support::cpp11::lround(multiplier * ((float(a_ptr[x]) - a_offset) * (float(b_val) - b_offset)) + float(out_offset)));
+#endif //__aarch64__
+ }
+ },
+ a_input_it, b_input_it, out_it);
+ }
+ else
+ {
+ const auto voffset0_16p0 = wrapper::vdup_n(in0_offset_16p0, wrapper::traits::vector_64_tag());
+ const auto voffset1_16p0 = wrapper::vdup_n(in1_offset_16p0, wrapper::traits::vector_64_tag());
+ const auto voffsetout_14p18 = wrapper::vdup_n(out_offset_14p18, wrapper::traits::vector_128_tag());
+ const auto vmultiplier_14p18 = wrapper::vdup_n(multiplier_14p18, wrapper::traits::vector_128_tag());
+
+ // Clear the x dimension on the execution window as we process the whole row each iteration.
+ in0_win.set(Window::DimX, Window::Dimension(0, 1, 1));
+ in1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ Iterator in0_it(src0, in0_win);
+ Iterator in1_it(src1, in1_win);
+ Iterator out_it(dst, win);
+
+ execute_window_loop(
+ win, [&](const Coordinates &)
+ {
+ const auto in0_ptr = reinterpret_cast<const ScalarType *>(in0_it.ptr());
+ const auto in1_ptr = reinterpret_cast<const ScalarType *>(in1_it.ptr());
+ const auto out_ptr = reinterpret_cast<ScalarType *>(out_it.ptr());
+
+ int x = window_start_x;
+
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ // Load the inputs.
+ const auto vin0_8p0 = wrapper::vloadq(in0_ptr + x);
+ const auto vin1_8p0 = wrapper::vloadq(in1_ptr + x);
+
+ // Widen the input elements to signed 16-bit regardless of the input signedness.
+ const auto vin0_16p0_0 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(vin0_8p0)));
+ const auto vin0_16p0_1 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(vin0_8p0)));
+ const auto vin1_16p0_0 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(vin1_8p0)));
+ const auto vin1_16p0_1 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(vin1_8p0)));
+
+ const auto voffseted0_32p0_00 = wrapper::vsubl(wrapper::vgetlow(vin0_16p0_0), voffset0_16p0);
+ const auto voffseted0_32p0_01 = wrapper::vsubl(wrapper::vgethigh(vin0_16p0_0), voffset0_16p0);
+ const auto voffseted0_32p0_10 = wrapper::vsubl(wrapper::vgetlow(vin0_16p0_1), voffset0_16p0);
+ const auto voffseted0_32p0_11 = wrapper::vsubl(wrapper::vgethigh(vin0_16p0_1), voffset0_16p0);
+
+ const auto voffseted1_32p0_00 = wrapper::vsubl(wrapper::vgetlow(vin1_16p0_0), voffset1_16p0);
+ const auto voffseted1_32p0_01 = wrapper::vsubl(wrapper::vgethigh(vin1_16p0_0), voffset1_16p0);
+ const auto voffseted1_32p0_10 = wrapper::vsubl(wrapper::vgetlow(vin1_16p0_1), voffset1_16p0);
+ const auto voffseted1_32p0_11 = wrapper::vsubl(wrapper::vgethigh(vin1_16p0_1), voffset1_16p0);
+
+ const auto vinnermul_32p0_00 = wrapper::vmul(voffseted0_32p0_00, voffseted1_32p0_00);
+ const auto vinnermul_32p0_01 = wrapper::vmul(voffseted0_32p0_01, voffseted1_32p0_01);
+ const auto vinnermul_32p0_10 = wrapper::vmul(voffseted0_32p0_10, voffseted1_32p0_10);
+ const auto vinnermul_32p0_11 = wrapper::vmul(voffseted0_32p0_11, voffseted1_32p0_11);
+
+ const auto vout_14p18_00 = wrapper::vmla(voffsetout_14p18, vinnermul_32p0_00, vmultiplier_14p18);
+ const auto vout_14p18_01 = wrapper::vmla(voffsetout_14p18, vinnermul_32p0_01, vmultiplier_14p18);
+ const auto vout_14p18_10 = wrapper::vmla(voffsetout_14p18, vinnermul_32p0_10, vmultiplier_14p18);
+ const auto vout_14p18_11 = wrapper::vmla(voffsetout_14p18, vinnermul_32p0_11, vmultiplier_14p18);
+
+ // These right shifts undo the earlier multiplication by 2^18. Each shift instruction here is limited to an immediate of at most 8, so the total shift of 18 is split across multiple instructions.
+ const auto vout_14p2_00 = wrapper::vqrshrn_ex<8, ScalarType>(wrapper::vshrq_n<8>(vout_14p18_00));
+ const auto vout_14p2_01 = wrapper::vqrshrn_ex<8, ScalarType>(wrapper::vshrq_n<8>(vout_14p18_01));
+ const auto vout_14p2_10 = wrapper::vqrshrn_ex<8, ScalarType>(wrapper::vshrq_n<8>(vout_14p18_10));
+ const auto vout_14p2_11 = wrapper::vqrshrn_ex<8, ScalarType>(wrapper::vshrq_n<8>(vout_14p18_11));
+
+ const auto vout_14p2_0 = wrapper::vcombine(
+ vout_14p2_00,
+ vout_14p2_01);
+
+ const auto vout_14p2_1 = wrapper::vcombine(
+ vout_14p2_10,
+ vout_14p2_11);
+
+ const auto vout_8p0 = wrapper::vcombine(
+ wrapper::vqrshrn<2>(vout_14p2_0),
+ wrapper::vqrshrn<2>(vout_14p2_1));
+ wrapper::vstore(out_ptr + x, vout_8p0);
+ }
+
+ // Process the left-over elements.
+ for(; x < window_end_x; ++x)
+ {
+#ifdef __aarch64__
+ out_ptr[x] = wrapper::vqrshrn<2>(wrapper::vqrshrn<8>(wrapper::vqrshrn_ex<8, ScalarType>((multiplier_14p18 * (int32_t(in0_ptr[x]) - in0_offset_16p0) * (int32_t(
+ in1_ptr[x]) - in1_offset_16p0)) + out_offset_14p18)));
+#else //__aarch64__
+ out_ptr[x] = utility::clamp<int32_t, ScalarType>(support::cpp11::lround(multiplier * ((float(in0_ptr[x]) - in0_offset) * (float(in1_ptr[x]) - in1_offset)) + float(out_offset)));
+#endif //__aarch64__
+ }
+ },
+ in0_it, in1_it, out_it);
+ }
+}
+
void mul_saturate_QSYMM16_QSYMM16_QSYMM16(const ITensor *src1, const ITensor *src2, ITensor *out, const Window &window, float scale)
{
const UniformQuantizationInfo input1_qua_info = src1->info()->quantization_info().uniform();
@@ -316,7 +573,8 @@ void mul_saturate_QSYMM16_QSYMM16_QSYMM16(const ITensor *src1, const ITensor *sr
const UniformQuantizationInfo tmp_qua_info = { output_qua_info.scale / scale, output_qua_info.offset };
- execute_window_loop(win, [&](const Coordinates &)
+ execute_window_loop(
+ win, [&](const Coordinates &)
{
const auto input1_ptr = reinterpret_cast<const qsymm16_t *>(input1.ptr());
const auto input2_ptr = reinterpret_cast<const qsymm16_t *>(input2.ptr());
@@ -397,7 +655,8 @@ void mul_QSYMM16_QSYMM16_S32(const ITensor *src1, const ITensor *src2, ITensor *
const auto window_start_x = static_cast<int>(window.x().start());
const auto window_end_x = static_cast<int>(window.x().end());
- execute_window_loop(win, [&](const Coordinates &)
+ execute_window_loop(
+ win, [&](const Coordinates &)
{
const auto input1_ptr = reinterpret_cast<const qsymm16_t *>(input1.ptr());
const auto input2_ptr = reinterpret_cast<const qsymm16_t *>(input2.ptr());
@@ -488,7 +747,8 @@ void mul_U8_U8_U8(const ITensor *src1, const ITensor *src2, ITensor *out, const
const auto window_start_x = static_cast<int>(window.x().start());
const auto window_end_x = static_cast<int>(window.x().end());
- execute_window_loop(win, [&](const Coordinates &)
+ execute_window_loop(
+ win, [&](const Coordinates &)
{
const auto input1_ptr = reinterpret_cast<const uint8_t *>(input1.ptr());
const auto input2_ptr = reinterpret_cast<const uint8_t *>(input2.ptr());
@@ -653,7 +913,8 @@ void mul_S16_S16_S16(const ITensor *src1, const ITensor *src2, ITensor *out, con
const auto window_start_x = static_cast<int>(window.x().start());
const auto window_end_x = static_cast<int>(window.x().end());
- execute_window_loop(win, [&](const Coordinates &)
+ execute_window_loop(
+ win, [&](const Coordinates &)
{
const auto input1_ptr = reinterpret_cast<const int16_t *>(input1.ptr());
const auto input2_ptr = reinterpret_cast<const int16_t *>(input2.ptr());
@@ -716,7 +977,7 @@ void mul_S16_S16_S16(const ITensor *src1, const ITensor *src2, ITensor *out, con
input1, input2, dst);
}
-template <bool is_sat>
+template <bool is_sat>
inline int32x4_t mul_S32_S32_S32_n_loop(const int32x4_t &src1, const int32x4_t &src2, int n)
{
const int32x2_t input1_1 = vget_low_s32(src1);
@@ -756,7 +1017,7 @@ inline int32x4_t mul_S32_S32_S32_n_loop(const int32x4_t &src1, const int32x4_t &
}
}
-template <bool is_sat>
+template <bool is_sat>
inline int32x4x2_t mul_S32_S32_S32_n_k(const int32x4x2_t &src1, const int32x4x2_t &src2, int n)
{
const int32x4x2_t result =
@@ -803,7 +1064,8 @@ void mul_S32_S32_S32(const ITensor *src1, const ITensor *src2, ITensor *out, con
Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
Iterator dst(out, win);
- execute_window_loop(win, [&](const Coordinates &)
+ execute_window_loop(
+ win, [&](const Coordinates &)
{
const auto non_broadcast_input_ptr = reinterpret_cast<const int32_t *>(non_broadcast_input.ptr());
const auto output_ptr = reinterpret_cast<int32_t *>(dst.ptr());
@@ -868,7 +1130,8 @@ void mul_S32_S32_S32(const ITensor *src1, const ITensor *src2, ITensor *out, con
Iterator input2(src2, input2_win);
Iterator dst(out, win);
- execute_window_loop(win, [&](const Coordinates &)
+ execute_window_loop(
+ win, [&](const Coordinates &)
{
const auto input1_ptr = reinterpret_cast<const int32_t *>(input1.ptr());
const auto input2_ptr = reinterpret_cast<const int32_t *>(input2.ptr());
@@ -955,7 +1218,8 @@ void mul_F32_F32_F32(const ITensor *src1, const ITensor *src2, ITensor *out, con
Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
Iterator dst(out, win);
- execute_window_loop(win, [&](const Coordinates &)
+ execute_window_loop(
+ win, [&](const Coordinates &)
{
const auto non_broadcast_input_ptr = reinterpret_cast<const float *>(non_broadcast_input.ptr());
const auto output_ptr = reinterpret_cast<float *>(dst.ptr());
@@ -992,7 +1256,8 @@ void mul_F32_F32_F32(const ITensor *src1, const ITensor *src2, ITensor *out, con
Iterator input2(src2, input2_win);
Iterator dst(out, win);
- execute_window_loop(win, [&](const Coordinates &)
+ execute_window_loop(
+ win, [&](const Coordinates &)
{
const auto input1_ptr = reinterpret_cast<const float *>(input1.ptr());
const auto input2_ptr = reinterpret_cast<const float *>(input2.ptr());
@@ -1053,7 +1318,8 @@ void c_mul_F32_F32_F32_n(const ITensor *src1, const ITensor *src2, ITensor *out,
Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
Iterator dst(out, win);
- execute_window_loop(win, [&](const Coordinates &)
+ execute_window_loop(
+ win, [&](const Coordinates &)
{
const auto non_broadcast_input_ptr = reinterpret_cast<const float *>(non_broadcast_input.ptr());
const auto output_ptr = reinterpret_cast<float *>(dst.ptr());
@@ -1106,7 +1372,8 @@ void c_mul_F32_F32_F32_n(const ITensor *src1, const ITensor *src2, ITensor *out,
Iterator input2(src2, input2_win);
Iterator dst(out, win);
- execute_window_loop(win, [&](const Coordinates &)
+ execute_window_loop(
+ win, [&](const Coordinates &)
{
const auto input1_ptr = reinterpret_cast<const float *>(input1.ptr());
const auto input2_ptr = reinterpret_cast<const float *>(input2.ptr());
@@ -1180,12 +1447,13 @@ void mul_F16_F16_F16(const ITensor *src1, const ITensor *src2, ITensor *out, con
Iterator broadcast_input(broadcast_tensor, broadcast_win);
Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
Iterator dst(out, win);
- execute_window_loop(win, [&](const Coordinates &)
+ execute_window_loop(
+ win, [&](const Coordinates &)
{
const auto non_broadcast_input_ptr = reinterpret_cast<const float16_t *>(non_broadcast_input.ptr());
const auto output_ptr = reinterpret_cast<float16_t *>(dst.ptr());
const auto broadcast_value = *reinterpret_cast<const float16_t *>(broadcast_input.ptr());
- const float16x8x2_t broadcast_value_vec =
+ const float16x8x2_t broadcast_value_vec =
{
{
vdupq_n_f16(broadcast_value),
@@ -1230,7 +1498,8 @@ void mul_F16_F16_F16(const ITensor *src1, const ITensor *src2, ITensor *out, con
Iterator input1(src1, input1_win);
Iterator input2(src2, input2_win);
Iterator dst(out, win);
- execute_window_loop(win, [&](const Coordinates &)
+ execute_window_loop(
+ win, [&](const Coordinates &)
{
const auto input1_ptr = reinterpret_cast<const float16_t *>(input1.ptr());
const auto input2_ptr = reinterpret_cast<const float16_t *>(input2.ptr());
@@ -1254,7 +1523,7 @@ void mul_F16_F16_F16(const ITensor *src1, const ITensor *src2, ITensor *out, con
}
};
const float16x8_t scale_vec = vdupq_n_f16(scale);
- const float16x8x2_t result =
+ const float16x8x2_t result =
{
{
vmulq_f16(vmulq_f16(ta1.val[0], ta2.val[0]), scale_vec),
@@ -1298,7 +1567,8 @@ void mul_U8_U8_S16(const ITensor *src1, const ITensor *src2, ITensor *out, const
const auto window_start_x = static_cast<int>(window.x().start());
const auto window_end_x = static_cast<int>(window.x().end());
- execute_window_loop(win, [&](const Coordinates &)
+ execute_window_loop(
+ win, [&](const Coordinates &)
{
const auto input1_ptr = reinterpret_cast<const uint8_t *>(input1.ptr());
const auto input2_ptr = reinterpret_cast<const uint8_t *>(input2.ptr());
@@ -1396,7 +1666,8 @@ void mul_S16_U8_S16(const ITensor *src1, const ITensor *src2, ITensor *out, cons
const auto window_start_x = static_cast<int>(window.x().start());
const auto window_end_x = static_cast<int>(window.x().end());
- execute_window_loop(win, [&](const Coordinates &)
+ execute_window_loop(
+ win, [&](const Coordinates &)
{
const auto input1_ptr = reinterpret_cast<const int16_t *>(input1.ptr());
const auto input2_ptr = reinterpret_cast<const uint8_t *>(input2.ptr());
@@ -1520,14 +1791,27 @@ void CpuMulKernel::configure(ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *
case DataType::QASYMM8:
if(dt_input2 == DataType::QASYMM8 && dt_output == DataType::QASYMM8)
{
- _func_quantized = &mul_saturate_quantized_8<uint8_t>;
+ if(mul_q8_neon_fixedpoint_possible(src1, src2, dst, scale))
+ {
+ _func_quantized = &mul_q8_neon_fixedpoint<uint8_t>;
+ }
+ else
+ {
+ _func_quantized = &mul_saturate_quantized_8<uint8_t>;
+ }
}
break;
case DataType::QASYMM8_SIGNED:
if(dt_input2 == DataType::QASYMM8_SIGNED)
{
- _func_quantized = &mul_saturate_quantized_8<int8_t>;
- ;
+ if(mul_q8_neon_fixedpoint_possible(src1, src2, dst, scale))
+ {
+ _func_quantized = &mul_q8_neon_fixedpoint<int8_t>;
+ }
+ else
+ {
+ _func_quantized = &mul_saturate_quantized_8<int8_t>;
+ }
}
break;
case DataType::QSYMM16:
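
With this dispatch in place, the fixed-point kernel is only selected when mul_q8_neon_fixedpoint_possible accepts the quantization parameters. A standalone restatement of that gate with a worked example (the numbers are illustrative, not from the patch): with input scales 0.5 and 0.25, output scale 0.1 and scale 1.0, the multiplier is (0.5 * 0.25 / 0.1) * 1.0 = 1.25, which fits 14.18, but the worst-case result 1.25 * 256 * 256 = 81920 exceeds 8191, so configure() would keep the mul_saturate_quantized_8 fallback.

    // Hedged sketch mirroring mul_q8_neon_fixedpoint_possible:
    bool fixedpoint_possible(float in0_scale, float in1_scale, float out_scale,
                             float scale, float out_offset)
    {
        const float multiplier = ((in0_scale * in1_scale) / out_scale) * scale;
        if(multiplier < -8191.f || multiplier > 8191.f)
        {
            return false; // cannot be stored as a 14.18 signed fixed-point number
        }
        const float max_result = multiplier * 256.f * 256.f + out_offset;
        return max_result <= 8191.f; // worst-case accumulator must also fit 14.18
    }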