author    SiCong Li <sicong.li@arm.com>  2020-09-24 17:34:23 +0100
committer SiCong Li <sicong.li@arm.com>  2020-09-29 10:42:00 +0000
commit    d6d1b3682a2cdd54bae5498635b108a4b19a045a (patch)
tree      21bdc01db4defe20f14c10eee67583aee8670c3b
parent    017ead22de327b1e46882e423558e5698d8da90e (diff)
COMPMID-3784 Add broadcast support to S32 NEPixelwiseMultiplication
Signed-off-by: SiCong Li <sicong.li@arm.com>
Change-Id: Ifae31c74eb46c561225394a387fc15332423bfa9
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4030
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
-rw-r--r--  arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h |  43
-rw-r--r--  arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h  |  42
-rw-r--r--  src/core/NEON/kernels/NEPixelWiseMultiplicationKernel.cpp       | 168
-rw-r--r--  tests/validation/NEON/PixelWiseMultiplication.cpp               |   7
4 files changed, 172 insertions(+), 88 deletions(-)
diff --git a/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h b/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h
index c65f788091..6221d61f49 100644
--- a/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h
+++ b/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h
@@ -55,16 +55,17 @@ public:
*
* Valid configurations (Input1,Input2) -> Output :
*
- * - (U8,U8) -> U8, S16
- * - (U8,S16) -> S16
- * - (S16,U8) -> S16
- * - (S16,S16) -> S16
- * - (S32,S32) -> S32
- * - (F16,F16) -> F16
- * - (F32,F32) -> F32
- * - (QASYMM8,QASYMM8) -> QASYMM8
- * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
- * - (QSYMM16,QSYMM16) -> QSYMM16, S32
+ * Support: Broadcast? Scale=1/255?
+ * - (U8,U8) -> U8, S16 N Y
+ * - (U8,S16) -> S16 N Y
+ * - (S16,U8) -> S16 N Y
+ * - (S16,S16) -> S16 N Y
+ * - (S32,S32) -> S32 Y N
+ * - (F16,F16) -> F16 N Y
+ * - (F32,F32) -> F32 Y Y
+ * - (QASYMM8,QASYMM8) -> QASYMM8 Y Y
+ * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED Y Y
+ * - (QSYMM16,QSYMM16) -> QSYMM16, S32 N Y
*
* @note For @p scale equal to 1/255 only round to nearest even (implemented as round half up) is supported.
* For all other scale values only round to zero (implemented as round towards minus infinity) is supported.
@@ -82,17 +83,17 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref NEPixelWiseMultiplicationKernel
*
* Valid configurations (Input1,Input2) -> Output :
- *
- * - (U8,U8) -> U8, S16
- * - (U8,S16) -> S16
- * - (S16,U8) -> S16
- * - (S16,S16) -> S16
- * - (S32,S32) -> S32
- * - (F16,F16) -> F16
- * - (F32,F32) -> F32
- * - (QASYMM8,QASYMM8) -> QASYMM8
- * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
- * - (QSYMM16,QSYMM16) -> QSYMM16, S32
+ * Support: Broadcast? Scale=1/255?
+ * - (U8,U8) -> U8, S16 N Y
+ * - (U8,S16) -> S16 N Y
+ * - (S16,U8) -> S16 N Y
+ * - (S16,S16) -> S16 N Y
+ * - (S32,S32) -> S32 Y N
+ * - (F16,F16) -> F16 N Y
+ * - (F32,F32) -> F32 Y Y
+ * - (QASYMM8,QASYMM8) -> QASYMM8 Y Y
+ * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED Y Y
+ * - (QSYMM16,QSYMM16) -> QSYMM16, S32 N Y
*
* @note For @p scale equal to 1/255 only round to nearest even (implemented as round half up) is supported.
* For all other scale values only round to zero (implemented as round towards minus infinity) is supported.
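[Editorial note, not part of this commit: for concreteness, a minimal sketch of validating the newly legal (S32,S32) -> S32 broadcast configuration against this kernel. The helper name and shapes are invented for illustration.]

#include "arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"

using namespace arm_compute;

// Hypothetical helper, not from the library: checks the S32 broadcast path.
Status validate_s32_broadcast()
{
    const TensorInfo input1(TensorShape(16U, 4U), 1, DataType::S32);
    const TensorInfo input2(TensorShape(1U, 4U), 1, DataType::S32); // X == 1 -> broadcast
    const TensorInfo output(TensorShape(16U, 4U), 1, DataType::S32);
    // Per the table above, S32 supports broadcast but not scale == 1/255,
    // so use scale 1 with the TO_ZERO rounding required for scale != 1/255.
    return NEPixelWiseMultiplicationKernel::validate(&input1, &input2, &output, 1.f,
                                                     ConvertPolicy::WRAP, RoundingPolicy::TO_ZERO);
}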
diff --git a/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h b/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h
index 4ff7f1d112..e1072980cf 100644
--- a/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h
+++ b/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h
@@ -42,16 +42,17 @@ public:
*
* Valid configurations (Input1,Input2) -> Output :
*
- * - (U8,U8) -> U8, S16
- * - (U8,S16) -> S16
- * - (S16,U8) -> S16
- * - (S16,S16) -> S16
- * - (S32,S32) -> S32
- * - (F16,F16) -> F16
- * - (F32,F32) -> F32
- * - (QASYMM8,QASYMM8) -> QASYMM8
- * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
- * - (QSYMM16,QSYMM16) -> QSYMM16, S32
+ * Support: Broadcast? Scale=1/255?
+ * - (U8,U8) -> U8, S16 N Y
+ * - (U8,S16) -> S16 N Y
+ * - (S16,U8) -> S16 N Y
+ * - (S16,S16) -> S16 N Y
+ * - (S32,S32) -> S32 Y N
+ * - (F16,F16) -> F16 N Y
+ * - (F32,F32) -> F32 Y Y
+ * - (QASYMM8,QASYMM8) -> QASYMM8 Y Y
+ * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED Y Y
+ * - (QSYMM16,QSYMM16) -> QSYMM16, S32 N Y
*
* @note For @p scale equal to 1/255 only round to nearest even (implemented as round half up) is supported.
* For all other scale values only round to zero (implemented as round towards minus infinity) is supported.
@@ -74,16 +75,17 @@ public:
*
* Valid configurations (Input1,Input2) -> Output :
*
- * - (U8,U8) -> U8, S16
- * - (U8,S16) -> S16
- * - (S16,U8) -> S16
- * - (S16,S16) -> S16
- * - (S32,S32) -> S32
- * - (F16,F16) -> F16
- * - (F32,F32) -> F32
- * - (QASYMM8,QASYMM8) -> QASYMM8
- * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
- * - (QSYMM16,QSYMM16) -> QSYMM16, S32
+ * Support: Broadcast? Scale=1/255?
+ * - (U8,U8) -> U8, S16 N Y
+ * - (U8,S16) -> S16 N Y
+ * - (S16,U8) -> S16 N Y
+ * - (S16,S16) -> S16 N Y
+ * - (S32,S32) -> S32 Y N
+ * - (F16,F16) -> F16 N Y
+ * - (F32,F32) -> F32 Y Y
+ * - (QASYMM8,QASYMM8) -> QASYMM8 Y Y
+ * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED Y Y
+ * - (QSYMM16,QSYMM16) -> QSYMM16, S32 N Y
*
* @note For @p scale equal to 1/255 only round to nearest even (implemented as round half up) is supported.
* For all other scale values only round to zero (implemented as round towards minus infinity) is supported.
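[Editorial note, not part of this commit: the same configuration through the runtime interface, as a hedged usage sketch assuming the standard Tensor/allocator workflow. Shapes and fill logic are placeholders.]

#include "arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

void run_s32_broadcast_multiply()
{
    Tensor input1, input2, output;
    input1.allocator()->init(TensorInfo(TensorShape(16U, 4U), 1, DataType::S32));
    input2.allocator()->init(TensorInfo(TensorShape(1U, 4U), 1, DataType::S32)); // broadcast along X
    output.allocator()->init(TensorInfo(TensorShape(16U, 4U), 1, DataType::S32));

    NEPixelWiseMultiplication mul;
    mul.configure(&input1, &input2, &output, 1.f, ConvertPolicy::WRAP, RoundingPolicy::TO_ZERO);

    input1.allocator()->allocate();
    input2.allocator()->allocate();
    output.allocator()->allocate();
    // ... fill input1 and input2 here ...
    mul.run();
}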
diff --git a/src/core/NEON/kernels/NEPixelWiseMultiplicationKernel.cpp b/src/core/NEON/kernels/NEPixelWiseMultiplicationKernel.cpp
index 302ee7694f..84683ea69f 100644
--- a/src/core/NEON/kernels/NEPixelWiseMultiplicationKernel.cpp
+++ b/src/core/NEON/kernels/NEPixelWiseMultiplicationKernel.cpp
@@ -773,75 +773,151 @@ template <bool is_sat>
void mul_S32_S32_S32(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window, int n)
{
// Create input windows
- Window win = window;
Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape());
Window input2_win = window.broadcast_if_dimension_le_one(in2->info()->tensor_shape());
// Clear X Dimension on execution window as we handle manually
+ Window win = window;
win.set(Window::DimX, Window::Dimension(0, 1, 1));
- input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
- input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- Iterator input1(in1, input1_win);
- Iterator input2(in2, input2_win);
- Iterator output(out, win);
- const int window_step_x = 8;
- const auto window_start_x = static_cast<int>(window.x().start());
- const auto window_end_x = static_cast<int>(window.x().end());
+ const int window_step_x = 8;
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+ const bool is_broadcast_across_x = (input1_win.x().step() == 0) || (input2_win.x().step() == 0);
- execute_window_loop(win, [&](const Coordinates &)
+ if(is_broadcast_across_x)
{
- const auto input1_ptr = reinterpret_cast<const int32_t *>(input1.ptr());
- const auto input2_ptr = reinterpret_cast<const int32_t *>(input2.ptr());
- const auto output_ptr = reinterpret_cast<int32_t *>(output.ptr());
+ const bool is_broadcast_input_2 = input2_win.x().step() == 0;
+ Window broadcast_win = is_broadcast_input_2 ? input2_win : input1_win;
+ Window non_broadcast_win = !is_broadcast_input_2 ? input2_win : input1_win;
+ const ITensor *broadcast_tensor = is_broadcast_input_2 ? in2 : in1;
+ const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? in2 : in1;
- // Compute window_step_x elements per iteration
- int x = window_start_x;
- for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ // Clear X Dimension on execution window as we handle manually
+ non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ Iterator broadcast_input(broadcast_tensor, broadcast_win);
+ Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
+ Iterator output(out, win);
+
+ execute_window_loop(win, [&](const Coordinates &)
{
- const int32x4x2_t ta1 =
+ const auto non_broadcast_input_ptr = reinterpret_cast<const int32_t *>(non_broadcast_input.ptr());
+ const auto output_ptr = reinterpret_cast<int32_t *>(output.ptr());
+
+ const int32_t broadcast_value = *reinterpret_cast<const int32_t *>(broadcast_input.ptr());
+ const auto broadcast_value_vec = vdupq_n_s32(broadcast_value);
+
+ // Compute window_step_x elements per iteration
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
{
+ const int32x4x2_t broadcast_v =
{
- vld1q_s32(input1_ptr + x),
- vld1q_s32(input1_ptr + x + 4),
- }
- };
- const int32x4x2_t ta2 =
+ {
+ broadcast_value_vec,
+ broadcast_value_vec,
+ }
+ };
+ const int32x4x2_t non_broadcast_v =
+ {
+ {
+ vld1q_s32(non_broadcast_input_ptr + x),
+ vld1q_s32(non_broadcast_input_ptr + x + 4),
+ }
+ };
+ const int32x4x2_t result = mul_S32_S32_S32_n_k<is_sat>(broadcast_v, non_broadcast_v, n);
+
+ vst1q_s32(output_ptr + x, result.val[0]);
+ vst1q_s32(output_ptr + x + 4, result.val[1]);
+ }
+
+ // Compute left-over elements
+ for(; x < window_end_x; ++x)
{
+ int64_t tmp = static_cast<int64_t>(broadcast_value) * static_cast<int64_t>(*(non_broadcast_input_ptr + x));
+
+ if(tmp >= 0)
{
- vld1q_s32(input2_ptr + x),
- vld1q_s32(input2_ptr + x + 4),
+ tmp >>= n;
}
- };
- const int32x4x2_t result = mul_S32_S32_S32_n_k<is_sat>(ta1, ta2, n);
+ else
+ {
+ uint64_t mask = (1u << n) - 1;
+ tmp = (tmp + static_cast<int64_t>(mask)) >> n;
+ }
+ if(is_sat)
+ {
+ tmp = utility::clamp<int64_t, int32_t>(tmp);
+ }
+ *(output_ptr + x) = static_cast<int32_t>(tmp);
+ }
+ },
+ broadcast_input, non_broadcast_input, output);
+ }
+ else
+ {
+ // Clear X Dimension on execution window as we handle manually
+ input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
+ input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));
- vst1q_s32(output_ptr + x, result.val[0]);
- vst1q_s32(output_ptr + x + 4, result.val[1]);
- }
+ Iterator input1(in1, input1_win);
+ Iterator input2(in2, input2_win);
+ Iterator output(out, win);
- // Compute left-over elements
- for(; x < window_end_x; ++x)
+ execute_window_loop(win, [&](const Coordinates &)
{
- int64_t tmp = static_cast<int64_t>(*(input1_ptr + x)) * static_cast<int64_t>(*(input2_ptr + x));
+ const auto input1_ptr = reinterpret_cast<const int32_t *>(input1.ptr());
+ const auto input2_ptr = reinterpret_cast<const int32_t *>(input2.ptr());
+ const auto output_ptr = reinterpret_cast<int32_t *>(output.ptr());
- if(tmp >= 0)
- {
- tmp >>= n;
- }
- else
+ // Compute window_step_x elements per iteration
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
{
- uint64_t mask = (1u << n) - 1;
- tmp = (tmp + static_cast<int64_t>(mask)) >> n;
+ const int32x4x2_t ta1 =
+ {
+ {
+ vld1q_s32(input1_ptr + x),
+ vld1q_s32(input1_ptr + x + 4),
+ }
+ };
+ const int32x4x2_t ta2 =
+ {
+ {
+ vld1q_s32(input2_ptr + x),
+ vld1q_s32(input2_ptr + x + 4),
+ }
+ };
+ const int32x4x2_t result = mul_S32_S32_S32_n_k<is_sat>(ta1, ta2, n);
+
+ vst1q_s32(output_ptr + x, result.val[0]);
+ vst1q_s32(output_ptr + x + 4, result.val[1]);
}
- if(is_sat)
+
+ // Compute left-over elements
+ for(; x < window_end_x; ++x)
{
- tmp = (tmp > INT_MAX) ? INT_MAX : ((tmp < INT_MIN) ? INT_MIN : tmp);
+ int64_t tmp = static_cast<int64_t>(*(input1_ptr + x)) * static_cast<int64_t>(*(input2_ptr + x));
+
+ if(tmp >= 0)
+ {
+ tmp >>= n;
+ }
+ else
+ {
+ uint64_t mask = (1u << n) - 1;
+ tmp = (tmp + static_cast<int64_t>(mask)) >> n;
+ }
+ if(is_sat)
+ {
+ tmp = utility::clamp<int64_t, int32_t>(tmp);
+ }
+ *(output_ptr + x) = static_cast<int32_t>(tmp);
}
- *(output_ptr + x) = static_cast<int32_t>(tmp);
- }
- },
- input1, input2, output);
+ },
+ input1, input2, output);
+ }
}
void mul_F32_F32_F32(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window, float scale)
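[Editorial note, not part of this commit: the scalar tail in both the broadcast and non-broadcast branches above implements the same fixed-point arithmetic. A standalone sketch of it follows, mirroring (not replacing) the kernel code, with names chosen for the example.]

#include <algorithm>
#include <cstdint>

int32_t mul_shift_s32(int32_t a, int32_t b, int n, bool is_sat)
{
    // Widen to 64 bits so the 32x32-bit product cannot overflow.
    int64_t tmp = static_cast<int64_t>(a) * static_cast<int64_t>(b);
    if(tmp >= 0)
    {
        tmp >>= n; // arithmetic shift truncates non-negative values toward zero
    }
    else
    {
        // Adding (2^n - 1) first makes the shift round negative values toward
        // zero instead of toward minus infinity.
        const int64_t mask = (int64_t{ 1 } << n) - 1;
        tmp = (tmp + mask) >> n;
    }
    if(is_sat)
    {
        // Saturate to the int32_t range, as utility::clamp does in the kernel.
        tmp = std::min<int64_t>(std::max<int64_t>(tmp, INT32_MIN), INT32_MAX);
    }
    return static_cast<int32_t>(tmp);
}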
diff --git a/tests/validation/NEON/PixelWiseMultiplication.cpp b/tests/validation/NEON/PixelWiseMultiplication.cpp
index a66f6f192f..1bb0588919 100644
--- a/tests/validation/NEON/PixelWiseMultiplication.cpp
+++ b/tests/validation/NEON/PixelWiseMultiplication.cpp
@@ -118,7 +118,7 @@ template <typename T>
using NEPixelWiseMultiplicationToF32Fixture = PixelWiseMultiplicationValidationFixture<Tensor, Accessor, NEPixelWiseMultiplication, T, float>;
using NEPixelWiseMultiplicationU8U8ToS16Fixture = PixelWiseMultiplicationValidationFixture<Tensor, Accessor, NEPixelWiseMultiplication, uint8_t, uint8_t, int16_t>;
template <typename T>
-using NEPixelWiseMultiplicationBroadcastFixture = PixelWiseMultiplicationBroadcastValidationFixture<Tensor, Accessor, NEPixelWiseMultiplication, T, float>;
+using NEPixelWiseMultiplicationBroadcastFixture = PixelWiseMultiplicationBroadcastValidationFixture<Tensor, Accessor, NEPixelWiseMultiplication, T, T>;
using NEPixelWiseMultiplicationBroadcastQASYMM8Fixture = PixelWiseMultiplicationBroadcastValidationQuantizedFixture<Tensor, Accessor, NEPixelWiseMultiplication, uint8_t, uint8_t>;
using NEPixelWiseMultiplicationBroadcastQASYMM8SignedFixture = PixelWiseMultiplicationBroadcastValidationQuantizedFixture<Tensor, Accessor, NEPixelWiseMultiplication, int8_t, int8_t>;
@@ -493,6 +493,11 @@ TEST_SUITE(ScaleOther)
PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunSmall, ToS32Fixture<int32_t>, ALL, SmallShapes(), S32, S32, S32, scale_other, TO_ZERO, InPlaceDataSet, WRAP_VALIDATE(int32_t, 1))
TEST_SUITE_END() // ScaleOther
+TEST_SUITE(Broadcast)
+PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunSmall, BroadcastFixture<int32_t>, ALL, SmallShapesBroadcast(), S32, S32, S32, scale_unity, TO_ZERO, framework::dataset::make("InPlace", { false }),
+ WRAP_VALIDATE(int32_t, 1))
+TEST_SUITE_END() // Broadcast
+
TEST_SUITE_END() // S32toS32
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC