From afd38f0c617d6f89b2b4532c6c44f116617e2b6f Mon Sep 17 00:00:00 2001
From: Felix Thomasmathibalan
Date: Wed, 27 Sep 2023 17:46:17 +0100
Subject: Apply clang-format on repository

Code is formatted as per a revised clang-format configuration file (not part of this delivery). Version 14.0.6 is used.

Exclusion List:
- files with .cl extension
- files that are not strictly C/C++ (e.g. Android.bp, Sconscript ...)

And the following directories:
- compute_kernel_writer/validation/
- tests/
- include/
- src/core/NEON/kernels/convolution/
- src/core/NEON/kernels/arm_gemm/
- src/core/NEON/kernels/arm_conv/
- data/

There will be a follow-up for formatting of .cl files and the files under tests/ and compute_kernel_writer/validation/.

Signed-off-by: Felix Thomasmathibalan
Change-Id: Ib7eb1fcf4e7537b9feaefcfc15098a804a3fde0a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10391
Benchmark: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Gunes Bayir
---
 .../elementwise_binary/generic/neon/fp16.cpp | 73 +-
 .../elementwise_binary/generic/neon/fp32.cpp | 73 +-
 .../kernels/elementwise_binary/generic/neon/impl.h | 852 +++++++++++++--------
 .../elementwise_binary/generic/neon/integer.cpp | 173 ++++-
 .../elementwise_binary/generic/neon/qasymm8.cpp | 76 +-
 .../generic/neon/qasymm8_signed.cpp | 74 +-
 .../elementwise_binary/generic/sve/fp16.cpp | 73 +-
 .../elementwise_binary/generic/sve/fp32.cpp | 71 +-
 .../elementwise_binary/generic/sve/impl.cpp | 250 +++---
 .../kernels/elementwise_binary/generic/sve/impl.h | 16 +-
 .../elementwise_binary/generic/sve/integer.cpp | 171 ++++-
 .../kernels/elementwise_binary/generic/sve2/impl.h | 379 ++++-----
 .../elementwise_binary/generic/sve2/qasymm8.cpp | 76 +-
 .../generic/sve2/qasymm8_signed.cpp | 74 +-
 14 files changed, 1627 insertions(+), 804 deletions(-)

(limited to 'src/cpu/kernels/elementwise_binary/generic')

diff --git a/src/cpu/kernels/elementwise_binary/generic/neon/fp16.cpp b/src/cpu/kernels/elementwise_binary/generic/neon/fp16.cpp index 6091ef215e..9b4375f17c 100644 --- a/src/cpu/kernels/elementwise_binary/generic/neon/fp16.cpp +++ b/src/cpu/kernels/elementwise_binary/generic/neon/fp16.cpp @@ -23,6 +23,7 @@ */ #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) #include "arm_compute/core/Helpers.h" + #include "src/cpu/kernels/elementwise_binary/generic/neon/impl.h" namespace arm_compute @@ -35,14 +36,38 @@ void neon_fp16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITenso return elementwise_arithm_op>(in1, in2, out, window); } -template void neon_fp16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_fp16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_fp16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_fp16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_fp16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_fp16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_fp16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_fp16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); +template void
neon_fp16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_fp16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_fp16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_fp16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_fp16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_fp16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_fp16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_fp16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); template void neon_fp16_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window) @@ -50,12 +75,30 @@ void neon_fp16_comparison_elementwise_binary(const ITensor *in1, const ITensor * return elementwise_comp_op_16(in1, in2, out, window); } -template void neon_fp16_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_fp16_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_fp16_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_fp16_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_fp16_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_fp16_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -} +template void neon_fp16_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_fp16_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_fp16_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_fp16_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_fp16_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_fp16_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +} // namespace cpu } // namespace arm_compute #endif //defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) diff --git a/src/cpu/kernels/elementwise_binary/generic/neon/fp32.cpp b/src/cpu/kernels/elementwise_binary/generic/neon/fp32.cpp index 2d8fec91c5..53ccd89dcc 100644 --- a/src/cpu/kernels/elementwise_binary/generic/neon/fp32.cpp +++ b/src/cpu/kernels/elementwise_binary/generic/neon/fp32.cpp @@ -22,6 +22,7 @@ * SOFTWARE. 
*/ #include "arm_compute/core/Helpers.h" + #include "src/cpu/kernels/elementwise_binary/generic/neon/impl.h" namespace arm_compute @@ -34,25 +35,67 @@ void neon_fp32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITenso return elementwise_arithm_op>(in1, in2, out, window); } -template void neon_fp32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_fp32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_fp32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_fp32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_fp32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_fp32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_fp32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_fp32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); +template void neon_fp32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_fp32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_fp32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_fp32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_fp32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_fp32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_fp32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_fp32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); template void neon_fp32_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window) { return elementwise_comp_op_32(in1, in2, out, window); } -template void neon_fp32_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_fp32_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_fp32_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_fp32_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_fp32_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_fp32_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -} +template void neon_fp32_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_fp32_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void 
neon_fp32_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_fp32_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_fp32_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_fp32_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +} // namespace cpu } // namespace arm_compute diff --git a/src/cpu/kernels/elementwise_binary/generic/neon/impl.h b/src/cpu/kernels/elementwise_binary/generic/neon/impl.h index 98b154e8fd..98f7e8b949 100644 --- a/src/cpu/kernels/elementwise_binary/generic/neon/impl.h +++ b/src/cpu/kernels/elementwise_binary/generic/neon/impl.h @@ -39,7 +39,7 @@ typename VectorType::type elementwise_arithm_op(const typename VectorType::type vec_type res = wrapper::vdup_n(static_cast(0), tag_type{}); - switch(op) + switch (op) { case ArithmeticOperation::MAX: res = wrapper::vmax(a, b); @@ -71,7 +71,9 @@ typename VectorType::type elementwise_arithm_op(const typename VectorType::type } template -typename VectorType::type elementwise_arithm_op_broadcast(const typename VectorType::type &a, const ScalarType &broadcast_value, const bool reorder) +typename VectorType::type elementwise_arithm_op_broadcast(const typename VectorType::type &a, + const ScalarType &broadcast_value, + const bool reorder) { using tag_type = typename VectorType::tag_type; using vec_type = typename VectorType::type; @@ -81,10 +83,15 @@ typename VectorType::type elementwise_arithm_op_broadcast(const typename VectorT } template -void elementwise_op(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window, - OutputScalarType (*scalar_func)(const InputScalarType &, const InputScalarType &), - int (*broadcast_func)(int, int, int, const InputScalarType *, const InputScalarType &, OutputScalarType *, const bool), - int (*neon_func)(int, int, int, const InputScalarType *, const InputScalarType *, OutputScalarType *)) +void elementwise_op( + const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window, + OutputScalarType (*scalar_func)(const InputScalarType &, const InputScalarType &), + int (*broadcast_func)( + int, int, int, const InputScalarType *, const InputScalarType &, OutputScalarType *, const bool), + int (*neon_func)(int, int, int, const InputScalarType *, const InputScalarType *, OutputScalarType *)) { // Create input windows Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()); @@ -99,7 +106,7 @@ void elementwise_op(const ITensor *in1, const ITensor *in2, ITensor *out, const const auto window_end_x = static_cast(window.x().end()); const bool is_broadcast_across_x = in1->info()->tensor_shape().x() != in2->info()->tensor_shape().x(); - if(is_broadcast_across_x) + if (is_broadcast_across_x) { const bool is_broadcast_input_2 = input2_win.x().step() == 0; Window broadcast_win = is_broadcast_input_2 ? 
input2_win : input1_win; @@ -114,20 +121,26 @@ void elementwise_op(const ITensor *in1, const ITensor *in2, ITensor *out, const Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win); Iterator output(out, win); - execute_window_loop(win, [&](const Coordinates &) - { - auto output_ptr = reinterpret_cast(output.ptr()); - const auto non_broadcast_input_ptr = reinterpret_cast(non_broadcast_input.ptr()); - const InputScalarType broadcast_value = *reinterpret_cast(broadcast_input.ptr()); - - int x = (*broadcast_func)(window_start_x, window_end_x, window_step_x, non_broadcast_input_ptr, broadcast_value, output_ptr, !is_broadcast_input_2); - for(; x < window_end_x; ++x) + execute_window_loop( + win, + [&](const Coordinates &) { - const auto a = *(non_broadcast_input_ptr + x); - *(output_ptr + x) = (*scalar_func)(!is_broadcast_input_2 ? broadcast_value : a, !is_broadcast_input_2 ? a : broadcast_value); - } - }, - broadcast_input, non_broadcast_input, output); + auto output_ptr = reinterpret_cast(output.ptr()); + const auto non_broadcast_input_ptr = + reinterpret_cast(non_broadcast_input.ptr()); + const InputScalarType broadcast_value = + *reinterpret_cast(broadcast_input.ptr()); + + int x = (*broadcast_func)(window_start_x, window_end_x, window_step_x, non_broadcast_input_ptr, + broadcast_value, output_ptr, !is_broadcast_input_2); + for (; x < window_end_x; ++x) + { + const auto a = *(non_broadcast_input_ptr + x); + *(output_ptr + x) = (*scalar_func)(!is_broadcast_input_2 ? broadcast_value : a, + !is_broadcast_input_2 ? a : broadcast_value); + } + }, + broadcast_input, non_broadcast_input, output); } else { @@ -139,21 +152,23 @@ void elementwise_op(const ITensor *in1, const ITensor *in2, ITensor *out, const Iterator input2(in2, input2_win); Iterator output(out, win); - execute_window_loop(win, [&](const Coordinates &) - { - auto output_ptr = reinterpret_cast(output.ptr()); - const auto input1_ptr = reinterpret_cast(input1.ptr()); - const auto input2_ptr = reinterpret_cast(input2.ptr()); - - int x = (*neon_func)(window_start_x, window_end_x, window_step_x, input1_ptr, input2_ptr, output_ptr); - for(; x < window_end_x; ++x) + execute_window_loop( + win, + [&](const Coordinates &) { - const auto a = *(input1_ptr + x); - const auto b = *(input2_ptr + x); - *(output_ptr + x) = (*scalar_func)(a, b); - } - }, - input1, input2, output); + auto output_ptr = reinterpret_cast(output.ptr()); + const auto input1_ptr = reinterpret_cast(input1.ptr()); + const auto input2_ptr = reinterpret_cast(input2.ptr()); + + int x = (*neon_func)(window_start_x, window_end_x, window_step_x, input1_ptr, input2_ptr, output_ptr); + for (; x < window_end_x; ++x) + { + const auto a = *(input1_ptr + x); + const auto b = *(input2_ptr + x); + *(output_ptr + x) = (*scalar_func)(a, b); + } + }, + input1, input2, output); } } @@ -162,7 +177,7 @@ inline ScalarType elementwise_arithm_op_scalar(const ScalarType &a, const Scalar { auto res = ScalarType(0); - switch(op) + switch (op) { case ArithmeticOperation::MAX: res = std::max(a, b); @@ -183,10 +198,10 @@ inline ScalarType elementwise_arithm_op_scalar(const ScalarType &a, const Scalar case ArithmeticOperation::DIV: { res = a / b; - if(std::is_integral::value) + if (std::is_integral::value) { res = (b == 0) ? 
0 : res; - if(static_cast(a) % static_cast(b) != 0 && ((a < 0) != (b < 0))) + if (static_cast(a) % static_cast(b) != 0 && ((a < 0) != (b < 0))) { --res; } @@ -205,43 +220,56 @@ inline ScalarType elementwise_arithm_op_scalar(const ScalarType &a, const Scalar } template <> -inline int32x4_t elementwise_arithm_op>(const int32x4_t &a, const int32x4_t &b) +inline int32x4_t +elementwise_arithm_op>(const int32x4_t &a, + const int32x4_t &b) { return vcvtq_s32_f32(vfloorq_f32(wrapper::vdiv(vcvtq_f32_s32(a), vcvtq_f32_s32(b)))); } template <> -inline float32x4_t elementwise_arithm_op>(const float32x4_t &a, const float32x4_t &b) +inline float32x4_t +elementwise_arithm_op>(const float32x4_t &a, + const float32x4_t &b) { return wrapper::vdiv(a, b); } template <> -inline float32x4_t elementwise_arithm_op>(const float32x4_t &a, const float32x4_t &b) +inline float32x4_t +elementwise_arithm_op>(const float32x4_t &a, + const float32x4_t &b) { return wrapper::vpow(a, b); } #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC template <> -inline float16x8_t elementwise_arithm_op>(const float16x8_t &a, const float16x8_t &b) +inline float16x8_t elementwise_arithm_op>( + const float16x8_t &a, const float16x8_t &b) { return wrapper::vdiv(a, b); } template <> -inline float16x8_t elementwise_arithm_op>(const float16x8_t &a, const float16x8_t &b) +inline float16x8_t +elementwise_arithm_op>( + const float16x8_t &a, const float16x8_t &b) { return wrapper::vpow(a, b); } #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC template -inline int elementwise_arithm_op_loop(int window_start_x, int window_end_x, int window_step_x, - const ScalarType *input1_ptr, const ScalarType *input2_ptr, ScalarType *output_ptr) +inline int elementwise_arithm_op_loop(int window_start_x, + int window_end_x, + int window_step_x, + const ScalarType *input1_ptr, + const ScalarType *input2_ptr, + ScalarType *output_ptr) { int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) + for (; x <= (window_end_x - window_step_x); x += window_step_x) { const auto a = wrapper::vloadq(input1_ptr + x); const auto b = wrapper::vloadq(input2_ptr + x); @@ -251,14 +279,20 @@ inline int elementwise_arithm_op_loop(int window_start_x, int window_end_x, int } template -inline int elementwise_arithm_op_broadcast_loop(int window_start_x, int window_end_x, int window_step_x, - const ScalarType *non_broadcast_input_ptr, const ScalarType &broadcast_value, ScalarType *output_ptr, const bool reorder) +inline int elementwise_arithm_op_broadcast_loop(int window_start_x, + int window_end_x, + int window_step_x, + const ScalarType *non_broadcast_input_ptr, + const ScalarType &broadcast_value, + ScalarType *output_ptr, + const bool reorder) { int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) + for (; x <= (window_end_x - window_step_x); x += window_step_x) { const auto a = wrapper::vloadq((non_broadcast_input_ptr + x)); - wrapper::vstore(output_ptr + x, elementwise_arithm_op_broadcast(a, broadcast_value, reorder)); + wrapper::vstore(output_ptr + x, + elementwise_arithm_op_broadcast(a, broadcast_value, reorder)); } return x; } @@ -268,10 +302,10 @@ void elementwise_arithm_op(const ITensor *in1, const ITensor *in2, ITensor *out, { using scalar_type = typename VectorType::scalar_type; - elementwise_op(in1, in2, out, window, - &elementwise_arithm_op_scalar, - &elementwise_arithm_op_broadcast_loop, - &elementwise_arithm_op_loop); + elementwise_op( + in1, in2, out, window, &elementwise_arithm_op_scalar, + 
&elementwise_arithm_op_broadcast_loop, + &elementwise_arithm_op_loop); } template @@ -279,7 +313,7 @@ inline uint8_t elementwise_comp_op_scalar(const InputScalarType &a, const InputS { bool res = false; - switch(op) + switch (op) { case ComparisonOperation::Equal: res = (a == b); @@ -308,9 +342,9 @@ inline uint8_t elementwise_comp_op_scalar(const InputScalarType &a, const InputS template inline OutputVectorType elementwise_comp_op(const InputVectorType &a, const InputVectorType &b) { - OutputVectorType res = { 0, 0, 0, 0 }; + OutputVectorType res = {0, 0, 0, 0}; - switch(op) + switch (op) { case ComparisonOperation::Equal: res = wrapper::vceq(a, b); @@ -338,53 +372,75 @@ inline OutputVectorType elementwise_comp_op(const InputVectorType &a, const Inpu } template -inline OutputVectorType elementwise_comp_op_broadcast(const InputVectorType &a, const InputScalarType &broadcast_value, const bool reorder) +inline OutputVectorType +elementwise_comp_op_broadcast(const InputVectorType &a, const InputScalarType &broadcast_value, const bool reorder) { InputVectorType broadcast_vector = wrapper::vdup_n(broadcast_value, wrapper::traits::vector_128_tag()); - return elementwise_comp_op(reorder ? broadcast_vector : a, reorder ? a : broadcast_vector); + return elementwise_comp_op(reorder ? broadcast_vector : a, + reorder ? a : broadcast_vector); } template -inline int elementwise_comp_op_broadcast_8_loop(int window_start_x, int window_end_x, int window_step_x, - const InputScalarType *non_broadcast_input_ptr, const InputScalarType &broadcast_value, uint8_t *output_ptr, const bool reorder) +inline int elementwise_comp_op_broadcast_8_loop(int window_start_x, + int window_end_x, + int window_step_x, + const InputScalarType *non_broadcast_input_ptr, + const InputScalarType &broadcast_value, + uint8_t *output_ptr, + const bool reorder) { int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) + for (; x <= (window_end_x - window_step_x); x += window_step_x) { - const auto a = elementwise_comp_op_broadcast(wrapper::vloadq((non_broadcast_input_ptr + x)), broadcast_value, reorder); + const auto a = elementwise_comp_op_broadcast( + wrapper::vloadq((non_broadcast_input_ptr + x)), broadcast_value, reorder); wrapper::vstore(output_ptr + x, a); } return x; } template -inline int elementwise_comp_op_broadcast_16_loop(int window_start_x, int window_end_x, int window_step_x, - const InputScalarType *non_broadcast_input_ptr, const InputScalarType &broadcast_value, uint8_t *output_ptr, const bool reorder) +inline int elementwise_comp_op_broadcast_16_loop(int window_start_x, + int window_end_x, + int window_step_x, + const InputScalarType *non_broadcast_input_ptr, + const InputScalarType &broadcast_value, + uint8_t *output_ptr, + const bool reorder) { int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) + for (; x <= (window_end_x - window_step_x); x += window_step_x) { - const auto a = elementwise_comp_op_broadcast(wrapper::vloadq((non_broadcast_input_ptr + x)), broadcast_value, reorder); + const auto a = elementwise_comp_op_broadcast( + wrapper::vloadq((non_broadcast_input_ptr + x)), broadcast_value, reorder); wrapper::vstore(output_ptr + x, wrapper::vmovn(a)); } return x; } template -inline int elementwise_comp_op_broadcast_32_loop(int window_start_x, int window_end_x, int window_step_x, - const InputScalarType *non_broadcast_input_ptr, const InputScalarType &broadcast_value, uint8_t *output_ptr, const bool reorder) +inline int 
elementwise_comp_op_broadcast_32_loop(int window_start_x, + int window_end_x, + int window_step_x, + const InputScalarType *non_broadcast_input_ptr, + const InputScalarType &broadcast_value, + uint8_t *output_ptr, + const bool reorder) { int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) + for (; x <= (window_end_x - window_step_x); x += window_step_x) { - const auto a = elementwise_comp_op_broadcast(wrapper::vloadq(non_broadcast_input_ptr + x), broadcast_value, reorder); - const auto b = elementwise_comp_op_broadcast(wrapper::vloadq(non_broadcast_input_ptr + x + 4), broadcast_value, reorder); + const auto a = elementwise_comp_op_broadcast( + wrapper::vloadq(non_broadcast_input_ptr + x), broadcast_value, reorder); + const auto b = elementwise_comp_op_broadcast( + wrapper::vloadq(non_broadcast_input_ptr + x + 4), broadcast_value, reorder); wrapper::vstore(output_ptr + x, wrapper::vmovn(wrapper::vcombine(wrapper::vmovn(a), wrapper::vmovn(b)))); } - if(x <= window_end_x - 4) + if (x <= window_end_x - 4) { - const auto a = elementwise_comp_op_broadcast(wrapper::vloadq((non_broadcast_input_ptr + x)), broadcast_value, reorder); - for(int i = 0; i < 4; i++) + const auto a = elementwise_comp_op_broadcast( + wrapper::vloadq((non_broadcast_input_ptr + x)), broadcast_value, reorder); + for (int i = 0; i < 4; i++) { *(output_ptr + x + i) = wrapper::vgetlane(a, i); } @@ -394,11 +450,15 @@ inline int elementwise_comp_op_broadcast_32_loop(int window_start_x, int window_ } template -inline int elementwise_comp_op_8_loop(int window_start_x, int window_end_x, int window_step_x, - const InputScalarType *input1_ptr, const InputScalarType *input2_ptr, uint8_t *output_ptr) +inline int elementwise_comp_op_8_loop(int window_start_x, + int window_end_x, + int window_step_x, + const InputScalarType *input1_ptr, + const InputScalarType *input2_ptr, + uint8_t *output_ptr) { int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) + for (; x <= (window_end_x - window_step_x); x += window_step_x) { const auto a = wrapper::vloadq(input1_ptr + x); const auto b = wrapper::vloadq(input2_ptr + x); @@ -409,11 +469,15 @@ inline int elementwise_comp_op_8_loop(int window_start_x, int window_end_x, int } template -inline int elementwise_comp_op_16_loop(int window_start_x, int window_end_x, int window_step_x, - const InputScalarType *input1_ptr, const InputScalarType *input2_ptr, uint8_t *output_ptr) +inline int elementwise_comp_op_16_loop(int window_start_x, + int window_end_x, + int window_step_x, + const InputScalarType *input1_ptr, + const InputScalarType *input2_ptr, + uint8_t *output_ptr) { int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) + for (; x <= (window_end_x - window_step_x); x += window_step_x) { const auto a = wrapper::vloadq(input1_ptr + x); const auto b = wrapper::vloadq(input2_ptr + x); @@ -424,11 +488,15 @@ inline int elementwise_comp_op_16_loop(int window_start_x, int window_end_x, int } template -inline int elementwise_comp_op_32_loop(int window_start_x, int window_end_x, int window_step_x, - const InputScalarType *input1_ptr, const InputScalarType *input2_ptr, uint8_t *output_ptr) +inline int elementwise_comp_op_32_loop(int window_start_x, + int window_end_x, + int window_step_x, + const InputScalarType *input1_ptr, + const InputScalarType *input2_ptr, + uint8_t *output_ptr) { int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) + for (; x <= (window_end_x - 
window_step_x); x += window_step_x) { auto a = wrapper::vloadq(input1_ptr + x); auto b = wrapper::vloadq(input2_ptr + x); @@ -438,12 +506,12 @@ inline int elementwise_comp_op_32_loop(int window_start_x, int window_end_x, int const auto res2 = elementwise_comp_op(a, b); wrapper::vstore(output_ptr + x, wrapper::vmovn(wrapper::vcombine(wrapper::vmovn(res), wrapper::vmovn(res2)))); } - if(x <= window_end_x - 4) + if (x <= window_end_x - 4) { const auto a = wrapper::vloadq(input1_ptr + x); const auto b = wrapper::vloadq(input2_ptr + x); const auto res = elementwise_comp_op(a, b); - for(int i = 0; i < 4; i++) + for (int i = 0; i < 4; i++) { *(output_ptr + x + i) = wrapper::vgetlane(res, i); } @@ -455,57 +523,59 @@ inline int elementwise_comp_op_32_loop(int window_start_x, int window_end_x, int template void elementwise_comp_op_8(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window) { - elementwise_op(in1, in2, out, window, - &elementwise_comp_op_scalar, - &elementwise_comp_op_broadcast_8_loop, - &elementwise_comp_op_8_loop); + elementwise_op( + in1, in2, out, window, &elementwise_comp_op_scalar, + &elementwise_comp_op_broadcast_8_loop, + &elementwise_comp_op_8_loop); } template void elementwise_comp_op_16(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window) { - elementwise_op(in1, in2, out, window, - &elementwise_comp_op_scalar, - &elementwise_comp_op_broadcast_16_loop, - &elementwise_comp_op_16_loop); + elementwise_op( + in1, in2, out, window, &elementwise_comp_op_scalar, + &elementwise_comp_op_broadcast_16_loop, + &elementwise_comp_op_16_loop); } template void elementwise_comp_op_32(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window) { - elementwise_op(in1, in2, out, window, - &elementwise_comp_op_scalar, - &elementwise_comp_op_broadcast_32_loop, - &elementwise_comp_op_32_loop); + elementwise_op( + in1, in2, out, window, &elementwise_comp_op_scalar, + &elementwise_comp_op_broadcast_32_loop, + &elementwise_comp_op_32_loop); } inline float32x4x4_t load_quantized(const uint8_t *input1_ptr, const int32x4_t &offset, const float32x4_t &scale) { - qasymm8x16_t x = vld1q_u8(input1_ptr); - const float32x4x4_t out = - { - { - vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(x))))), offset)), scale), - vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_low_u8(x))))), offset)), scale), - vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_high_u8(x))))), offset)), scale), - vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_high_u8(x))))), offset)), scale), - } - }; + qasymm8x16_t x = vld1q_u8(input1_ptr); + const float32x4x4_t out = {{ + vmulq_f32( + vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(x))))), offset)), + scale), + vmulq_f32( + vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_low_u8(x))))), offset)), + scale), + vmulq_f32( + vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_high_u8(x))))), offset)), + scale), + vmulq_f32(vcvtq_f32_s32( + vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_high_u8(x))))), offset)), + scale), + }}; return out; } inline float32x4x4_t load_quantized_signed(const int8_t *input1_ptr, const int32x4_t &offset, const float32x4_t &scale) { - qasymm8x16_signed_t x = vld1q_s8(input1_ptr); - const 
float32x4x4_t out = - { - { - vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_low_s8(x)))), offset)), scale), - vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_low_s8(x)))), offset)), scale), - vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_high_s8(x)))), offset)), scale), - vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_high_s8(x)))), offset)), scale), - } - }; + qasymm8x16_signed_t x = vld1q_s8(input1_ptr); + const float32x4x4_t out = {{ + vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_low_s8(x)))), offset)), scale), + vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_low_s8(x)))), offset)), scale), + vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_high_s8(x)))), offset)), scale), + vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_high_s8(x)))), offset)), scale), + }}; return out; } @@ -523,17 +593,15 @@ inline void store_quantized(uint8_t *output_ptr, const int32x4x4_t &out) vst1q_u8(output_ptr, vcombine_u8(pa, pb)); } -inline void store_quantized(uint8_t *output_ptr, const float32x4x4_t &rf, const float32x4_t &offset, const float32x4_t &invscale) +inline void +store_quantized(uint8_t *output_ptr, const float32x4x4_t &rf, const float32x4_t &offset, const float32x4_t &invscale) { - int32x4x4_t out = - { - { - vcvtq_s32_f32(vmlaq_f32(offset, rf.val[0], invscale)), - vcvtq_s32_f32(vmlaq_f32(offset, rf.val[1], invscale)), - vcvtq_s32_f32(vmlaq_f32(offset, rf.val[2], invscale)), - vcvtq_s32_f32(vmlaq_f32(offset, rf.val[3], invscale)), - } - }; + int32x4x4_t out = {{ + vcvtq_s32_f32(vmlaq_f32(offset, rf.val[0], invscale)), + vcvtq_s32_f32(vmlaq_f32(offset, rf.val[1], invscale)), + vcvtq_s32_f32(vmlaq_f32(offset, rf.val[2], invscale)), + vcvtq_s32_f32(vmlaq_f32(offset, rf.val[3], invscale)), + }}; store_quantized(output_ptr, out); } @@ -544,17 +612,17 @@ inline void store_quantized_signed(int8_t *output_ptr, const int32x4x4_t &out) vst1q_s8(output_ptr, vcombine_s8(pa, pb)); } -inline void store_quantized_signed(int8_t *output_ptr, const float32x4x4_t &rf, const float32x4_t &offset, const float32x4_t &invscale) +inline void store_quantized_signed(int8_t *output_ptr, + const float32x4x4_t &rf, + const float32x4_t &offset, + const float32x4_t &invscale) { - int32x4x4_t out = - { - { - vcvtq_s32_f32(vmlaq_f32(offset, rf.val[0], invscale)), - vcvtq_s32_f32(vmlaq_f32(offset, rf.val[1], invscale)), - vcvtq_s32_f32(vmlaq_f32(offset, rf.val[2], invscale)), - vcvtq_s32_f32(vmlaq_f32(offset, rf.val[3], invscale)), - } - }; + int32x4x4_t out = {{ + vcvtq_s32_f32(vmlaq_f32(offset, rf.val[0], invscale)), + vcvtq_s32_f32(vmlaq_f32(offset, rf.val[1], invscale)), + vcvtq_s32_f32(vmlaq_f32(offset, rf.val[2], invscale)), + vcvtq_s32_f32(vmlaq_f32(offset, rf.val[3], invscale)), + }}; store_quantized_signed(output_ptr, out); } @@ -565,7 +633,8 @@ inline uint8_t elementwise_arithm_op_quantized_scalar(const float &a, const floa } template -inline int8_t elementwise_arithm_op_quantized_signed_scalar(const float &a, const float &b, UniformQuantizationInfo qinfo) +inline int8_t +elementwise_arithm_op_quantized_signed_scalar(const float &a, const float &b, UniformQuantizationInfo qinfo) { return quantize_qasymm8_signed(elementwise_arithm_op_scalar(a, b), qinfo); } @@ -574,15 +643,12 @@ template float32x4x4_t elementwise_arithm_op(const float32x4x4_t &a, const float32x4x4_t &b) { using neon_vector_float = wrapper::traits::neon_vector; - 
float32x4x4_t out = - { - { - elementwise_arithm_op(a.val[0], b.val[0]), - elementwise_arithm_op(a.val[1], b.val[1]), - elementwise_arithm_op(a.val[2], b.val[2]), - elementwise_arithm_op(a.val[3], b.val[3]), - } - }; + float32x4x4_t out = {{ + elementwise_arithm_op(a.val[0], b.val[0]), + elementwise_arithm_op(a.val[1], b.val[1]), + elementwise_arithm_op(a.val[2], b.val[2]), + elementwise_arithm_op(a.val[3], b.val[3]), + }}; return out; } @@ -596,26 +662,29 @@ inline uint8_t elementwise_comp_op_quantized_scalar(const float &a, const float template inline uint32x4x4_t elementwise_comp_op(const float32x4x4_t &a, const float32x4x4_t &b) { - uint32x4x4_t out = - { - { - elementwise_comp_op(a.val[0], b.val[0]), - elementwise_comp_op(a.val[1], b.val[1]), - elementwise_comp_op(a.val[2], b.val[2]), - elementwise_comp_op(a.val[3], b.val[3]) - } - }; + uint32x4x4_t out = {{elementwise_comp_op(a.val[0], b.val[0]), + elementwise_comp_op(a.val[1], b.val[1]), + elementwise_comp_op(a.val[2], b.val[2]), + elementwise_comp_op(a.val[3], b.val[3])}}; return out; } template -inline int elementwise_arithm_op_quantized_loop(int window_start_x, int window_end_x, int window_step_x, - const uint8_t *input1_ptr, const uint8_t *input2_ptr, uint8_t *output_ptr, - int32x4_t voffset1, int32x4_t voffset2, float32x4_t vscale1, float32x4_t vscale2, - float32x4_t voffseto, float32x4_t invvscaleo) +inline int elementwise_arithm_op_quantized_loop(int window_start_x, + int window_end_x, + int window_step_x, + const uint8_t *input1_ptr, + const uint8_t *input2_ptr, + uint8_t *output_ptr, + int32x4_t voffset1, + int32x4_t voffset2, + float32x4_t vscale1, + float32x4_t vscale2, + float32x4_t voffseto, + float32x4_t invvscaleo) { int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) + for (; x <= (window_end_x - window_step_x); x += window_step_x) { // Get inputs and compute output const float32x4x4_t af = load_quantized(input1_ptr + x, voffset1, vscale1); @@ -627,13 +696,21 @@ inline int elementwise_arithm_op_quantized_loop(int window_start_x, int window_e } template -inline int elementwise_arithm_op_quantized_singed_loop(int window_start_x, int window_end_x, int window_step_x, - const int8_t *input1_ptr, const int8_t *input2_ptr, int8_t *output_ptr, - int32x4_t voffset1, int32x4_t voffset2, float32x4_t vscale1, float32x4_t vscale2, - float32x4_t voffseto, float32x4_t invvscaleo) +inline int elementwise_arithm_op_quantized_singed_loop(int window_start_x, + int window_end_x, + int window_step_x, + const int8_t *input1_ptr, + const int8_t *input2_ptr, + int8_t *output_ptr, + int32x4_t voffset1, + int32x4_t voffset2, + float32x4_t vscale1, + float32x4_t vscale2, + float32x4_t voffseto, + float32x4_t invvscaleo) { int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) + for (; x <= (window_end_x - window_step_x); x += window_step_x) { // Get inputs and compute output const float32x4x4_t af = load_quantized_signed(input1_ptr + x, voffset1, vscale1); @@ -645,45 +722,71 @@ inline int elementwise_arithm_op_quantized_singed_loop(int window_start_x, int w } template -inline int elementwise_arithm_op_quantized_broadcast_loop(int window_start_x, int window_end_x, int window_step_x, - const uint8_t *non_broadcast_input_ptr, float32x4x4_t broadcast_vector, uint8_t *output_ptr, - int32x4_t voffset_non_broadcast, float32x4_t vscale_non_broadcast, - float32x4_t voffseto, float32x4_t invvscaleo, bool reorder) +inline int elementwise_arithm_op_quantized_broadcast_loop(int 
window_start_x, + int window_end_x, + int window_step_x, + const uint8_t *non_broadcast_input_ptr, + float32x4x4_t broadcast_vector, + uint8_t *output_ptr, + int32x4_t voffset_non_broadcast, + float32x4_t vscale_non_broadcast, + float32x4_t voffseto, + float32x4_t invvscaleo, + bool reorder) { int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) + for (; x <= (window_end_x - window_step_x); x += window_step_x) { - const float32x4x4_t af = load_quantized(non_broadcast_input_ptr + x, voffset_non_broadcast, vscale_non_broadcast); - const float32x4x4_t rf = elementwise_arithm_op(reorder ? broadcast_vector : af, reorder ? af : broadcast_vector); + const float32x4x4_t af = + load_quantized(non_broadcast_input_ptr + x, voffset_non_broadcast, vscale_non_broadcast); + const float32x4x4_t rf = + elementwise_arithm_op(reorder ? broadcast_vector : af, reorder ? af : broadcast_vector); store_quantized(output_ptr + x, rf, voffseto, invvscaleo); } return x; } template -inline int elementwise_arithm_op_quantized_signed_broadcast_loop(int window_start_x, int window_end_x, int window_step_x, - const int8_t *non_broadcast_input_ptr, float32x4x4_t broadcast_vector, int8_t *output_ptr, - int32x4_t voffset_non_broadcast, float32x4_t vscale_non_broadcast, - float32x4_t voffseto, float32x4_t invvscaleo, bool reorder) +inline int elementwise_arithm_op_quantized_signed_broadcast_loop(int window_start_x, + int window_end_x, + int window_step_x, + const int8_t *non_broadcast_input_ptr, + float32x4x4_t broadcast_vector, + int8_t *output_ptr, + int32x4_t voffset_non_broadcast, + float32x4_t vscale_non_broadcast, + float32x4_t voffseto, + float32x4_t invvscaleo, + bool reorder) { int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) + for (; x <= (window_end_x - window_step_x); x += window_step_x) { - const float32x4x4_t af = load_quantized_signed(non_broadcast_input_ptr + x, voffset_non_broadcast, vscale_non_broadcast); - const float32x4x4_t rf = elementwise_arithm_op(reorder ? broadcast_vector : af, reorder ? af : broadcast_vector); + const float32x4x4_t af = + load_quantized_signed(non_broadcast_input_ptr + x, voffset_non_broadcast, vscale_non_broadcast); + const float32x4x4_t rf = + elementwise_arithm_op(reorder ? broadcast_vector : af, reorder ? 
af : broadcast_vector); store_quantized_signed(output_ptr + x, rf, voffseto, invvscaleo); } return x; } template -inline int elementwise_comp_op_quantized_loop(int window_start_x, int window_end_x, int window_step_x, - const uint8_t *input1_ptr, const uint8_t *input2_ptr, uint8_t *output_ptr, - int32x4_t voffset1, int32x4_t voffset2, float32x4_t vscale1, float32x4_t vscale2, - float32x4_t voffseto, float32x4_t invvscaleo) +inline int elementwise_comp_op_quantized_loop(int window_start_x, + int window_end_x, + int window_step_x, + const uint8_t *input1_ptr, + const uint8_t *input2_ptr, + uint8_t *output_ptr, + int32x4_t voffset1, + int32x4_t voffset2, + float32x4_t vscale1, + float32x4_t vscale2, + float32x4_t voffseto, + float32x4_t invvscaleo) { ARM_COMPUTE_UNUSED(voffseto, invvscaleo); int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) + for (; x <= (window_end_x - window_step_x); x += window_step_x) { const float32x4x4_t af = load_quantized(input1_ptr + x, voffset1, vscale1); const float32x4x4_t bf = load_quantized(input2_ptr + x, voffset2, vscale2); @@ -694,14 +797,22 @@ inline int elementwise_comp_op_quantized_loop(int window_start_x, int window_end } template -inline int elementwise_comp_op_quantized_signed_loop(int window_start_x, int window_end_x, int window_step_x, - const int8_t *input1_ptr, const int8_t *input2_ptr, uint8_t *output_ptr, - int32x4_t voffset1, int32x4_t voffset2, float32x4_t vscale1, float32x4_t vscale2, - float32x4_t voffseto, float32x4_t invvscaleo) +inline int elementwise_comp_op_quantized_signed_loop(int window_start_x, + int window_end_x, + int window_step_x, + const int8_t *input1_ptr, + const int8_t *input2_ptr, + uint8_t *output_ptr, + int32x4_t voffset1, + int32x4_t voffset2, + float32x4_t vscale1, + float32x4_t vscale2, + float32x4_t voffseto, + float32x4_t invvscaleo) { ARM_COMPUTE_UNUSED(voffseto, invvscaleo); int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) + for (; x <= (window_end_x - window_step_x); x += window_step_x) { const float32x4x4_t af = load_quantized_signed(input1_ptr + x, voffset1, vscale1); const float32x4x4_t bf = load_quantized_signed(input2_ptr + x, voffset2, vscale2); @@ -712,46 +823,85 @@ inline int elementwise_comp_op_quantized_signed_loop(int window_start_x, int win } template -inline int elementwise_comp_op_quantized_broadcast_loop(int window_start_x, int window_end_x, int window_step_x, - const uint8_t *non_broadcast_input_ptr, float32x4x4_t broadcast_vector, uint8_t *output_ptr, - int32x4_t voffset_non_broadcast, float32x4_t vscale_non_broadcast, - float32x4_t voffseto, float32x4_t invvscaleo, bool reorder) +inline int elementwise_comp_op_quantized_broadcast_loop(int window_start_x, + int window_end_x, + int window_step_x, + const uint8_t *non_broadcast_input_ptr, + float32x4x4_t broadcast_vector, + uint8_t *output_ptr, + int32x4_t voffset_non_broadcast, + float32x4_t vscale_non_broadcast, + float32x4_t voffseto, + float32x4_t invvscaleo, + bool reorder) { ARM_COMPUTE_UNUSED(voffseto, invvscaleo); int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) + for (; x <= (window_end_x - window_step_x); x += window_step_x) { - const float32x4x4_t af = load_quantized(non_broadcast_input_ptr + x, voffset_non_broadcast, vscale_non_broadcast); - const uint32x4x4_t rf = elementwise_comp_op(reorder ? broadcast_vector : af, reorder ? 
af : broadcast_vector); + const float32x4x4_t af = + load_quantized(non_broadcast_input_ptr + x, voffset_non_broadcast, vscale_non_broadcast); + const uint32x4x4_t rf = + elementwise_comp_op(reorder ? broadcast_vector : af, reorder ? af : broadcast_vector); store_quantized(output_ptr + x, rf); } return x; } template -inline int elementwise_comp_op_quantized_signed_broadcast_loop(int window_start_x, int window_end_x, int window_step_x, - const int8_t *non_broadcast_input_ptr, float32x4x4_t broadcast_vector, uint8_t *output_ptr, - int32x4_t voffset_non_broadcast, float32x4_t vscale_non_broadcast, - float32x4_t voffseto, float32x4_t invvscaleo, bool reorder) +inline int elementwise_comp_op_quantized_signed_broadcast_loop(int window_start_x, + int window_end_x, + int window_step_x, + const int8_t *non_broadcast_input_ptr, + float32x4x4_t broadcast_vector, + uint8_t *output_ptr, + int32x4_t voffset_non_broadcast, + float32x4_t vscale_non_broadcast, + float32x4_t voffseto, + float32x4_t invvscaleo, + bool reorder) { ARM_COMPUTE_UNUSED(voffseto, invvscaleo); int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) + for (; x <= (window_end_x - window_step_x); x += window_step_x) { - const float32x4x4_t af = load_quantized_signed(non_broadcast_input_ptr + x, voffset_non_broadcast, vscale_non_broadcast); - const uint32x4x4_t rf = elementwise_comp_op(reorder ? broadcast_vector : af, reorder ? af : broadcast_vector); + const float32x4x4_t af = + load_quantized_signed(non_broadcast_input_ptr + x, voffset_non_broadcast, vscale_non_broadcast); + const uint32x4x4_t rf = + elementwise_comp_op(reorder ? broadcast_vector : af, reorder ? af : broadcast_vector); store_quantized(output_ptr + x, rf); } return x; } -inline void elementwise_op_quantized(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window, +inline void elementwise_op_quantized(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window, uint8_t (*scalar_func)(const float &, const float &, UniformQuantizationInfo), - int (*broadcast_func)(int, int, int, const uint8_t *, float32x4x4_t, uint8_t *, int32x4_t, float32x4_t, - float32x4_t, float32x4_t, const bool), - int (*neon_func)(int, int, int, const uint8_t *, const uint8_t *, uint8_t *, - int32x4_t, int32x4_t, float32x4_t, float32x4_t, - float32x4_t, float32x4_t)) + int (*broadcast_func)(int, + int, + int, + const uint8_t *, + float32x4x4_t, + uint8_t *, + int32x4_t, + float32x4_t, + float32x4_t, + float32x4_t, + const bool), + int (*neon_func)(int, + int, + int, + const uint8_t *, + const uint8_t *, + uint8_t *, + int32x4_t, + int32x4_t, + float32x4_t, + float32x4_t, + float32x4_t, + float32x4_t)) { // Create input windows Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()); @@ -772,7 +922,7 @@ inline void elementwise_op_quantized(const ITensor *in1, const ITensor *in2, ITe const float32x4_t voffseto = vdupq_n_f32(output_qinfo.offset + 0.5f); const float32x4_t invvscaleo = vdupq_n_f32(1.f / output_qinfo.scale); - if(is_broadcast_across_x) + if (is_broadcast_across_x) { // Select the broadcast input on the X axis const bool is_broadcast_input_2 = input2_win.x().step() == 0; @@ -794,24 +944,28 @@ inline void elementwise_op_quantized(const ITensor *in1, const ITensor *in2, ITe Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win); Iterator output(out, win); - execute_window_loop(win, [&](const Coordinates &) - { - const auto non_broadcast_input_ptr = 
reinterpret_cast(non_broadcast_input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); + execute_window_loop( + win, + [&](const Coordinates &) + { + const auto non_broadcast_input_ptr = reinterpret_cast(non_broadcast_input.ptr()); + const auto output_ptr = reinterpret_cast(output.ptr()); - const uint8_t broadcast_value = *reinterpret_cast(broadcast_input.ptr()); - const float32x4x4_t broadcast_vector = vdequantize(vdupq_n_u8(broadcast_value), broadcast_qinfo); + const uint8_t broadcast_value = *reinterpret_cast(broadcast_input.ptr()); + const float32x4x4_t broadcast_vector = vdequantize(vdupq_n_u8(broadcast_value), broadcast_qinfo); - int x = (*broadcast_func)(window_start_x, window_end_x, window_step_x, non_broadcast_input_ptr, broadcast_vector, output_ptr, - voffset_non_broadcast, vscale_non_broadcast, voffseto, invvscaleo, !is_broadcast_input_2); - for(; x < window_end_x; ++x) - { - const float afs = dequantize_qasymm8(*(non_broadcast_input_ptr + x), non_broadcast_qinfo); - const float bfs = dequantize_qasymm8(broadcast_value, broadcast_qinfo); - *(output_ptr + x) = (*scalar_func)(!is_broadcast_input_2 ? bfs : afs, !is_broadcast_input_2 ? afs : bfs, output_qinfo); - } - }, - broadcast_input, non_broadcast_input, output); + int x = (*broadcast_func)(window_start_x, window_end_x, window_step_x, non_broadcast_input_ptr, + broadcast_vector, output_ptr, voffset_non_broadcast, vscale_non_broadcast, + voffseto, invvscaleo, !is_broadcast_input_2); + for (; x < window_end_x; ++x) + { + const float afs = dequantize_qasymm8(*(non_broadcast_input_ptr + x), non_broadcast_qinfo); + const float bfs = dequantize_qasymm8(broadcast_value, broadcast_qinfo); + *(output_ptr + x) = (*scalar_func)(!is_broadcast_input_2 ? bfs : afs, + !is_broadcast_input_2 ? 
afs : bfs, output_qinfo); + } + }, + broadcast_input, non_broadcast_input, output); } else { @@ -834,32 +988,56 @@ inline void elementwise_op_quantized(const ITensor *in1, const ITensor *in2, ITe Iterator input2(in2, input2_win); Iterator output(out, win); - execute_window_loop(win, [&](const Coordinates &) - { - const auto input1_ptr = reinterpret_cast(input1.ptr()); - const auto input2_ptr = reinterpret_cast(input2.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = (*neon_func)(window_start_x, window_end_x, window_step_x, input1_ptr, input2_ptr, output_ptr, voffset1, voffset2, - vscale1, vscale2, voffseto, invvscaleo); - for(; x < window_end_x; ++x) + execute_window_loop( + win, + [&](const Coordinates &) { - const float afs = dequantize_qasymm8(*(input1_ptr + x), input1_qinfo); - const float bfs = dequantize_qasymm8(*(input2_ptr + x), input2_qinfo); - *(output_ptr + x) = (*scalar_func)(afs, bfs, output_qinfo); - } - }, - input1, input2, output); + const auto input1_ptr = reinterpret_cast(input1.ptr()); + const auto input2_ptr = reinterpret_cast(input2.ptr()); + const auto output_ptr = reinterpret_cast(output.ptr()); + + int x = (*neon_func)(window_start_x, window_end_x, window_step_x, input1_ptr, input2_ptr, output_ptr, + voffset1, voffset2, vscale1, vscale2, voffseto, invvscaleo); + for (; x < window_end_x; ++x) + { + const float afs = dequantize_qasymm8(*(input1_ptr + x), input1_qinfo); + const float bfs = dequantize_qasymm8(*(input2_ptr + x), input2_qinfo); + *(output_ptr + x) = (*scalar_func)(afs, bfs, output_qinfo); + } + }, + input1, input2, output); } } -inline void elementwise_comp_quantized_signed(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window, - uint8_t (*scalar_func)(const float &, const float &, UniformQuantizationInfo), - int (*broadcast_func)(int, int, int, const int8_t *, float32x4x4_t, uint8_t *, int32x4_t, float32x4_t, - float32x4_t, float32x4_t, const bool), - int (*neon_func)(int, int, int, const int8_t *, const int8_t *, uint8_t *, - int32x4_t, int32x4_t, float32x4_t, float32x4_t, - float32x4_t, float32x4_t)) +inline void +elementwise_comp_quantized_signed(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window, + uint8_t (*scalar_func)(const float &, const float &, UniformQuantizationInfo), + int (*broadcast_func)(int, + int, + int, + const int8_t *, + float32x4x4_t, + uint8_t *, + int32x4_t, + float32x4_t, + float32x4_t, + float32x4_t, + const bool), + int (*neon_func)(int, + int, + int, + const int8_t *, + const int8_t *, + uint8_t *, + int32x4_t, + int32x4_t, + float32x4_t, + float32x4_t, + float32x4_t, + float32x4_t)) { // Create input windows Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()); @@ -879,7 +1057,7 @@ inline void elementwise_comp_quantized_signed(const ITensor *in1, const ITensor const float32x4_t voffseto = vdupq_n_f32(output_qinfo.offset); const float32x4_t invvscaleo = vdupq_n_f32(1.f / output_qinfo.scale); - if(is_broadcast_across_x) + if (is_broadcast_across_x) { // Select the broadcast input on the X axis const bool is_broadcast_input_2 = input2_win.x().step() == 0; @@ -901,24 +1079,28 @@ inline void elementwise_comp_quantized_signed(const ITensor *in1, const ITensor Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win); Iterator output(out, win); - execute_window_loop(win, [&](const Coordinates &) - { - const auto non_broadcast_input_ptr = reinterpret_cast(non_broadcast_input.ptr()); - const auto output_ptr = 
reinterpret_cast(output.ptr()); + execute_window_loop( + win, + [&](const Coordinates &) + { + const auto non_broadcast_input_ptr = reinterpret_cast(non_broadcast_input.ptr()); + const auto output_ptr = reinterpret_cast(output.ptr()); - const int8_t broadcast_value = *reinterpret_cast(broadcast_input.ptr()); - const float32x4x4_t broadcast_vector = vdequantize(vdupq_n_s8(broadcast_value), broadcast_qinfo); + const int8_t broadcast_value = *reinterpret_cast(broadcast_input.ptr()); + const float32x4x4_t broadcast_vector = vdequantize(vdupq_n_s8(broadcast_value), broadcast_qinfo); - int x = (*broadcast_func)(window_start_x, window_end_x, window_step_x, non_broadcast_input_ptr, broadcast_vector, output_ptr, - voffset_non_broadcast, vscale_non_broadcast, voffseto, invvscaleo, !is_broadcast_input_2); - for(; x < window_end_x; ++x) - { - const float afs = dequantize_qasymm8_signed(*(non_broadcast_input_ptr + x), non_broadcast_qinfo); - const float bfs = dequantize_qasymm8_signed(broadcast_value, broadcast_qinfo); - *(output_ptr + x) = (*scalar_func)(!is_broadcast_input_2 ? bfs : afs, !is_broadcast_input_2 ? afs : bfs, output_qinfo); - } - }, - broadcast_input, non_broadcast_input, output); + int x = (*broadcast_func)(window_start_x, window_end_x, window_step_x, non_broadcast_input_ptr, + broadcast_vector, output_ptr, voffset_non_broadcast, vscale_non_broadcast, + voffseto, invvscaleo, !is_broadcast_input_2); + for (; x < window_end_x; ++x) + { + const float afs = dequantize_qasymm8_signed(*(non_broadcast_input_ptr + x), non_broadcast_qinfo); + const float bfs = dequantize_qasymm8_signed(broadcast_value, broadcast_qinfo); + *(output_ptr + x) = (*scalar_func)(!is_broadcast_input_2 ? bfs : afs, + !is_broadcast_input_2 ? afs : bfs, output_qinfo); + } + }, + broadcast_input, non_broadcast_input, output); } else { @@ -941,32 +1123,56 @@ inline void elementwise_comp_quantized_signed(const ITensor *in1, const ITensor Iterator input2(in2, input2_win); Iterator output(out, win); - execute_window_loop(win, [&](const Coordinates &) - { - const auto input1_ptr = reinterpret_cast(input1.ptr()); - const auto input2_ptr = reinterpret_cast(input2.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = (*neon_func)(window_start_x, window_end_x, window_step_x, input1_ptr, input2_ptr, output_ptr, voffset1, voffset2, - vscale1, vscale2, voffseto, invvscaleo); - for(; x < window_end_x; ++x) + execute_window_loop( + win, + [&](const Coordinates &) { - const float afs = dequantize_qasymm8_signed(*(input1_ptr + x), input1_qinfo); - const float bfs = dequantize_qasymm8_signed(*(input2_ptr + x), input2_qinfo); - *(output_ptr + x) = (*scalar_func)(afs, bfs, output_qinfo); - } - }, - input1, input2, output); + const auto input1_ptr = reinterpret_cast(input1.ptr()); + const auto input2_ptr = reinterpret_cast(input2.ptr()); + const auto output_ptr = reinterpret_cast(output.ptr()); + + int x = (*neon_func)(window_start_x, window_end_x, window_step_x, input1_ptr, input2_ptr, output_ptr, + voffset1, voffset2, vscale1, vscale2, voffseto, invvscaleo); + for (; x < window_end_x; ++x) + { + const float afs = dequantize_qasymm8_signed(*(input1_ptr + x), input1_qinfo); + const float bfs = dequantize_qasymm8_signed(*(input2_ptr + x), input2_qinfo); + *(output_ptr + x) = (*scalar_func)(afs, bfs, output_qinfo); + } + }, + input1, input2, output); } } -inline void elementwise_op_quantized_signed(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window, - int8_t (*scalar_func)(const float &, const 
float &, UniformQuantizationInfo), - int (*broadcast_func)(int, int, int, const int8_t *, float32x4x4_t, int8_t *, int32x4_t, float32x4_t, - float32x4_t, float32x4_t, const bool), - int (*neon_func)(int, int, int, const int8_t *, const int8_t *, int8_t *, - int32x4_t, int32x4_t, float32x4_t, float32x4_t, - float32x4_t, float32x4_t)) +inline void +elementwise_op_quantized_signed(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window, + int8_t (*scalar_func)(const float &, const float &, UniformQuantizationInfo), + int (*broadcast_func)(int, + int, + int, + const int8_t *, + float32x4x4_t, + int8_t *, + int32x4_t, + float32x4_t, + float32x4_t, + float32x4_t, + const bool), + int (*neon_func)(int, + int, + int, + const int8_t *, + const int8_t *, + int8_t *, + int32x4_t, + int32x4_t, + float32x4_t, + float32x4_t, + float32x4_t, + float32x4_t)) { // Create input windows Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()); @@ -986,7 +1192,7 @@ inline void elementwise_op_quantized_signed(const ITensor *in1, const ITensor *i const float32x4_t voffseto = vdupq_n_f32(output_qinfo.offset); const float32x4_t invvscaleo = vdupq_n_f32(1.f / output_qinfo.scale); - if(is_broadcast_across_x) + if (is_broadcast_across_x) { // Select the broadcast input on the X axis const bool is_broadcast_input_2 = input2_win.x().step() == 0; @@ -1008,24 +1214,28 @@ inline void elementwise_op_quantized_signed(const ITensor *in1, const ITensor *i Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win); Iterator output(out, win); - execute_window_loop(win, [&](const Coordinates &) - { - const auto non_broadcast_input_ptr = reinterpret_cast(non_broadcast_input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); + execute_window_loop( + win, + [&](const Coordinates &) + { + const auto non_broadcast_input_ptr = reinterpret_cast(non_broadcast_input.ptr()); + const auto output_ptr = reinterpret_cast(output.ptr()); - const int8_t broadcast_value = *reinterpret_cast(broadcast_input.ptr()); - const float32x4x4_t broadcast_vector = vdequantize(vdupq_n_s8(broadcast_value), broadcast_qinfo); + const int8_t broadcast_value = *reinterpret_cast(broadcast_input.ptr()); + const float32x4x4_t broadcast_vector = vdequantize(vdupq_n_s8(broadcast_value), broadcast_qinfo); - int x = (*broadcast_func)(window_start_x, window_end_x, window_step_x, non_broadcast_input_ptr, broadcast_vector, output_ptr, - voffset_non_broadcast, vscale_non_broadcast, voffseto, invvscaleo, !is_broadcast_input_2); - for(; x < window_end_x; ++x) - { - const float afs = dequantize_qasymm8_signed(*(non_broadcast_input_ptr + x), non_broadcast_qinfo); - const float bfs = dequantize_qasymm8_signed(broadcast_value, broadcast_qinfo); - *(output_ptr + x) = (*scalar_func)(!is_broadcast_input_2 ? bfs : afs, !is_broadcast_input_2 ? afs : bfs, output_qinfo); - } - }, - broadcast_input, non_broadcast_input, output); + int x = (*broadcast_func)(window_start_x, window_end_x, window_step_x, non_broadcast_input_ptr, + broadcast_vector, output_ptr, voffset_non_broadcast, vscale_non_broadcast, + voffseto, invvscaleo, !is_broadcast_input_2); + for (; x < window_end_x; ++x) + { + const float afs = dequantize_qasymm8_signed(*(non_broadcast_input_ptr + x), non_broadcast_qinfo); + const float bfs = dequantize_qasymm8_signed(broadcast_value, broadcast_qinfo); + *(output_ptr + x) = (*scalar_func)(!is_broadcast_input_2 ? bfs : afs, + !is_broadcast_input_2 ? 
afs : bfs, output_qinfo); + } + }, + broadcast_input, non_broadcast_input, output); } else { @@ -1048,22 +1258,24 @@ inline void elementwise_op_quantized_signed(const ITensor *in1, const ITensor *i Iterator input2(in2, input2_win); Iterator output(out, win); - execute_window_loop(win, [&](const Coordinates &) - { - const auto input1_ptr = reinterpret_cast(input1.ptr()); - const auto input2_ptr = reinterpret_cast(input2.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = (*neon_func)(window_start_x, window_end_x, window_step_x, input1_ptr, input2_ptr, output_ptr, voffset1, voffset2, - vscale1, vscale2, voffseto, invvscaleo); - for(; x < window_end_x; ++x) + execute_window_loop( + win, + [&](const Coordinates &) { - const float afs = dequantize_qasymm8_signed(*(input1_ptr + x), input1_qinfo); - const float bfs = dequantize_qasymm8_signed(*(input2_ptr + x), input2_qinfo); - *(output_ptr + x) = (*scalar_func)(afs, bfs, output_qinfo); - } - }, - input1, input2, output); + const auto input1_ptr = reinterpret_cast(input1.ptr()); + const auto input2_ptr = reinterpret_cast(input2.ptr()); + const auto output_ptr = reinterpret_cast(output.ptr()); + + int x = (*neon_func)(window_start_x, window_end_x, window_step_x, input1_ptr, input2_ptr, output_ptr, + voffset1, voffset2, vscale1, vscale2, voffseto, invvscaleo); + for (; x < window_end_x; ++x) + { + const float afs = dequantize_qasymm8_signed(*(input1_ptr + x), input1_qinfo); + const float bfs = dequantize_qasymm8_signed(*(input2_ptr + x), input2_qinfo); + *(output_ptr + x) = (*scalar_func)(afs, bfs, output_qinfo); + } + }, + input1, input2, output); } } diff --git a/src/cpu/kernels/elementwise_binary/generic/neon/integer.cpp b/src/cpu/kernels/elementwise_binary/generic/neon/integer.cpp index c5c528d3f3..09ad13d5eb 100644 --- a/src/cpu/kernels/elementwise_binary/generic/neon/integer.cpp +++ b/src/cpu/kernels/elementwise_binary/generic/neon/integer.cpp @@ -22,6 +22,7 @@ * SOFTWARE. 
*/ #include "arm_compute/core/Helpers.h" + #include "src/cpu/kernels/elementwise_binary/generic/neon/impl.h" namespace arm_compute { @@ -33,63 +34,165 @@ void neon_s32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor return elementwise_arithm_op>(in1, in2, out, window); } -template void neon_s32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_s32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_s32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_s32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_s32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_s32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_s32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_s32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); +template void neon_s32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_s32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_s32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_s32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_s32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_s32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_s32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_s32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); template void neon_s16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window) { return elementwise_arithm_op>(in1, in2, out, window); } -template void neon_s16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_s16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_s16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_s16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_s16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_s16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_s16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_s16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); +template void neon_s16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void 
neon_s16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_s16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_s16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_s16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_s16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_s16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_s16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); template void neon_u8_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window) { return elementwise_comp_op_8(in1, in2, out, window); } -template void neon_u8_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_u8_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_u8_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_u8_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_u8_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_u8_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); +template void neon_u8_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_u8_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_u8_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_u8_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_u8_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_u8_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); template void neon_s16_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window) { return elementwise_comp_op_16(in1, in2, out, window); } -template void neon_s16_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_s16_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_s16_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_s16_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_s16_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_s16_comparison_elementwise_binary(const 
ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); +template void neon_s16_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_s16_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_s16_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_s16_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_s16_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_s16_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); template void neon_s32_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window) { return elementwise_comp_op_32(in1, in2, out, window); } -template void neon_s32_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_s32_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_s32_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_s32_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_s32_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_s32_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -} +template void neon_s32_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_s32_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_s32_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_s32_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_s32_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_s32_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +} // namespace cpu } // namespace arm_compute diff --git a/src/cpu/kernels/elementwise_binary/generic/neon/qasymm8.cpp b/src/cpu/kernels/elementwise_binary/generic/neon/qasymm8.cpp index fa8e08745a..d891f70644 100644 --- a/src/cpu/kernels/elementwise_binary/generic/neon/qasymm8.cpp +++ b/src/cpu/kernels/elementwise_binary/generic/neon/qasymm8.cpp @@ -22,6 +22,7 @@ * SOFTWARE. 
*/ #include "arm_compute/core/Helpers.h" + #include "src/cpu/kernels/elementwise_binary/generic/neon/impl.h" namespace arm_compute { @@ -33,27 +34,72 @@ void neon_qasymm8_elementwise_binary(const ITensor *in1, const ITensor *in2, ITe return elementwise_arithm_op_quantized(in1, in2, out, window); } -template void neon_qasymm8_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_qasymm8_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_qasymm8_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_qasymm8_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_qasymm8_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_qasymm8_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_qasymm8_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_qasymm8_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); +template void neon_qasymm8_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_qasymm8_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_qasymm8_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_qasymm8_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_qasymm8_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_qasymm8_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_qasymm8_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_qasymm8_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); template -void neon_qasymm8_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window) +void neon_qasymm8_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window) { return elementwise_comp_op_quantized(in1, in2, out, window); } -template void neon_qasymm8_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_qasymm8_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_qasymm8_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_qasymm8_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_qasymm8_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_qasymm8_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); +template void neon_qasymm8_comparison_elementwise_binary(const ITensor *in1, + 
const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_qasymm8_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_qasymm8_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_qasymm8_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_qasymm8_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_qasymm8_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); } // namespace cpu } // namespace arm_compute diff --git a/src/cpu/kernels/elementwise_binary/generic/neon/qasymm8_signed.cpp b/src/cpu/kernels/elementwise_binary/generic/neon/qasymm8_signed.cpp index abfdf93b75..b1f8e018f5 100644 --- a/src/cpu/kernels/elementwise_binary/generic/neon/qasymm8_signed.cpp +++ b/src/cpu/kernels/elementwise_binary/generic/neon/qasymm8_signed.cpp @@ -22,6 +22,7 @@ * SOFTWARE. */ #include "arm_compute/core/Helpers.h" + #include "src/cpu/kernels/elementwise_binary/generic/neon/impl.h" namespace arm_compute @@ -34,27 +35,70 @@ void neon_qasymm8_signed_elementwise_binary(const ITensor *in1, const ITensor *i return elementwise_arithm_op_quantized_signed(in1, in2, out, window); } -template void neon_qasymm8_signed_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_qasymm8_signed_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_qasymm8_signed_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_qasymm8_signed_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_qasymm8_signed_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_qasymm8_signed_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_qasymm8_signed_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_qasymm8_signed_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); +template void neon_qasymm8_signed_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_qasymm8_signed_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_qasymm8_signed_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_qasymm8_signed_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_qasymm8_signed_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_qasymm8_signed_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_qasymm8_signed_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_qasymm8_signed_elementwise_binary(const ITensor 
*in1, + const ITensor *in2, + ITensor *out, + const Window &window); template -void neon_qasymm8_signed_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window) +void neon_qasymm8_signed_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window) { return elementwise_comp_op_quantized_signed(in1, in2, out, window); } -template void neon_qasymm8_signed_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_qasymm8_signed_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_qasymm8_signed_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_qasymm8_signed_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_qasymm8_signed_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void neon_qasymm8_signed_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); +template void neon_qasymm8_signed_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_qasymm8_signed_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_qasymm8_signed_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_qasymm8_signed_comparison_elementwise_binary( + const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); +template void neon_qasymm8_signed_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void neon_qasymm8_signed_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); } // namespace cpu } // namespace arm_compute diff --git a/src/cpu/kernels/elementwise_binary/generic/sve/fp16.cpp b/src/cpu/kernels/elementwise_binary/generic/sve/fp16.cpp index 85224351df..600c7f1c05 100644 --- a/src/cpu/kernels/elementwise_binary/generic/sve/fp16.cpp +++ b/src/cpu/kernels/elementwise_binary/generic/sve/fp16.cpp @@ -25,6 +25,7 @@ #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) #include "arm_compute/core/Helpers.h" + #include "src/cpu/kernels/elementwise_binary/generic/sve/impl.h" namespace arm_compute { @@ -36,14 +37,38 @@ void sve_fp16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor return elementwise_arithmetic_op(in1, in2, out, op, window); } -template void sve_fp16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_fp16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_fp16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_fp16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_fp16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_fp16_elementwise_binary(const ITensor *in1, const 
ITensor *in2, ITensor *out, const Window &window); -template void sve_fp16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_fp16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); +template void sve_fp16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_fp16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_fp16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_fp16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_fp16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_fp16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_fp16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_fp16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); template void sve_fp16_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window) @@ -51,14 +76,32 @@ void sve_fp16_comparison_elementwise_binary(const ITensor *in1, const ITensor *i return elementwise_comparison_op(in1, in2, out, op, window); } -template void sve_fp16_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_fp16_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_fp16_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_fp16_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_fp16_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_fp16_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); +template void sve_fp16_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_fp16_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_fp16_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_fp16_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_fp16_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_fp16_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); } // namespace cpu } // namespace arm_compute -#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */ \ No newline at end of file +#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */ diff --git a/src/cpu/kernels/elementwise_binary/generic/sve/fp32.cpp 
b/src/cpu/kernels/elementwise_binary/generic/sve/fp32.cpp index 2b479f76f1..832a966883 100644 --- a/src/cpu/kernels/elementwise_binary/generic/sve/fp32.cpp +++ b/src/cpu/kernels/elementwise_binary/generic/sve/fp32.cpp @@ -23,6 +23,7 @@ */ #include "arm_compute/core/Helpers.h" + #include "src/cpu/kernels/elementwise_binary/generic/sve/impl.h" namespace arm_compute { @@ -34,26 +35,68 @@ void sve_fp32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor return elementwise_arithmetic_op(in1, in2, out, op, window); } -template void sve_fp32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_fp32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_fp32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_fp32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_fp32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_fp32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_fp32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_fp32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); +template void sve_fp32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_fp32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_fp32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_fp32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_fp32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_fp32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_fp32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_fp32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); template void sve_fp32_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window) { return elementwise_comparison_op(in1, in2, out, op, window); } -template void sve_fp32_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_fp32_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_fp32_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_fp32_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_fp32_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_fp32_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); +template void sve_fp32_comparison_elementwise_binary(const ITensor *in1, 
+ const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_fp32_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_fp32_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_fp32_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_fp32_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_fp32_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); } // namespace cpu } // namespace arm_compute diff --git a/src/cpu/kernels/elementwise_binary/generic/sve/impl.cpp b/src/cpu/kernels/elementwise_binary/generic/sve/impl.cpp index c0515f2abc..fa48407e9b 100644 --- a/src/cpu/kernels/elementwise_binary/generic/sve/impl.cpp +++ b/src/cpu/kernels/elementwise_binary/generic/sve/impl.cpp @@ -23,7 +23,9 @@ */ #include "src/cpu/kernels/elementwise_binary/generic/sve/impl.h" + #include "src/core/NEON/SVEMath.h" + #include namespace arm_compute @@ -33,7 +35,8 @@ namespace cpu using namespace arm_compute::wrapper; template -void elementwise_arithmetic_op(const ITensor *in1, const ITensor *in2, ITensor *out, ArithmeticOperation op, const Window &window) +void elementwise_arithmetic_op( + const ITensor *in1, const ITensor *in2, ITensor *out, ArithmeticOperation op, const Window &window) { using VectorType = typename sve_vector::type; @@ -51,7 +54,7 @@ void elementwise_arithmetic_op(const ITensor *in1, const ITensor *in2, ITensor * const auto window_end_x = static_cast(window.x().end()); const bool is_broadcast_across_x = in1->info()->tensor_shape().x() != in2->info()->tensor_shape().x(); - if(is_broadcast_across_x) + if (is_broadcast_across_x) { const bool is_broadcast_input_2 = input2_win.x().step() == 0; Window broadcast_win = is_broadcast_input_2 ? 
input2_win : input1_win; @@ -66,37 +69,40 @@ void elementwise_arithmetic_op(const ITensor *in1, const ITensor *in2, ITensor * Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win); Iterator output(out, win); - execute_window_loop(win, [&](const Coordinates &) - { - auto output_ptr = reinterpret_cast(output.ptr()); - const auto non_broadcast_input_ptr = reinterpret_cast(non_broadcast_input.ptr()); - const ScalarType broadcast_value = *reinterpret_cast(broadcast_input.ptr()); - const auto broadcast_vector = svdup_n(broadcast_value); - - int x = window_start_x; - - svbool_t pg = svwhilelt(x, window_end_x); - do + execute_window_loop( + win, + [&](const Coordinates &) { - const auto non_broadcast_vector = svld1(pg, non_broadcast_input_ptr + x); - VectorType res{}; + auto output_ptr = reinterpret_cast(output.ptr()); + const auto non_broadcast_input_ptr = reinterpret_cast(non_broadcast_input.ptr()); + const ScalarType broadcast_value = *reinterpret_cast(broadcast_input.ptr()); + const auto broadcast_vector = svdup_n(broadcast_value); - if(is_broadcast_input_2) - { - res = elementwise_arithmetic_op::type>(pg, non_broadcast_vector, broadcast_vector, op); - } - else + int x = window_start_x; + + svbool_t pg = svwhilelt(x, window_end_x); + do { - res = elementwise_arithmetic_op::type>(pg, broadcast_vector, non_broadcast_vector, op); - } - svst1(pg, output_ptr + x, res); - - x += svcnt(); - pg = svwhilelt(x, window_end_x); - } - while(svptest_any(all_true_pg, pg)); - }, - broadcast_input, non_broadcast_input, output); + const auto non_broadcast_vector = svld1(pg, non_broadcast_input_ptr + x); + VectorType res{}; + + if (is_broadcast_input_2) + { + res = elementwise_arithmetic_op::type>(pg, non_broadcast_vector, + broadcast_vector, op); + } + else + { + res = elementwise_arithmetic_op::type>( + pg, broadcast_vector, non_broadcast_vector, op); + } + svst1(pg, output_ptr + x, res); + + x += svcnt(); + pg = svwhilelt(x, window_end_x); + } while (svptest_any(all_true_pg, pg)); + }, + broadcast_input, non_broadcast_input, output); } else { @@ -108,39 +114,46 @@ void elementwise_arithmetic_op(const ITensor *in1, const ITensor *in2, ITensor * Iterator input2(in2, input2_win); Iterator output(out, win); - execute_window_loop(win, [&](const Coordinates &) - { - auto output_ptr = reinterpret_cast(output.ptr()); - const auto input1_ptr = reinterpret_cast(input1.ptr()); - const auto input2_ptr = reinterpret_cast(input2.ptr()); + execute_window_loop( + win, + [&](const Coordinates &) + { + auto output_ptr = reinterpret_cast(output.ptr()); + const auto input1_ptr = reinterpret_cast(input1.ptr()); + const auto input2_ptr = reinterpret_cast(input2.ptr()); - int x = window_start_x; + int x = window_start_x; - svbool_t pg = svwhilelt(x, window_end_x); - do - { - const auto in1 = svld1(pg, input1_ptr + x); - const auto in2 = svld1(pg, input2_ptr + x); - const auto res = elementwise_arithmetic_op::type>(pg, in1, in2, op); - svst1(pg, output_ptr + x, res); - - x += svcnt(); - pg = svwhilelt(x, window_end_x); - } - while(svptest_any(all_true_pg, pg)); - }, - input1, input2, output); + svbool_t pg = svwhilelt(x, window_end_x); + do + { + const auto in1 = svld1(pg, input1_ptr + x); + const auto in2 = svld1(pg, input2_ptr + x); + const auto res = elementwise_arithmetic_op::type>(pg, in1, in2, op); + svst1(pg, output_ptr + x, res); + + x += svcnt(); + pg = svwhilelt(x, window_end_x); + } while (svptest_any(all_true_pg, pg)); + }, + input1, input2, output); } } -template void elementwise_arithmetic_op(const 
ITensor *in1, const ITensor *in2, ITensor *out, const ArithmeticOperation op, const Window &window); -template void elementwise_arithmetic_op(const ITensor *in1, const ITensor *in2, ITensor *out, const ArithmeticOperation op, const Window &window); -template void elementwise_arithmetic_op(const ITensor *in1, const ITensor *in2, ITensor *out, const ArithmeticOperation op, const Window &window); -template void elementwise_arithmetic_op(const ITensor *in1, const ITensor *in2, ITensor *out, const ArithmeticOperation op, const Window &window); +template void elementwise_arithmetic_op( + const ITensor *in1, const ITensor *in2, ITensor *out, const ArithmeticOperation op, const Window &window); +template void elementwise_arithmetic_op( + const ITensor *in1, const ITensor *in2, ITensor *out, const ArithmeticOperation op, const Window &window); +template void elementwise_arithmetic_op( + const ITensor *in1, const ITensor *in2, ITensor *out, const ArithmeticOperation op, const Window &window); +template void elementwise_arithmetic_op( + const ITensor *in1, const ITensor *in2, ITensor *out, const ArithmeticOperation op, const Window &window); template -void elementwise_comparison_op(const ITensor *in1, const ITensor *in2, ITensor *out, ComparisonOperation op, const Window &window) +void elementwise_comparison_op( + const ITensor *in1, const ITensor *in2, ITensor *out, ComparisonOperation op, const Window &window) { - static_assert(sizeof(InputScalarType) >= sizeof(OutputScalarType), "input data type's width should be equal to or greater than output data type's width"); + static_assert(sizeof(InputScalarType) >= sizeof(OutputScalarType), + "input data type's width should be equal to or greater than output data type's width"); using OutputVectorType = typename sve_vector::type; const auto all_true_pg = svptrue(); @@ -157,7 +170,7 @@ void elementwise_comparison_op(const ITensor *in1, const ITensor *in2, ITensor * const auto window_end_x = static_cast(window.x().end()); const bool is_broadcast_across_x = in1->info()->tensor_shape().x() != in2->info()->tensor_shape().x(); - if(is_broadcast_across_x) + if (is_broadcast_across_x) { const bool is_broadcast_input_2 = input2_win.x().step() == 0; Window broadcast_win = is_broadcast_input_2 ? 
input2_win : input1_win; @@ -172,37 +185,44 @@ void elementwise_comparison_op(const ITensor *in1, const ITensor *in2, ITensor * Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win); Iterator output(out, win); - execute_window_loop(win, [&](const Coordinates &) - { - auto output_ptr = reinterpret_cast(output.ptr()); - const auto non_broadcast_input_ptr = reinterpret_cast(non_broadcast_input.ptr()); - const InputScalarType broadcast_value = *reinterpret_cast(broadcast_input.ptr()); - const auto broadcast_vector = svdup_n(broadcast_value); + execute_window_loop( + win, + [&](const Coordinates &) + { + auto output_ptr = reinterpret_cast(output.ptr()); + const auto non_broadcast_input_ptr = + reinterpret_cast(non_broadcast_input.ptr()); + const InputScalarType broadcast_value = + *reinterpret_cast(broadcast_input.ptr()); + const auto broadcast_vector = svdup_n(broadcast_value); - int x = window_start_x; + int x = window_start_x; - svbool_t pg = svwhilelt(x, window_end_x); - do - { - const auto non_broadcast_vector = svld1(pg, non_broadcast_input_ptr + x); - const svbool_t output_pg = narrow_to_byte_predicate(pg); - OutputVectorType res{}; - if(is_broadcast_input_2) - { - res = elementwise_comparison_op::type, typename sve_vector::type>(pg, non_broadcast_vector, broadcast_vector, op); - } - else + svbool_t pg = svwhilelt(x, window_end_x); + do { - res = elementwise_comparison_op::type, typename sve_vector::type>(pg, broadcast_vector, non_broadcast_vector, op); - } - svst1(output_pg, output_ptr + x, res); - - x += svcnt(); - pg = svwhilelt(x, window_end_x); - } - while(svptest_any(all_true_pg, pg)); - }, - broadcast_input, non_broadcast_input, output); + const auto non_broadcast_vector = svld1(pg, non_broadcast_input_ptr + x); + const svbool_t output_pg = narrow_to_byte_predicate(pg); + OutputVectorType res{}; + if (is_broadcast_input_2) + { + res = elementwise_comparison_op::type, + typename sve_vector::type>( + pg, non_broadcast_vector, broadcast_vector, op); + } + else + { + res = elementwise_comparison_op::type, + typename sve_vector::type>( + pg, broadcast_vector, non_broadcast_vector, op); + } + svst1(output_pg, output_ptr + x, res); + + x += svcnt(); + pg = svwhilelt(x, window_end_x); + } while (svptest_any(all_true_pg, pg)); + }, + broadcast_input, non_broadcast_input, output); } else { @@ -214,37 +234,45 @@ void elementwise_comparison_op(const ITensor *in1, const ITensor *in2, ITensor * Iterator input2(in2, input2_win); Iterator output(out, win); - execute_window_loop(win, [&](const Coordinates &) - { - auto output_ptr = reinterpret_cast(output.ptr()); - const auto input1_ptr = reinterpret_cast(input1.ptr()); - const auto input2_ptr = reinterpret_cast(input2.ptr()); + execute_window_loop( + win, + [&](const Coordinates &) + { + auto output_ptr = reinterpret_cast(output.ptr()); + const auto input1_ptr = reinterpret_cast(input1.ptr()); + const auto input2_ptr = reinterpret_cast(input2.ptr()); - int x = window_start_x; + int x = window_start_x; - svbool_t pg = svwhilelt(x, window_end_x); - do - { - const auto in1 = svld1(pg, input1_ptr + x); - const auto in2 = svld1(pg, input2_ptr + x); - const auto res = elementwise_comparison_op::type, typename sve_vector::type>(pg, in1, in2, op); - const svbool_t output_pg = narrow_to_byte_predicate(pg); - svst1(output_pg, output_ptr + x, res); - - x += svcnt(); - pg = svwhilelt(x, window_end_x); - } - while(svptest_any(all_true_pg, pg)); - }, - input1, input2, output); + svbool_t pg = svwhilelt(x, window_end_x); + do + { + const auto 
in1 = svld1(pg, input1_ptr + x); + const auto in2 = svld1(pg, input2_ptr + x); + const auto res = + elementwise_comparison_op::type, + typename sve_vector::type>(pg, in1, in2, op); + const svbool_t output_pg = narrow_to_byte_predicate(pg); + svst1(output_pg, output_ptr + x, res); + + x += svcnt(); + pg = svwhilelt(x, window_end_x); + } while (svptest_any(all_true_pg, pg)); + }, + input1, input2, output); } } -template void elementwise_comparison_op(const ITensor *in1, const ITensor *in2, ITensor *out, const ComparisonOperation op, const Window &window); -template void elementwise_comparison_op(const ITensor *in1, const ITensor *in2, ITensor *out, const ComparisonOperation op, const Window &window); -template void elementwise_comparison_op(const ITensor *in1, const ITensor *in2, ITensor *out, const ComparisonOperation op, const Window &window); -template void elementwise_comparison_op(const ITensor *in1, const ITensor *in2, ITensor *out, const ComparisonOperation op, const Window &window); -template void elementwise_comparison_op(const ITensor *in1, const ITensor *in2, ITensor *out, const ComparisonOperation op, const Window &window); +template void elementwise_comparison_op( + const ITensor *in1, const ITensor *in2, ITensor *out, const ComparisonOperation op, const Window &window); +template void elementwise_comparison_op( + const ITensor *in1, const ITensor *in2, ITensor *out, const ComparisonOperation op, const Window &window); +template void elementwise_comparison_op( + const ITensor *in1, const ITensor *in2, ITensor *out, const ComparisonOperation op, const Window &window); +template void elementwise_comparison_op( + const ITensor *in1, const ITensor *in2, ITensor *out, const ComparisonOperation op, const Window &window); +template void elementwise_comparison_op( + const ITensor *in1, const ITensor *in2, ITensor *out, const ComparisonOperation op, const Window &window); template <> svint32_t elementwise_pow(svbool_t &pg, const svint32_t &a, const svint32_t &b) diff --git a/src/cpu/kernels/elementwise_binary/generic/sve/impl.h b/src/cpu/kernels/elementwise_binary/generic/sve/impl.h index 860c50a1e0..4c61b9f315 100644 --- a/src/cpu/kernels/elementwise_binary/generic/sve/impl.h +++ b/src/cpu/kernels/elementwise_binary/generic/sve/impl.h @@ -25,6 +25,7 @@ #define SRC_CORE_SVE_KERNELS_ELEMENTWISE_LIST_H #include "arm_compute/core/Helpers.h" + #include "src/core/NEON/wrapper/intrinsics/intrinsics.h" #include "src/core/NEON/wrapper/svtraits.h" @@ -51,7 +52,7 @@ svbool_t narrow_to_byte_predicate(svbool_t pg) { const auto all_false = svpfalse(); - switch(bytewidth) + switch (bytewidth) { case 8: pg = svuzp1_b32(pg, all_false); @@ -74,7 +75,7 @@ VectorType elementwise_arithmetic_op(svbool_t &pg, const VectorType &a, const Ve using ScalarType = typename wrapper::sve_scalar::type; VectorType res{}; - switch(op) + switch (op) { case ArithmeticOperation::MAX: res = svmax_z(pg, a, b); @@ -114,11 +115,12 @@ VectorType elementwise_arithmetic_op(svbool_t &pg, const VectorType &a, const Ve } template -OutputVectorType elementwise_comparison_op(svbool_t &pg, const InputVectorType &a, const InputVectorType &b, ComparisonOperation op) +OutputVectorType +elementwise_comparison_op(svbool_t &pg, const InputVectorType &a, const InputVectorType &b, ComparisonOperation op) { svbool_t selection_vector{}; - switch(op) + switch (op) { case ComparisonOperation::Equal: selection_vector = svcmpeq(pg, a, b); @@ -154,10 +156,12 @@ OutputVectorType elementwise_comparison_op(svbool_t &pg, const InputVectorType & } template 
-void elementwise_arithmetic_op(const ITensor *in1, const ITensor *in2, ITensor *out, ArithmeticOperation op, const Window &window); +void elementwise_arithmetic_op( + const ITensor *in1, const ITensor *in2, ITensor *out, ArithmeticOperation op, const Window &window); template -void elementwise_comparison_op(const ITensor *in1, const ITensor *in2, ITensor *out, ComparisonOperation op, const Window &window); +void elementwise_comparison_op( + const ITensor *in1, const ITensor *in2, ITensor *out, ComparisonOperation op, const Window &window); } // namespace cpu } // namespace arm_compute #endif /* SRC_CORE_SVE_KERNELS_ELEMENTWISE_LIST_H */ diff --git a/src/cpu/kernels/elementwise_binary/generic/sve/integer.cpp b/src/cpu/kernels/elementwise_binary/generic/sve/integer.cpp index c313fc6e04..f7714ff7e9 100644 --- a/src/cpu/kernels/elementwise_binary/generic/sve/integer.cpp +++ b/src/cpu/kernels/elementwise_binary/generic/sve/integer.cpp @@ -23,6 +23,7 @@ */ #include "arm_compute/core/Helpers.h" + #include "src/cpu/kernels/elementwise_binary/generic/sve/impl.h" namespace arm_compute { @@ -33,64 +34,166 @@ void sve_s32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor { return elementwise_arithmetic_op(in1, in2, out, op, window); } -template void sve_s32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_s32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_s32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_s32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_s32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_s32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_s32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_s32_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); +template void sve_s32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_s32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_s32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_s32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_s32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_s32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_s32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_s32_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); template void sve_s16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window) { return elementwise_arithmetic_op(in1, in2, out, op, window); } -template void sve_s16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void 
sve_s16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_s16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_s16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_s16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_s16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_s16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_s16_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); +template void sve_s16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_s16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_s16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_s16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_s16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_s16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_s16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_s16_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); template void sve_u8_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window) { return elementwise_comparison_op(in1, in2, out, op, window); } -template void sve_u8_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_u8_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_u8_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_u8_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_u8_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_u8_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); +template void sve_u8_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_u8_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_u8_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_u8_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_u8_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_u8_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + 
ITensor *out, + const Window &window); template void sve_s16_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window) { return elementwise_comparison_op(in1, in2, out, op, window); } -template void sve_s16_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_s16_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_s16_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_s16_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_s16_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_s16_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); +template void sve_s16_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_s16_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_s16_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_s16_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_s16_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_s16_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); template void sve_s32_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window) { return elementwise_comparison_op(in1, in2, out, op, window); } -template void sve_s32_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_s32_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_s32_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_s32_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_s32_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve_s32_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); +template void sve_s32_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_s32_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_s32_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_s32_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_s32_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve_s32_comparison_elementwise_binary(const ITensor *in1, 
+ const ITensor *in2, + ITensor *out, + const Window &window); } // namespace cpu } // namespace arm_compute diff --git a/src/cpu/kernels/elementwise_binary/generic/sve2/impl.h b/src/cpu/kernels/elementwise_binary/generic/sve2/impl.h index 41e0ac77db..7c6015d379 100644 --- a/src/cpu/kernels/elementwise_binary/generic/sve2/impl.h +++ b/src/cpu/kernels/elementwise_binary/generic/sve2/impl.h @@ -35,19 +35,14 @@ inline svfloat32x4_t load_quantized(const int8_t *ptr, svbool_t pg, const svint3 { auto x = svld1(pg, ptr); - const auto widened = svcreate4( - svmovlb(svmovlb(x)), - svmovlt(svmovlb(x)), - svmovlb(svmovlt(x)), - svmovlt(svmovlt(x))); + const auto widened = svcreate4(svmovlb(svmovlb(x)), svmovlt(svmovlb(x)), svmovlb(svmovlt(x)), svmovlt(svmovlt(x))); pg = svptrue_b8(); - return svcreate4( - svmul_z(pg, svcvt_f32_z(pg, svsub_z(pg, svget4(widened, 0), offset)), scale), - svmul_z(pg, svcvt_f32_z(pg, svsub_z(pg, svget4(widened, 1), offset)), scale), - svmul_z(pg, svcvt_f32_z(pg, svsub_z(pg, svget4(widened, 2), offset)), scale), - svmul_z(pg, svcvt_f32_z(pg, svsub_z(pg, svget4(widened, 3), offset)), scale)); + return svcreate4(svmul_z(pg, svcvt_f32_z(pg, svsub_z(pg, svget4(widened, 0), offset)), scale), + svmul_z(pg, svcvt_f32_z(pg, svsub_z(pg, svget4(widened, 1), offset)), scale), + svmul_z(pg, svcvt_f32_z(pg, svsub_z(pg, svget4(widened, 2), offset)), scale), + svmul_z(pg, svcvt_f32_z(pg, svsub_z(pg, svget4(widened, 3), offset)), scale)); } inline svfloat32x4_t load_quantized(const uint8_t *ptr, svbool_t pg, const svint32_t &offset, const svfloat32_t &scale) @@ -56,28 +51,24 @@ inline svfloat32x4_t load_quantized(const uint8_t *ptr, svbool_t pg, const svint //vprint(x); - const auto widened = svcreate4( - svmovlb(svmovlb(x)), - svmovlt(svmovlb(x)), - svmovlb(svmovlt(x)), - svmovlt(svmovlt(x))); + const auto widened = svcreate4(svmovlb(svmovlb(x)), svmovlt(svmovlb(x)), svmovlb(svmovlt(x)), svmovlt(svmovlt(x))); pg = svptrue_b8(); - return svcreate4( - svmul_z(pg, svcvt_f32_z(pg, svsub_z(pg, svreinterpret_s32(svget4(widened, 0)), offset)), scale), - svmul_z(pg, svcvt_f32_z(pg, svsub_z(pg, svreinterpret_s32(svget4(widened, 1)), offset)), scale), - svmul_z(pg, svcvt_f32_z(pg, svsub_z(pg, svreinterpret_s32(svget4(widened, 2)), offset)), scale), - svmul_z(pg, svcvt_f32_z(pg, svsub_z(pg, svreinterpret_s32(svget4(widened, 3)), offset)), scale)); + return svcreate4(svmul_z(pg, svcvt_f32_z(pg, svsub_z(pg, svreinterpret_s32(svget4(widened, 0)), offset)), scale), + svmul_z(pg, svcvt_f32_z(pg, svsub_z(pg, svreinterpret_s32(svget4(widened, 1)), offset)), scale), + svmul_z(pg, svcvt_f32_z(pg, svsub_z(pg, svreinterpret_s32(svget4(widened, 2)), offset)), scale), + svmul_z(pg, svcvt_f32_z(pg, svsub_z(pg, svreinterpret_s32(svget4(widened, 3)), offset)), scale)); } -inline void store_quantized(uint8_t *ptr, svbool_t pg, svfloat32x4_t data, const svint32_t &offset, const svfloat32_t &inv_scale) +inline void +store_quantized(uint8_t *ptr, svbool_t pg, svfloat32x4_t data, const svint32_t &offset, const svfloat32_t &inv_scale) { - const auto quantized = svcreate4( - svadd_z(pg, svcvt_s32_z(pg, svrinta_z(pg, svmul_z(pg, svget4(data, 0), inv_scale))), offset), - svadd_z(pg, svcvt_s32_z(pg, svrinta_z(pg, svmul_z(pg, svget4(data, 1), inv_scale))), offset), - svadd_z(pg, svcvt_s32_z(pg, svrinta_z(pg, svmul_z(pg, svget4(data, 2), inv_scale))), offset), - svadd_z(pg, svcvt_s32_z(pg, svrinta_z(pg, svmul_z(pg, svget4(data, 3), inv_scale))), offset)); + const auto quantized = + svcreate4(svadd_z(pg, svcvt_s32_z(pg, 
svrinta_z(pg, svmul_z(pg, svget4(data, 0), inv_scale))), offset), + svadd_z(pg, svcvt_s32_z(pg, svrinta_z(pg, svmul_z(pg, svget4(data, 1), inv_scale))), offset), + svadd_z(pg, svcvt_s32_z(pg, svrinta_z(pg, svmul_z(pg, svget4(data, 2), inv_scale))), offset), + svadd_z(pg, svcvt_s32_z(pg, svrinta_z(pg, svmul_z(pg, svget4(data, 3), inv_scale))), offset)); const auto narrowed_bottom = svqxtunt(svqxtunb(svget4(quantized, 0)), svget4(quantized, 1)); const auto narrowed_top = svqxtunt(svqxtunb(svget4(quantized, 2)), svget4(quantized, 3)); @@ -85,13 +76,14 @@ inline void store_quantized(uint8_t *ptr, svbool_t pg, svfloat32x4_t data, const svst1(pg, ptr, narrowed); } -inline void store_quantized(int8_t *ptr, svbool_t pg, svfloat32x4_t data, const svint32_t &offset, const svfloat32_t &inv_scale) +inline void +store_quantized(int8_t *ptr, svbool_t pg, svfloat32x4_t data, const svint32_t &offset, const svfloat32_t &inv_scale) { - const auto quantized = svcreate4( - svadd_z(pg, svcvt_s32_z(pg, svrinta_z(pg, svmul_z(pg, svget4(data, 0), inv_scale))), offset), - svadd_z(pg, svcvt_s32_z(pg, svrinta_z(pg, svmul_z(pg, svget4(data, 1), inv_scale))), offset), - svadd_z(pg, svcvt_s32_z(pg, svrinta_z(pg, svmul_z(pg, svget4(data, 2), inv_scale))), offset), - svadd_z(pg, svcvt_s32_z(pg, svrinta_z(pg, svmul_z(pg, svget4(data, 3), inv_scale))), offset)); + const auto quantized = + svcreate4(svadd_z(pg, svcvt_s32_z(pg, svrinta_z(pg, svmul_z(pg, svget4(data, 0), inv_scale))), offset), + svadd_z(pg, svcvt_s32_z(pg, svrinta_z(pg, svmul_z(pg, svget4(data, 1), inv_scale))), offset), + svadd_z(pg, svcvt_s32_z(pg, svrinta_z(pg, svmul_z(pg, svget4(data, 2), inv_scale))), offset), + svadd_z(pg, svcvt_s32_z(pg, svrinta_z(pg, svmul_z(pg, svget4(data, 3), inv_scale))), offset)); const auto narrowed_bottom = svqxtnt(svqxtnb(svget4(quantized, 0)), svget4(quantized, 1)); const auto narrowed_top = svqxtnt(svqxtnb(svget4(quantized, 2)), svget4(quantized, 3)); @@ -101,7 +93,8 @@ inline void store_quantized(int8_t *ptr, svbool_t pg, svfloat32x4_t data, const } template -void elementwise_arithmetic_quantized_op(const ITensor *in1, const ITensor *in2, ITensor *out, ArithmeticOperation op, const Window &window) +void elementwise_arithmetic_quantized_op( + const ITensor *in1, const ITensor *in2, ITensor *out, ArithmeticOperation op, const Window &window) { const auto all_true_pg = wrapper::svptrue(); @@ -120,7 +113,7 @@ void elementwise_arithmetic_quantized_op(const ITensor *in1, const ITensor *in2, const auto output_voffset = svdup_n(out->info()->quantization_info().uniform().offset); const auto output_vscale = svdup_n(1.f / out->info()->quantization_info().uniform().scale); - if(is_broadcast_across_x) + if (is_broadcast_across_x) { const bool is_broadcast_input_2 = input2_win.x().step() == 0; Window broadcast_win = is_broadcast_input_2 ? input2_win : input1_win; @@ -128,8 +121,10 @@ void elementwise_arithmetic_quantized_op(const ITensor *in1, const ITensor *in2, const ITensor *broadcast_tensor = is_broadcast_input_2 ? in2 : in1; const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? in2 : in1; - const auto non_broadcast_qinfo = is_broadcast_input_2 ? in1->info()->quantization_info() : in2->info()->quantization_info(); - const auto broadcast_qinfo = is_broadcast_input_2 ? in2->info()->quantization_info() : in1->info()->quantization_info(); + const auto non_broadcast_qinfo = + is_broadcast_input_2 ? in1->info()->quantization_info() : in2->info()->quantization_info(); + const auto broadcast_qinfo = + is_broadcast_input_2 ? 
in2->info()->quantization_info() : in1->info()->quantization_info(); const auto non_broadcast_voffset = svdup_n(non_broadcast_qinfo.uniform().offset); const auto non_broadcast_vscale = svdup_n(non_broadcast_qinfo.uniform().scale); @@ -141,48 +136,52 @@ void elementwise_arithmetic_quantized_op(const ITensor *in1, const ITensor *in2, Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win); Iterator output(out, win); - execute_window_loop(win, [&](const Coordinates &) - { - auto output_ptr = reinterpret_cast(output.ptr()); - const auto non_broadcast_input_ptr = reinterpret_cast(non_broadcast_input.ptr()); - const ScalarType broadcast_value = *reinterpret_cast(broadcast_input.ptr()); - const float broadcast_value_f = Qasymm8QuantizationHelper::dequantize(broadcast_value, broadcast_qinfo); - const auto in2 = svcreate4(svdup_n(broadcast_value_f), svdup_n(broadcast_value_f), svdup_n(broadcast_value_f), svdup_n(broadcast_value_f)); - - int x = window_start_x; - - svbool_t pg = wrapper::svwhilelt(x, window_end_x); - do + execute_window_loop( + win, + [&](const Coordinates &) { - const auto in1 = load_quantized(non_broadcast_input_ptr + x, pg, non_broadcast_voffset, non_broadcast_vscale); - - svfloat32x4_t result{}; - - if(!is_broadcast_input_2) + auto output_ptr = reinterpret_cast(output.ptr()); + const auto non_broadcast_input_ptr = reinterpret_cast(non_broadcast_input.ptr()); + const ScalarType broadcast_value = *reinterpret_cast(broadcast_input.ptr()); + const float broadcast_value_f = + Qasymm8QuantizationHelper::dequantize(broadcast_value, broadcast_qinfo); + const auto in2 = svcreate4(svdup_n(broadcast_value_f), svdup_n(broadcast_value_f), + svdup_n(broadcast_value_f), svdup_n(broadcast_value_f)); + + int x = window_start_x; + + svbool_t pg = wrapper::svwhilelt(x, window_end_x); + do { - result = svcreate4( - elementwise_arithmetic_op(pg, svget4(in2, 0), svget4(in1, 0), op), - elementwise_arithmetic_op(pg, svget4(in2, 1), svget4(in1, 1), op), - elementwise_arithmetic_op(pg, svget4(in2, 2), svget4(in1, 2), op), - elementwise_arithmetic_op(pg, svget4(in2, 3), svget4(in1, 3), op)); - } - else - { - result = svcreate4( - elementwise_arithmetic_op(pg, svget4(in1, 0), svget4(in2, 0), op), - elementwise_arithmetic_op(pg, svget4(in1, 1), svget4(in2, 1), op), - elementwise_arithmetic_op(pg, svget4(in1, 2), svget4(in2, 2), op), - elementwise_arithmetic_op(pg, svget4(in1, 3), svget4(in2, 3), op)); - } - - store_quantized(output_ptr + x, pg, result, output_voffset, output_vscale); - - x += wrapper::svcnt(); - pg = wrapper::svwhilelt(x, window_end_x); - } - while(svptest_any(all_true_pg, pg)); - }, - broadcast_input, non_broadcast_input, output); + const auto in1 = + load_quantized(non_broadcast_input_ptr + x, pg, non_broadcast_voffset, non_broadcast_vscale); + + svfloat32x4_t result{}; + + if (!is_broadcast_input_2) + { + result = + svcreate4(elementwise_arithmetic_op(pg, svget4(in2, 0), svget4(in1, 0), op), + elementwise_arithmetic_op(pg, svget4(in2, 1), svget4(in1, 1), op), + elementwise_arithmetic_op(pg, svget4(in2, 2), svget4(in1, 2), op), + elementwise_arithmetic_op(pg, svget4(in2, 3), svget4(in1, 3), op)); + } + else + { + result = + svcreate4(elementwise_arithmetic_op(pg, svget4(in1, 0), svget4(in2, 0), op), + elementwise_arithmetic_op(pg, svget4(in1, 1), svget4(in2, 1), op), + elementwise_arithmetic_op(pg, svget4(in1, 2), svget4(in2, 2), op), + elementwise_arithmetic_op(pg, svget4(in1, 3), svget4(in2, 3), op)); + } + + store_quantized(output_ptr + x, pg, result, output_voffset, 
output_vscale); + + x += wrapper::svcnt(); + pg = wrapper::svwhilelt(x, window_end_x); + } while (svptest_any(all_true_pg, pg)); + }, + broadcast_input, non_broadcast_input, output); } else { @@ -200,41 +199,44 @@ void elementwise_arithmetic_quantized_op(const ITensor *in1, const ITensor *in2, const auto in2_voffset = svdup_n(in2->info()->quantization_info().uniform().offset); const auto in2_vscale = svdup_n(in2->info()->quantization_info().uniform().scale); - execute_window_loop(win, [&](const Coordinates &) - { - auto output_ptr = reinterpret_cast(output.ptr()); - const auto input1_ptr = reinterpret_cast(input1.ptr()); - const auto input2_ptr = reinterpret_cast(input2.ptr()); + execute_window_loop( + win, + [&](const Coordinates &) + { + auto output_ptr = reinterpret_cast(output.ptr()); + const auto input1_ptr = reinterpret_cast(input1.ptr()); + const auto input2_ptr = reinterpret_cast(input2.ptr()); - int x = window_start_x; + int x = window_start_x; - svbool_t pg = wrapper::svwhilelt(x, window_end_x); - do - { - const auto in1 = load_quantized(input1_ptr + x, pg, in1_voffset, in1_vscale); - const auto in2 = load_quantized(input2_ptr + x, pg, in2_voffset, in2_vscale); - - const auto result = svcreate4( - elementwise_arithmetic_op(pg, svget4(in1, 0), svget4(in2, 0), op), - elementwise_arithmetic_op(pg, svget4(in1, 1), svget4(in2, 1), op), - elementwise_arithmetic_op(pg, svget4(in1, 2), svget4(in2, 2), op), - elementwise_arithmetic_op(pg, svget4(in1, 3), svget4(in2, 3), op)); - - store_quantized(output_ptr + x, pg, result, output_voffset, output_vscale); - - x += wrapper::svcnt(); - pg = wrapper::svwhilelt(x, window_end_x); - } - while(svptest_any(all_true_pg, pg)); - }, - input1, input2, output); + svbool_t pg = wrapper::svwhilelt(x, window_end_x); + do + { + const auto in1 = load_quantized(input1_ptr + x, pg, in1_voffset, in1_vscale); + const auto in2 = load_quantized(input2_ptr + x, pg, in2_voffset, in2_vscale); + + const auto result = + svcreate4(elementwise_arithmetic_op(pg, svget4(in1, 0), svget4(in2, 0), op), + elementwise_arithmetic_op(pg, svget4(in1, 1), svget4(in2, 1), op), + elementwise_arithmetic_op(pg, svget4(in1, 2), svget4(in2, 2), op), + elementwise_arithmetic_op(pg, svget4(in1, 3), svget4(in2, 3), op)); + + store_quantized(output_ptr + x, pg, result, output_voffset, output_vscale); + + x += wrapper::svcnt(); + pg = wrapper::svwhilelt(x, window_end_x); + } while (svptest_any(all_true_pg, pg)); + }, + input1, input2, output); } } template -void elementwise_comparison_quantized_op(const ITensor *in1, const ITensor *in2, ITensor *out, ComparisonOperation op, const Window &window) +void elementwise_comparison_quantized_op( + const ITensor *in1, const ITensor *in2, ITensor *out, ComparisonOperation op, const Window &window) { - static_assert(sizeof(InputScalarType) >= sizeof(OutputScalarType), "input data type's width should be equal to or greater than output data type's width"); + static_assert(sizeof(InputScalarType) >= sizeof(OutputScalarType), + "input data type's width should be equal to or greater than output data type's width"); using OutputVectorType = typename wrapper::traits::sve_vector::type; const auto all_true_pg = wrapper::svptrue(); @@ -251,7 +253,7 @@ void elementwise_comparison_quantized_op(const ITensor *in1, const ITensor *in2, const auto window_end_x = static_cast(window.x().end()); const bool is_broadcast_across_x = in1->info()->tensor_shape().x() != in2->info()->tensor_shape().x(); - if(is_broadcast_across_x) + if (is_broadcast_across_x) { const bool 
is_broadcast_input_2 = input2_win.x().step() == 0; Window broadcast_win = is_broadcast_input_2 ? input2_win : input1_win; @@ -259,8 +261,10 @@ void elementwise_comparison_quantized_op(const ITensor *in1, const ITensor *in2, const ITensor *broadcast_tensor = is_broadcast_input_2 ? in2 : in1; const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? in2 : in1; - const auto non_broadcast_qinfo = is_broadcast_input_2 ? in1->info()->quantization_info() : in2->info()->quantization_info(); - const auto broadcast_qinfo = is_broadcast_input_2 ? in2->info()->quantization_info() : in1->info()->quantization_info(); + const auto non_broadcast_qinfo = + is_broadcast_input_2 ? in1->info()->quantization_info() : in2->info()->quantization_info(); + const auto broadcast_qinfo = + is_broadcast_input_2 ? in2->info()->quantization_info() : in1->info()->quantization_info(); const auto non_broadcast_voffset = svdup_n(non_broadcast_qinfo.uniform().offset); const auto non_broadcast_vscale = svdup_n(non_broadcast_qinfo.uniform().scale); @@ -272,51 +276,63 @@ void elementwise_comparison_quantized_op(const ITensor *in1, const ITensor *in2, Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win); Iterator output(out, win); - execute_window_loop(win, [&](const Coordinates &) - { - auto output_ptr = reinterpret_cast(output.ptr()); - const auto non_broadcast_input_ptr = reinterpret_cast(non_broadcast_input.ptr()); - const InputScalarType broadcast_value = *reinterpret_cast(broadcast_input.ptr()); - const float broadcast_value_f = Qasymm8QuantizationHelper::dequantize(broadcast_value, broadcast_qinfo); - const auto in2 = svcreate4(svdup_n(broadcast_value_f), svdup_n(broadcast_value_f), svdup_n(broadcast_value_f), svdup_n(broadcast_value_f)); - - int x = window_start_x; - - svbool_t pg = wrapper::svwhilelt(x, window_end_x); - do + execute_window_loop( + win, + [&](const Coordinates &) { - const auto in1 = load_quantized(non_broadcast_input_ptr + x, pg, non_broadcast_voffset, non_broadcast_vscale); - - svuint8x4_t result{}; - - if(!is_broadcast_input_2) + auto output_ptr = reinterpret_cast(output.ptr()); + const auto non_broadcast_input_ptr = + reinterpret_cast(non_broadcast_input.ptr()); + const InputScalarType broadcast_value = + *reinterpret_cast(broadcast_input.ptr()); + const float broadcast_value_f = + Qasymm8QuantizationHelper::dequantize(broadcast_value, broadcast_qinfo); + const auto in2 = svcreate4(svdup_n(broadcast_value_f), svdup_n(broadcast_value_f), + svdup_n(broadcast_value_f), svdup_n(broadcast_value_f)); + + int x = window_start_x; + + svbool_t pg = wrapper::svwhilelt(x, window_end_x); + do { - result = svcreate4( - elementwise_comparison_op(pg, svget4(in2, 0), svget4(in1, 0), op), - elementwise_comparison_op(pg, svget4(in2, 1), svget4(in1, 1), op), - elementwise_comparison_op(pg, svget4(in2, 2), svget4(in1, 2), op), - elementwise_comparison_op(pg, svget4(in2, 3), svget4(in1, 3), op)); - } - else - { - result = svcreate4( - elementwise_comparison_op(pg, svget4(in1, 0), svget4(in2, 0), op), - elementwise_comparison_op(pg, svget4(in1, 1), svget4(in2, 1), op), - elementwise_comparison_op(pg, svget4(in1, 2), svget4(in2, 2), op), - elementwise_comparison_op(pg, svget4(in1, 3), svget4(in2, 3), op)); - } - - const auto zipped_bottom = svzip1(svget4(result, 0), svget4(result, 1)); - const auto zipped_top = svzip1(svget4(result, 2), svget4(result, 3)); - const auto zipped = svzip1(zipped_bottom, zipped_top); - svst1(pg, output_ptr + x, zipped); - - x += wrapper::svcnt(); - pg = 
wrapper::svwhilelt(x, window_end_x); - } - while(svptest_any(all_true_pg, pg)); - }, - broadcast_input, non_broadcast_input, output); + const auto in1 = + load_quantized(non_broadcast_input_ptr + x, pg, non_broadcast_voffset, non_broadcast_vscale); + + svuint8x4_t result{}; + + if (!is_broadcast_input_2) + { + result = svcreate4(elementwise_comparison_op(pg, svget4(in2, 0), + svget4(in1, 0), op), + elementwise_comparison_op(pg, svget4(in2, 1), + svget4(in1, 1), op), + elementwise_comparison_op(pg, svget4(in2, 2), + svget4(in1, 2), op), + elementwise_comparison_op( + pg, svget4(in2, 3), svget4(in1, 3), op)); + } + else + { + result = svcreate4(elementwise_comparison_op(pg, svget4(in1, 0), + svget4(in2, 0), op), + elementwise_comparison_op(pg, svget4(in1, 1), + svget4(in2, 1), op), + elementwise_comparison_op(pg, svget4(in1, 2), + svget4(in2, 2), op), + elementwise_comparison_op( + pg, svget4(in1, 3), svget4(in2, 3), op)); + } + + const auto zipped_bottom = svzip1(svget4(result, 0), svget4(result, 1)); + const auto zipped_top = svzip1(svget4(result, 2), svget4(result, 3)); + const auto zipped = svzip1(zipped_bottom, zipped_top); + svst1(pg, output_ptr + x, zipped); + + x += wrapper::svcnt(); + pg = wrapper::svwhilelt(x, window_end_x); + } while (svptest_any(all_true_pg, pg)); + }, + broadcast_input, non_broadcast_input, output); } else { @@ -334,39 +350,44 @@ void elementwise_comparison_quantized_op(const ITensor *in1, const ITensor *in2, const auto in2_voffset = svdup_n(in2->info()->quantization_info().uniform().offset); const auto in2_vscale = svdup_n(in2->info()->quantization_info().uniform().scale); - execute_window_loop(win, [&](const Coordinates &) - { - auto output_ptr = reinterpret_cast(output.ptr()); - const auto input1_ptr = reinterpret_cast(input1.ptr()); - const auto input2_ptr = reinterpret_cast(input2.ptr()); + execute_window_loop( + win, + [&](const Coordinates &) + { + auto output_ptr = reinterpret_cast(output.ptr()); + const auto input1_ptr = reinterpret_cast(input1.ptr()); + const auto input2_ptr = reinterpret_cast(input2.ptr()); - int x = window_start_x; + int x = window_start_x; - svbool_t pg = wrapper::svwhilelt(x, window_end_x); - do - { - const auto in1 = load_quantized(input1_ptr + x, pg, in1_voffset, in1_vscale); - const auto in2 = load_quantized(input2_ptr + x, pg, in2_voffset, in2_vscale); - const auto result = svcreate4( - elementwise_comparison_op(pg, svget4(in1, 0), svget4(in2, 0), op), - elementwise_comparison_op(pg, svget4(in1, 1), svget4(in2, 1), op), - elementwise_comparison_op(pg, svget4(in1, 2), svget4(in2, 2), op), - elementwise_comparison_op(pg, svget4(in1, 3), svget4(in2, 3), op)); - - const auto zipped_bottom = svzip1(svget4(result, 0), svget4(result, 1)); - const auto zipped_top = svzip1(svget4(result, 2), svget4(result, 3)); - const auto zipped = svzip1(zipped_bottom, zipped_top); - svst1(pg, output_ptr + x, zipped); - - x += wrapper::svcnt(); - pg = wrapper::svwhilelt(x, window_end_x); - } - while(svptest_any(all_true_pg, pg)); - }, - input1, input2, output); + svbool_t pg = wrapper::svwhilelt(x, window_end_x); + do + { + const auto in1 = load_quantized(input1_ptr + x, pg, in1_voffset, in1_vscale); + const auto in2 = load_quantized(input2_ptr + x, pg, in2_voffset, in2_vscale); + const auto result = + svcreate4(elementwise_comparison_op(pg, svget4(in1, 0), + svget4(in2, 0), op), + elementwise_comparison_op(pg, svget4(in1, 1), + svget4(in2, 1), op), + elementwise_comparison_op(pg, svget4(in1, 2), + svget4(in2, 2), op), + elementwise_comparison_op(pg, 
svget4(in1, 3), + svget4(in2, 3), op)); + + const auto zipped_bottom = svzip1(svget4(result, 0), svget4(result, 1)); + const auto zipped_top = svzip1(svget4(result, 2), svget4(result, 3)); + const auto zipped = svzip1(zipped_bottom, zipped_top); + svst1(pg, output_ptr + x, zipped); + + x += wrapper::svcnt(); + pg = wrapper::svwhilelt(x, window_end_x); + } while (svptest_any(all_true_pg, pg)); + }, + input1, input2, output); } } } // namespace cpu } // namespace arm_compute -#endif /* SRC_CORE_SVE_KERNELS_ELEMENTWISE_QUANTIZED_LIST_H */ \ No newline at end of file +#endif /* SRC_CORE_SVE_KERNELS_ELEMENTWISE_QUANTIZED_LIST_H */ diff --git a/src/cpu/kernels/elementwise_binary/generic/sve2/qasymm8.cpp b/src/cpu/kernels/elementwise_binary/generic/sve2/qasymm8.cpp index 7435bb4f29..5cc66642d7 100644 --- a/src/cpu/kernels/elementwise_binary/generic/sve2/qasymm8.cpp +++ b/src/cpu/kernels/elementwise_binary/generic/sve2/qasymm8.cpp @@ -23,6 +23,7 @@ */ #include "arm_compute/core/Helpers.h" + #include "src/cpu/kernels/elementwise_binary/generic/sve2/impl.h" namespace arm_compute { @@ -34,27 +35,72 @@ void sve2_qasymm8_elementwise_binary(const ITensor *in1, const ITensor *in2, ITe return elementwise_arithmetic_quantized_op(in1, in2, out, op, window); } -template void sve2_qasymm8_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve2_qasymm8_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve2_qasymm8_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve2_qasymm8_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve2_qasymm8_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve2_qasymm8_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve2_qasymm8_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve2_qasymm8_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); +template void sve2_qasymm8_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve2_qasymm8_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve2_qasymm8_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve2_qasymm8_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve2_qasymm8_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve2_qasymm8_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve2_qasymm8_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve2_qasymm8_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); template -void sve2_qasymm8_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window) +void sve2_qasymm8_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window 
&window) { return elementwise_comparison_quantized_op(in1, in2, out, op, window); } -template void sve2_qasymm8_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve2_qasymm8_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve2_qasymm8_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve2_qasymm8_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve2_qasymm8_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve2_qasymm8_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); +template void sve2_qasymm8_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve2_qasymm8_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve2_qasymm8_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve2_qasymm8_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve2_qasymm8_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve2_qasymm8_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); } // namespace cpu } // namespace arm_compute diff --git a/src/cpu/kernels/elementwise_binary/generic/sve2/qasymm8_signed.cpp b/src/cpu/kernels/elementwise_binary/generic/sve2/qasymm8_signed.cpp index 1027a1eed0..165e0c05fa 100644 --- a/src/cpu/kernels/elementwise_binary/generic/sve2/qasymm8_signed.cpp +++ b/src/cpu/kernels/elementwise_binary/generic/sve2/qasymm8_signed.cpp @@ -23,6 +23,7 @@ */ #include "arm_compute/core/Helpers.h" + #include "src/cpu/kernels/elementwise_binary/generic/sve2/impl.h" namespace arm_compute { @@ -34,27 +35,70 @@ void sve2_qasymm8_signed_elementwise_binary(const ITensor *in1, const ITensor *i return elementwise_arithmetic_quantized_op(in1, in2, out, op, window); } -template void sve2_qasymm8_signed_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve2_qasymm8_signed_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve2_qasymm8_signed_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve2_qasymm8_signed_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve2_qasymm8_signed_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve2_qasymm8_signed_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve2_qasymm8_signed_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve2_qasymm8_signed_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); +template void sve2_qasymm8_signed_elementwise_binary(const 
ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve2_qasymm8_signed_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve2_qasymm8_signed_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve2_qasymm8_signed_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve2_qasymm8_signed_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve2_qasymm8_signed_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve2_qasymm8_signed_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve2_qasymm8_signed_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); template -void sve2_qasymm8_signed_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window) +void sve2_qasymm8_signed_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window) { return elementwise_comparison_quantized_op(in1, in2, out, op, window); } -template void sve2_qasymm8_signed_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve2_qasymm8_signed_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve2_qasymm8_signed_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve2_qasymm8_signed_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve2_qasymm8_signed_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); -template void sve2_qasymm8_signed_comparison_elementwise_binary(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); +template void sve2_qasymm8_signed_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve2_qasymm8_signed_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve2_qasymm8_signed_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve2_qasymm8_signed_comparison_elementwise_binary( + const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window); +template void sve2_qasymm8_signed_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); +template void sve2_qasymm8_signed_comparison_elementwise_binary(const ITensor *in1, + const ITensor *in2, + ITensor *out, + const Window &window); } // namespace cpu } // namespace arm_compute -- cgit v1.2.1