From 373b407558f99eb4bba632c170d03d807941dd2a Mon Sep 17 00:00:00 2001
From: Michalis Spyrou
Date: Wed, 20 Jan 2021 16:41:12 +0000
Subject: Make Softmax kernels and operator stateless

COMPMID-3997

Change-Id: I3a3cc76d8247dd769d9a5e6e171d718ea909312c
Signed-off-by: Michalis Spyrou
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4986
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Comments-Addressed: Arm Jenkins
---
 src/core/cpu/kernels/softmax/impl/NEON/list.h | 425 +++++++++++++++++++++++++
 src/core/cpu/kernels/softmax/impl/SVE/list.h  | 429 ++++++++++++++++++++++++++
 2 files changed, 854 insertions(+)
 create mode 100644 src/core/cpu/kernels/softmax/impl/NEON/list.h
 create mode 100644 src/core/cpu/kernels/softmax/impl/SVE/list.h

diff --git a/src/core/cpu/kernels/softmax/impl/NEON/list.h b/src/core/cpu/kernels/softmax/impl/NEON/list.h
new file mode 100644
index 0000000000..1aa7e8fac7
--- /dev/null
+++ b/src/core/cpu/kernels/softmax/impl/NEON/list.h
@@ -0,0 +1,425 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_NEON_KERNELS_SOFTMAX_LIST_H
+#define SRC_CORE_NEON_KERNELS_SOFTMAX_LIST_H
+
+#include "src/core/NEON/NEFixedPoint.h"
+#include "src/core/NEON/NEMath.h"
+#include "src/core/NEON/wrapper/wrapper.h"
+#include "support/SaturateCast.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace
+{
+template <typename float_vec_type, typename int_vec_type>
+int_vec_type convert_float_to_int(const float_vec_type &in);
+
+template <typename float_vec_type, typename int_vec_type>
+float_vec_type convert_int_to_float(const int_vec_type &in);
+
+template <>
+uint8x16_t convert_float_to_int<float32x4x4_t, uint8x16_t>(const float32x4x4_t &in)
+{
+    uint8x16_t out;
+    convert_float32x4x4_to_uint8x16(in, out);
+    return out;
+}
+
+template <>
+int8x16_t convert_float_to_int<float32x4x4_t, int8x16_t>(const float32x4x4_t &in)
+{
+    int8x16_t out;
+    convert_float32x4x4_to_int8x16(in, out);
+    return out;
+}
+
+template <>
+float32x4x4_t convert_int_to_float<float32x4x4_t, uint8x16_t>(const uint8x16_t &in)
+{
+    return convert_uint8x16_to_float32x4x4(in);
+}
+
+template <>
+float32x4x4_t convert_int_to_float<float32x4x4_t, int8x16_t>(const int8x16_t &in)
+{
+    return convert_int8x16_to_float32x4x4(in);
+}
+} // namespace
+
+template <typename T>
+void neon_logits_1d_max(const ITensor *in, ITensor *out, const Window &window)
+{
+    /** NEON vector tag type. */
+    using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;
+
+    constexpr int window_step_x = 16 / sizeof(T);
+    const auto window_start_x = static_cast<int>(window.x().start());
+    const auto window_end_x = static_cast<int>(window.x().end());
+
+    Window win{ window };
+    win.set(Window::DimX, Window::Dimension(0, 1, 1));
+    Iterator input(in, win);
+    Iterator output(out, win);
+
+    const int sum_stages = log2(window_step_x / 2);
+    execute_window_loop(win, [&](const Coordinates &)
+    {
+        // Get pointers
+        const auto in_ptr = reinterpret_cast<const T *>(input.ptr());
+        const auto out_ptr = reinterpret_cast<T *>(output.ptr());
+
+        // Init max value
+        auto vec_max = wrapper::vdup_n(support::cpp11::lowest<T>(), ExactTagType{});
+        int x = window_start_x;
+
+        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+        {
+            const auto current_value = wrapper::vloadq(in_ptr + x);
+            vec_max = wrapper::vmax(vec_max, current_value);
+        }
+        auto carry_max = wrapper::vpmax(wrapper::vgethigh(vec_max), wrapper::vgetlow(vec_max));
+
+        for(int i = 0; i < sum_stages; ++i)
+        {
+            carry_max = wrapper::vpmax(carry_max, carry_max);
+        }
+        T max_val = wrapper::vgetlane(carry_max, 0);
+
+        // Compute left-over elements
+        for(; x < window_end_x; ++x)
+        {
+            max_val = *(in_ptr + x) > max_val ? *(in_ptr + x) : max_val;
+        }
+
+        *out_ptr = max_val;
+    },
+    input, output);
+}
+
+template <typename T>
+void neon_softmax_logits_1d_quantized(const ITensor *in, const ITensor *max, void *const tmp,
+                                      ITensor *out, float beta, bool is_log, const Window &window)
+{
+    static_assert(std::is_same<T, qasymm8_t>::value
+                  || std::is_same<T, qasymm8_signed_t>::value,
+                  "quantized type should be either qasymm8_t or qasymm8_signed_t.");
+
+    const int start_x = in->info()->valid_region().anchor.x();
+    const int input_width = in->info()->valid_region().shape.x();
+
+    const float scale_beta = -beta * in->info()->quantization_info().uniform().scale;
+    const auto scale_beta_vec = vdupq_n_f32(scale_beta);
+
+    Iterator in_it(in, window);
+    Iterator max_it(max, window);
+    Iterator out_it(out, window);
+    constexpr int vec_size = 16;
+
+    execute_window_loop(window, [&](const Coordinates &)
+    {
+        /* Get pointers */
+        const auto in_ptr = reinterpret_cast<const T *>(in_it.ptr()) + start_x;
+        const auto out_ptr = reinterpret_cast<T *>(out_it.ptr()) + start_x;
+        const auto tmp_ptr = reinterpret_cast<float *>(tmp);
+
+        float sum{};
+        float sum_inversed{};
+
+        /* Compute exponentials and sum */
+        {
+            /* Get max value */
+            const auto max_val = *reinterpret_cast<T *>(max_it.ptr());
+            const auto vec_max = wrapper::vdup_n(max_val, wrapper::traits::vector_128_tag{});
+
+            /* Init sum to zero */
+            float32x4x4_t vec_sum =
+            {
+                vdupq_n_f32(0.f),
+                vdupq_n_f32(0.f),
+                vdupq_n_f32(0.f),
+                vdupq_n_f32(0.f),
+            };
+
+            /* Loop over row and compute exponentials and sum */
+            int x = 0;
+            for(; x <= (input_width - vec_size); x += vec_size)
+            {
+                auto vec_elements = wrapper::vloadq(in_ptr + x);
+                vec_elements = wrapper::vqsub(vec_max, vec_elements);
+                auto vec_elements_flt = convert_int_to_float<float32x4x4_t>(vec_elements);
+
+                if(is_log)
+                {
+                    vec_elements_flt.val[0] = vmulq_f32(vec_elements_flt.val[0], scale_beta_vec);
+                    vec_elements_flt.val[1] = vmulq_f32(vec_elements_flt.val[1], scale_beta_vec);
+                    vec_elements_flt.val[2] = vmulq_f32(vec_elements_flt.val[2], scale_beta_vec);
+                    vec_elements_flt.val[3] = vmulq_f32(vec_elements_flt.val[3], scale_beta_vec);
+                    vec_sum.val[0] = vaddq_f32(vec_sum.val[0], vexpq_f32(vec_elements_flt.val[0]));
+                    vec_sum.val[1] = vaddq_f32(vec_sum.val[1], vexpq_f32(vec_elements_flt.val[1]));
+                    vec_sum.val[2] = vaddq_f32(vec_sum.val[2], vexpq_f32(vec_elements_flt.val[2]));
+                    vec_sum.val[3] = vaddq_f32(vec_sum.val[3], vexpq_f32(vec_elements_flt.val[3]));
+                }
+                else
+                {
+                    vec_elements_flt.val[0] = vexpq_f32(vmulq_f32(vec_elements_flt.val[0], scale_beta_vec));
+                    vec_elements_flt.val[1] = vexpq_f32(vmulq_f32(vec_elements_flt.val[1], scale_beta_vec));
+                    vec_elements_flt.val[2] = vexpq_f32(vmulq_f32(vec_elements_flt.val[2], scale_beta_vec));
+                    vec_elements_flt.val[3] = vexpq_f32(vmulq_f32(vec_elements_flt.val[3], scale_beta_vec));
+                    vec_sum.val[0] = vaddq_f32(vec_sum.val[0], vec_elements_flt.val[0]);
+                    vec_sum.val[1] = vaddq_f32(vec_sum.val[1], vec_elements_flt.val[1]);
+                    vec_sum.val[2] = vaddq_f32(vec_sum.val[2], vec_elements_flt.val[2]);
+                    vec_sum.val[3] = vaddq_f32(vec_sum.val[3], vec_elements_flt.val[3]);
+                }
+
+                vst4q_f32(tmp_ptr + x, vec_elements_flt);
+            }
+
+            /* Reduce sum */
+            const auto sum_16_byte = vaddq_f32(vaddq_f32(vec_sum.val[0], vec_sum.val[1]), vaddq_f32(vec_sum.val[2], vec_sum.val[3]));
+            auto sum_res = vpadd_f32(vget_high_f32(sum_16_byte), vget_low_f32(sum_16_byte));
+            sum_res = vpadd_f32(sum_res, sum_res);
+            sum = wrapper::vgetlane(sum_res, 0);
+
+            /* Run remaining elements */
+            for(; x < input_width; ++x)
+            {
+                float element{};
+                if(is_log)
+                {
+                    element = (max_val - in_ptr[x]) * scale_beta;
+                    sum += std::exp(element);
+                }
+                else
+                {
+                    element = std::exp((max_val - in_ptr[x]) * scale_beta);
+                    sum += element;
+                }
+
+                tmp_ptr[x] = element;
+            }
+
+            if(!is_log)
+            {
+                sum_inversed = 256.f / sum;
+            }
+            else
+            {
+                sum = std::log(sum);
+            }
+        }
+
+        /* Normalize exponentials */
+        {
+            constexpr bool is_qasymm8_signed = std::is_same<T, qasymm8_signed_t>::value;
+            /* Loop over row and compute softmax */
+            int x = 0;
+            for(; x <= (input_width - vec_size); x += vec_size)
+            {
+                using int_vec_type = wrapper::traits::neon_vector_t<T, 16>;
+                float32x4x4_t vec_in = vld4q_f32(tmp_ptr + x);
+                int_vec_type normalized_value{};
+                if(is_log)
+                {
+                    const float32x4x4_t sub =
+                    {
+                        vsubq_f32(vec_in.val[0], vdupq_n_f32(sum)),
+                        vsubq_f32(vec_in.val[1], vdupq_n_f32(sum)),
+                        vsubq_f32(vec_in.val[2], vdupq_n_f32(sum)),
+                        vsubq_f32(vec_in.val[3], vdupq_n_f32(sum)),
+                    };
+                    normalized_value = convert_float_to_int<float32x4x4_t, int_vec_type>(sub);
+                }
+                else
+                {
+                    float32x4x4_t mul =
+                    {
+                        vmulq_f32(vec_in.val[0], vdupq_n_f32(sum_inversed)),
+                        vmulq_f32(vec_in.val[1], vdupq_n_f32(sum_inversed)),
+                        vmulq_f32(vec_in.val[2], vdupq_n_f32(sum_inversed)),
+                        vmulq_f32(vec_in.val[3], vdupq_n_f32(sum_inversed)),
+                    };
+
+                    if(is_qasymm8_signed)
+                    {
+                        const auto offset_vec = wrapper::vdup_n(128.f, wrapper::traits::vector_128_tag{});
+                        mul.val[0] = wrapper::vsub(mul.val[0], offset_vec);
+                        mul.val[1] = wrapper::vsub(mul.val[1], offset_vec);
+                        mul.val[2] = wrapper::vsub(mul.val[2], offset_vec);
+                        mul.val[3] = wrapper::vsub(mul.val[3], offset_vec);
+                    }
+
+                    normalized_value = convert_float_to_int<float32x4x4_t, int_vec_type>(mul);
+                }
+                wrapper::vstore(out_ptr + x, normalized_value);
+            }
+            /* Run remaining elements */
+            for(; x < input_width; ++x)
+            {
+                if(is_log)
+                {
+                    out_ptr[x] = utils::cast::saturate_cast<T>(tmp_ptr[x] - sum);
+                }
+                else
+                {
+                    out_ptr[x] = utils::cast::saturate_cast<T>((tmp_ptr[x] * sum_inversed) - (is_qasymm8_signed ? 128.f : 0));
+                }
+            }
+        }
+    },
+    in_it, max_it, out_it);
+}
+
+template <typename T>
+void neon_softmax_logits_1d_float(const ITensor *in, const ITensor *max, void *const tmp,
+                                  ITensor *out, const float beta, bool is_log, const Window &window)
+{
+    const int start_x = in->info()->valid_region().anchor.x();
+    const int input_width = in->info()->valid_region().shape.x();
+
+    Iterator in_it(in, window);
+    Iterator max_it(max, window);
+    Iterator out_it(out, window);
+
+    /** NEON vector tag type. */
+    using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;
+
+    constexpr int vec_size = 16 / sizeof(T);
+    const int sum_stages = log2(vec_size / 2);
+
+    execute_window_loop(window, [&](const Coordinates &)
+    {
+        /* Get pointers */
+        const auto in_ptr = reinterpret_cast<const T *>(in_it.ptr()) + start_x;
+        const auto out_ptr = reinterpret_cast<T *>(out_it.ptr()) + start_x;
+        const auto tmp_ptr = reinterpret_cast<T *>(tmp);
+
+        T sum{};
+        T sum_inversed{};
+
+        /* Compute exponentials and sum */
+        {
+            /* Get max value */
+            const auto max_val = *reinterpret_cast<T *>(max_it.ptr());
+            const auto vec_max = wrapper::vdup_n(max_val, ExactTagType{});
+
+            /* Init sum to zero */
+            auto vec_sum = wrapper::vdup_n(static_cast<T>(0), ExactTagType{});
+
+            /* Loop over row and compute exponentials and sum */
+            int x = 0;
+            for(; x <= (input_width - vec_size); x += vec_size)
+            {
+                auto vec_elements = wrapper::vloadq(in_ptr + x);
+                vec_elements = wrapper::vsub(vec_elements, vec_max);
+                if(is_log)
+                {
+                    vec_elements = wrapper::vmul(vec_elements, wrapper::vdup_n(static_cast<T>(beta), ExactTagType{}));
+                    vec_sum = wrapper::vadd(vec_sum, wrapper::vexpq(vec_elements));
+                }
+                else
+                {
+                    vec_elements = wrapper::vexpq(wrapper::vmul(vec_elements, wrapper::vdup_n(static_cast<T>(beta), ExactTagType{})));
+                    vec_sum = wrapper::vadd(vec_sum, vec_elements);
+                }
+                wrapper::vstore(tmp_ptr + x, vec_elements);
+            }
+
+            /* Reduce sum */
+            auto sum_res = wrapper::vpadd(wrapper::vgethigh(vec_sum), wrapper::vgetlow(vec_sum));
+            for(int i = 0; i < sum_stages; ++i)
+            {
+                sum_res = wrapper::vpadd(sum_res, sum_res);
+            }
+            sum = wrapper::vgetlane(sum_res, 0);
+
+            /* Run remaining elements */
+            for(; x < input_width; ++x)
+            {
+                T element{};
+
+                if(is_log)
+                {
+                    element = (in_ptr[x] - max_val) * beta;
+                    sum += std::exp(element);
+                }
+                else
+                {
+                    element = std::exp((in_ptr[x] - max_val) * beta);
+                    sum += element;
+                }
+                tmp_ptr[x] = element;
+            }
+
+            if(!is_log)
+            {
+                sum_inversed = T(1) / sum;
+            }
+            else
+            {
+                sum = static_cast<T>(std::log(sum));
+            }
+        }
+
+        /* Normalize exponentials */
+        {
+            /* Loop over row and compute softmax */
+            int x = 0;
+            for(; x <= (input_width - vec_size); x += vec_size)
+            {
+                auto vec_in = wrapper::vloadq(tmp_ptr + x);
+                auto normalized_value = wrapper::vdup_n(static_cast<T>(0), ExactTagType{});
+                if(is_log)
+                {
+                    normalized_value = wrapper::vsub(vec_in, wrapper::vdup_n(static_cast<T>(sum), ExactTagType{}));
+                }
+                else
+                {
+                    normalized_value = wrapper::vmul(vec_in, wrapper::vdup_n(static_cast<T>(sum_inversed), ExactTagType{}));
+                }
+                wrapper::vstore(out_ptr + x, normalized_value);
+            }
+            /* Run remaining elements */
+            for(; x < input_width; ++x)
+            {
+                if(is_log)
+                {
+                    out_ptr[x] = tmp_ptr[x] - sum;
+                }
+                else
+                {
+                    out_ptr[x] = tmp_ptr[x] * sum_inversed;
+                }
+            }
+        }
+    },
+    in_it, max_it, out_it);
+}
+
+} // namespace cpu
+} // namespace arm_compute
+
+#endif /* SRC_CORE_NEON_KERNELS_SOFTMAX_LIST_H */
diff --git a/src/core/cpu/kernels/softmax/impl/SVE/list.h b/src/core/cpu/kernels/softmax/impl/SVE/list.h
new file mode 100644
index 0000000000..0936bd5a56
--- /dev/null
+++ b/src/core/cpu/kernels/softmax/impl/SVE/list.h
@@ -0,0 +1,429 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_SVE_KERNELS_SOFTMAX_LIST_H
+#define SRC_CORE_SVE_KERNELS_SOFTMAX_LIST_H
+
+#if defined(__ARM_FEATURE_SVE)
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/Traits.h"
+#include "src/core/NEON/SVEMath.h"
+#include "src/core/NEON/wrapper/intrinsics/intrinsics.h"
+#include <arm_sve.h>
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace
+{
+#if defined(__ARM_FEATURE_SVE2)
+template <typename int_vec_type>
+int_vec_type convert_float_to_int(const svfloat32_t &in_0, const svfloat32_t &in_1, const svfloat32_t &in_2, const svfloat32_t &in_3);
+
+template <>
+svuint8_t convert_float_to_int<svuint8_t>(const svfloat32_t &in_0, const svfloat32_t &in_1, const svfloat32_t &in_2, const svfloat32_t &in_3)
+{
+    svuint8_t out;
+    const auto all_true_pg = svptrue_b32();
+    auto tmp_0 = svcvt_u32_f32_z(all_true_pg, in_0);
+    auto tmp_1 = svcvt_u32_f32_z(all_true_pg, in_1);
+    auto tmp_2 = svcvt_u32_f32_z(all_true_pg, in_2);
+    auto tmp_3 = svcvt_u32_f32_z(all_true_pg, in_3);
+
+    auto tmp_16_0 = svqxtnt_u32(svqxtnb_u32(tmp_0), tmp_1);
+    auto tmp_16_1 = svqxtnt_u32(svqxtnb_u32(tmp_2), tmp_3);
+
+    auto tmp_16_uzp_0 = svuzp1(tmp_16_0, tmp_16_0);
+    auto tmp_16_uzp_1 = svuzp2(tmp_16_0, tmp_16_0);
+    auto tmp_16_uzp_2 = svuzp1(tmp_16_1, tmp_16_1);
+    auto tmp_16_uzp_3 = svuzp2(tmp_16_1, tmp_16_1);
+
+    auto pg = svwhilelt_b16_s32(0, svcnth() / 2);
+
+    tmp_16_0 = svsplice(pg, tmp_16_uzp_0, tmp_16_uzp_1);
+    tmp_16_1 = svsplice(pg, tmp_16_uzp_2, tmp_16_uzp_3);
+
+    out = svqxtnt_u16(svqxtnb_u16(tmp_16_0), tmp_16_1);
+
+    auto out_uzp_0 = svuzp1(out, out);
+    auto out_uzp_1 = svuzp2(out, out);
+
+    pg = svwhilelt_b8_s32(0, svcntb() / 2);
+    out = svsplice(pg, out_uzp_0, out_uzp_1);
+
+    return out;
+}
+
+template <>
+svint8_t convert_float_to_int<svint8_t>(const svfloat32_t &in_0, const svfloat32_t &in_1, const svfloat32_t &in_2, const svfloat32_t &in_3)
+{
+    svint8_t out;
+    const auto all_true_pg = svptrue_b32();
+    auto tmp_0 = svcvt_s32_f32_z(all_true_pg, in_0);
+    auto tmp_1 = svcvt_s32_f32_z(all_true_pg, in_1);
+    auto tmp_2 = svcvt_s32_f32_z(all_true_pg, in_2);
+    auto tmp_3 = svcvt_s32_f32_z(all_true_pg, in_3);
+
+    auto tmp_16_0 = svqxtnt_s32(svqxtnb_s32(tmp_0), tmp_1);
+    auto tmp_16_1 = svqxtnt_s32(svqxtnb_s32(tmp_2), tmp_3);
+
+    auto tmp_16_uzp_0 = svuzp1(tmp_16_0, tmp_16_0);
+    auto tmp_16_uzp_1 = svuzp2(tmp_16_0, tmp_16_0);
+    auto tmp_16_uzp_2 = svuzp1(tmp_16_1, tmp_16_1);
+    auto tmp_16_uzp_3 = svuzp2(tmp_16_1, tmp_16_1);
+
+    auto pg = svwhilelt_b16_s32(0, svcnth() / 2);
+
+    tmp_16_0 = svsplice(pg, tmp_16_uzp_0, tmp_16_uzp_1);
+    tmp_16_1 = svsplice(pg, tmp_16_uzp_2, tmp_16_uzp_3);
+
+    out = svqxtnt_s16(svqxtnb_s16(tmp_16_0), tmp_16_1);
+
+    auto out_uzp_0 = svuzp1(out, out);
+    auto out_uzp_1 = svuzp2(out, out);
+
+    pg = svwhilelt_b8_s32(0, svcntb() / 2);
+    out = svsplice(pg, out_uzp_0, out_uzp_1);
+
+    return out;
+}
+#endif /* defined(__ARM_FEATURE_SVE2) */
+} // namespace
+
+template <typename ScalarType>
+void sve_logits_1d_max(const ITensor *in, ITensor *out, const Window &window)
+{
+    const auto all_true_pg = wrapper::svptrue<ScalarType>();
+    const auto window_start_x = static_cast<int>(window.x().start());
+    const auto window_end_x = static_cast<int>(window.x().end());
+
+    Window win{ window };
+    win.set(Window::DimX, Window::Dimension(0, 1, 1));
+    Iterator input(in, win);
+    Iterator output(out, win);
+
+    execute_window_loop(win, [&](const Coordinates &)
+    {
+        // Get pointers
+        const auto in_ptr = reinterpret_cast<const ScalarType *>(input.ptr());
+        const auto out_ptr = reinterpret_cast<ScalarType *>(output.ptr());
+
+        // Init max value
+        auto vec_max = wrapper::svdup_n(support::cpp11::lowest<ScalarType>());
+
+        int x = window_start_x;
+        svbool_t pg = wrapper::svwhilelt<ScalarType>(x, window_end_x);
+        do
+        {
+            const auto current_value = svld1(pg, in_ptr + x);
+            vec_max = svmax_m(pg, vec_max, current_value);
+
+            x += wrapper::svcnt<ScalarType>();
+            pg = wrapper::svwhilelt<ScalarType>(x, window_end_x);
+        }
+        while(svptest_any(all_true_pg, pg));
+
+        auto max_val = svmaxv(all_true_pg, vec_max);
+
+        *out_ptr = max_val;
+    },
+    input, output);
+}
+
+#if defined(__ARM_FEATURE_SVE2)
+template <typename ScalarType>
+void sve_softmax_logits_1d_quantized(const ITensor *in, const ITensor *max, void *const tmp,
+                                     ITensor *out, float beta, bool is_log, const Window &window)
+{
+    const int start_x = in->info()->valid_region().anchor.x();
+    const int input_width = in->info()->valid_region().shape.x();
+
+    const float scale_beta = -beta * in->info()->quantization_info().uniform().scale;
+    const auto scale_beta_vec = svdup_n_f32(scale_beta);
+
+    Iterator in_it(in, window);
+    Iterator max_it(max, window);
+    Iterator out_it(out, window);
+    const auto all_true_pg = wrapper::svptrue<ScalarType>();
+    using SVEType = typename wrapper::traits::sve_vector<ScalarType>::type;
+
+    const int inc_1 = static_cast<int>(svcntw());
+    const int inc_2 = static_cast<int>(2 * svcntw());
+    const int inc_3 = static_cast<int>(3 * svcntw());
+
+    execute_window_loop(window, [&](const Coordinates &)
+    {
+        /* Get pointers */
+        const auto in_ptr = reinterpret_cast<const ScalarType *>(in_it.ptr()) + start_x;
+        const auto out_ptr = reinterpret_cast<ScalarType *>(out_it.ptr()) + start_x;
+        const auto tmp_ptr = reinterpret_cast<float *>(tmp);
+
+        float sum{};
+
+        /* Compute exponentials and sum */
+        {
+            /* Get max value */
+            const auto max_val = *reinterpret_cast<ScalarType *>(max_it.ptr());
+            const auto vec_max = wrapper::svdup_n(max_val);
+
+            /* Init sum to zero */
+            auto vec_sum_0 = svdup_n_f32(0.f);
+            auto vec_sum_1 = svdup_n_f32(0.f);
+            auto vec_sum_2 = svdup_n_f32(0.f);
+            auto vec_sum_3 = svdup_n_f32(0.f);
+
+            /* Loop over row and compute exponentials and sum */
+            int x = 0;
+            svbool_t pg = wrapper::svwhilelt<ScalarType>(x, input_width);
+            svbool_t pg_0 = svunpklo(svunpklo(pg));
+            svbool_t pg_1 = svunpkhi(svunpklo(pg));
+            svbool_t pg_2 = svunpklo(svunpkhi(pg));
+            svbool_t pg_3 = svunpkhi(svunpkhi(pg));
+            do
+            {
+                auto vec_elements = svld1(pg, in_ptr + x);
+                vec_elements = svsub_z(pg, vec_max, vec_elements);
+
+                auto vec_elements_flt_0 = svcvt_f32_z(pg_0, svunpklo(svunpklo(vec_elements)));
+                auto vec_elements_flt_1 = svcvt_f32_z(pg_1, svunpkhi(svunpklo(vec_elements)));
+                auto vec_elements_flt_2 = svcvt_f32_z(pg_2, svunpklo(svunpkhi(vec_elements)));
+                auto vec_elements_flt_3 = svcvt_f32_z(pg_3, svunpkhi(svunpkhi(vec_elements)));
+
+                if(is_log)
+                {
+                    vec_elements_flt_0 = svmul_f32_z(pg_0, vec_elements_flt_0, scale_beta_vec);
+                    vec_elements_flt_1 = svmul_f32_z(pg_1, vec_elements_flt_1, scale_beta_vec);
+                    vec_elements_flt_2 = svmul_f32_z(pg_2, vec_elements_flt_2, scale_beta_vec);
+                    vec_elements_flt_3 = svmul_f32_z(pg_3, vec_elements_flt_3, scale_beta_vec);
+                    vec_sum_0 = svadd_f32_m(pg_0, vec_sum_0, svexp_f32_z(pg_0, vec_elements_flt_0));
+                    vec_sum_1 = svadd_f32_m(pg_1, vec_sum_1, svexp_f32_z(pg_1, vec_elements_flt_1));
+                    vec_sum_2 = svadd_f32_m(pg_2, vec_sum_2, svexp_f32_z(pg_2, vec_elements_flt_2));
+                    vec_sum_3 = svadd_f32_m(pg_3, vec_sum_3, svexp_f32_z(pg_3, vec_elements_flt_3));
+                }
+                else
+                {
+                    vec_elements_flt_0 = svexp_f32_z(pg_0, svmul_f32_z(pg_0, vec_elements_flt_0, scale_beta_vec));
+                    vec_elements_flt_1 = svexp_f32_z(pg_1, svmul_f32_z(pg_1, vec_elements_flt_1, scale_beta_vec));
+                    vec_elements_flt_2 = svexp_f32_z(pg_2, svmul_f32_z(pg_2, vec_elements_flt_2, scale_beta_vec));
+                    vec_elements_flt_3 = svexp_f32_z(pg_3, svmul_f32_z(pg_3, vec_elements_flt_3, scale_beta_vec));
+                    vec_sum_0 = svadd_f32_m(pg_0, vec_sum_0, vec_elements_flt_0);
+                    vec_sum_1 = svadd_f32_m(pg_1, vec_sum_1, vec_elements_flt_1);
+                    vec_sum_2 = svadd_f32_m(pg_2, vec_sum_2, vec_elements_flt_2);
+                    vec_sum_3 = svadd_f32_m(pg_3, vec_sum_3, vec_elements_flt_3);
+                }
+
+                svst1_f32(pg_0, tmp_ptr + x, vec_elements_flt_0);
+                svst1_f32(pg_1, tmp_ptr + x + inc_1, vec_elements_flt_1);
+                svst1_f32(pg_2, tmp_ptr + x + inc_2, vec_elements_flt_2);
+                svst1_f32(pg_3, tmp_ptr + x + inc_3, vec_elements_flt_3);
+
+                x += wrapper::svcnt<ScalarType>();
+                pg = wrapper::svwhilelt<ScalarType>(x, input_width);
+                pg_0 = svunpklo(svunpklo(pg));
+                pg_1 = svunpkhi(svunpklo(pg));
+                pg_2 = svunpklo(svunpkhi(pg));
+                pg_3 = svunpkhi(svunpkhi(pg));
+            }
+            while(svptest_any(all_true_pg, pg));
+
+            /* Reduce sum */
+            const auto vec_sum = svadd_f32_z(all_true_pg, svadd_f32_z(all_true_pg, vec_sum_0, vec_sum_1), svadd_f32_z(all_true_pg, vec_sum_2, vec_sum_3));
+            sum = svaddv_f32(all_true_pg, vec_sum);
+
+            /* Run remaining elements */
+            x = 0;
+            if(is_log)
+            {
+                sum = std::log(sum);
+            }
+            else
+            {
+                sum = 256.f / sum;
+            }
+        }
+
+        /* Normalize exponentials */
+        {
+            constexpr bool is_qasymm8_signed = std::is_same<ScalarType, qasymm8_signed_t>::value;
+            /* Loop over row and compute softmax */
+            int x = 0;
+            svbool_t pg = wrapper::svwhilelt<ScalarType>(x, input_width);
+            svbool_t pg_0 = svunpklo(svunpklo(pg));
+            svbool_t pg_1 = svunpkhi(svunpklo(pg));
+            svbool_t pg_2 = svunpklo(svunpkhi(pg));
+            svbool_t pg_3 = svunpkhi(svunpkhi(pg));
+            do
+            {
+                auto vec_in_0 = svld1_f32(pg_0, tmp_ptr + x);
+                auto vec_in_1 = svld1_f32(pg_1, tmp_ptr + x + inc_1);
+                auto vec_in_2 = svld1_f32(pg_2, tmp_ptr + x + inc_2);
+                auto vec_in_3 = svld1_f32(pg_3, tmp_ptr + x + inc_3);
+
+                svfloat32_t res_0{};
+                svfloat32_t res_1{};
+                svfloat32_t res_2{};
+                svfloat32_t res_3{};
+
+                if(is_log)
+                {
+                    res_0 = svsub_f32_z(pg_0, vec_in_0, svdup_n_f32(sum));
+                    res_1 = svsub_f32_z(pg_1, vec_in_1, svdup_n_f32(sum));
+                    res_2 = svsub_f32_z(pg_2, vec_in_2, svdup_n_f32(sum));
+                    res_3 = svsub_f32_z(pg_3, vec_in_3, svdup_n_f32(sum));
+                }
+                else
+                {
+                    res_0 = svmul_f32_z(pg_0, vec_in_0, svdup_n_f32(sum));
+                    res_1 = svmul_f32_z(pg_1, vec_in_1, svdup_n_f32(sum));
+                    res_2 = svmul_f32_z(pg_2, vec_in_2, svdup_n_f32(sum));
+                    res_3 = svmul_f32_z(pg_3, vec_in_3, svdup_n_f32(sum));
+
+                    if(is_qasymm8_signed)
+                    {
+                        const auto offset_vec = svdup_n_f32(128.f);
+                        res_0 = svsub_z(pg_0, vec_in_0, offset_vec);
+                        res_1 = svsub_z(pg_1, vec_in_1, offset_vec);
+                        res_2 = svsub_z(pg_2, vec_in_2, offset_vec);
+                        res_3 = svsub_z(pg_3, vec_in_3, offset_vec);
+                    }
+                }
+
+                // Store value
+                const auto out = convert_float_to_int<SVEType>(res_0, res_1, res_2, res_3);
+                svst1(pg, out_ptr + x, out);
+                x += wrapper::svcnt<ScalarType>();
+                pg = wrapper::svwhilelt<ScalarType>(x, input_width);
+                pg_0 = svunpklo(svunpklo(pg));
+                pg_1 = svunpkhi(svunpklo(pg));
+                pg_2 = svunpklo(svunpkhi(pg));
+                pg_3 = svunpkhi(svunpkhi(pg));
+            }
+            while(svptest_any(all_true_pg, pg));
+        }
+    },
+    in_it, max_it, out_it);
+}
+#endif /* defined(__ARM_FEATURE_SVE2) */
+
+template <typename ScalarType>
+void sve_softmax_logits_1d_float(const ITensor *in, const ITensor *max, void *const tmp,
+                                 ITensor *out, const float beta, bool is_log, const Window &window)
+{
+    const int start_x = in->info()->valid_region().anchor.x();
+    const int input_width = in->info()->valid_region().shape.x();
+
+    Iterator in_it(in, window);
+    Iterator max_it(max, window);
+    Iterator out_it(out, window);
+
+    const auto all_true_pg = wrapper::svptrue<ScalarType>();
+
+    execute_window_loop(window, [&](const Coordinates &)
+    {
+        /* Get pointers */
+        const auto in_ptr = reinterpret_cast<const ScalarType *>(in_it.ptr()) + start_x;
+        const auto out_ptr = reinterpret_cast<ScalarType *>(out_it.ptr()) + start_x;
+        const auto tmp_ptr = reinterpret_cast<ScalarType *>(tmp);
+
+        ScalarType sum{ 0 };
+
+        /* Compute exponentials and sum */
+        {
+            /* Get max value */
+            const auto max_val = *reinterpret_cast<ScalarType *>(max_it.ptr());
+            const auto vec_max = wrapper::svdup_n(max_val);
+
+            /* Init sum to zero */
+            auto vec_sum = wrapper::svdup_n(static_cast<ScalarType>(0));
+
+            /* Loop over row and compute exponentials and sum */
+            int x = 0;
+            svbool_t pg = wrapper::svwhilelt<ScalarType>(x, input_width);
+            do
+            {
+                auto vec_elements = svld1(pg, in_ptr + x);
+                vec_elements = svsub_z(pg, vec_elements, vec_max);
+                if(is_log)
+                {
+                    vec_elements = svmul_z(pg, vec_elements, wrapper::svdup_n(static_cast<ScalarType>(beta)));
+                    vec_sum = svadd_m(pg, vec_sum, wrapper::svexp_z(pg, vec_elements));
+                }
+                else
+                {
+                    vec_elements = wrapper::svexp_z(pg, svmul_z(pg, vec_elements, wrapper::svdup_n(static_cast<ScalarType>(beta))));
+                    vec_sum = svadd_m(pg, vec_sum, vec_elements);
+                }
+                svst1(pg, tmp_ptr + x, vec_elements);
+
+                x += wrapper::svcnt<ScalarType>();
+                pg = wrapper::svwhilelt<ScalarType>(x, input_width);
+            }
+            while(svptest_any(all_true_pg, pg));
+
+            /* Reduce sum */
+            sum = svaddv(all_true_pg, vec_sum);
+
+            if(is_log)
+            {
+                sum = static_cast<ScalarType>(std::log(sum));
+            }
+            else
+            {
+                sum = ScalarType(1) / sum;
+            }
+        }
+
+        /* Normalize exponentials */
+        {
+            /* Loop over row and compute softmax */
+            int x = 0;
+            svbool_t pg = wrapper::svwhilelt<ScalarType>(x, input_width);
+            do
+            {
+                auto vec_in = svld1(pg, tmp_ptr + x);
+                auto normalized_value = wrapper::svdup_n(static_cast<ScalarType>(0));
+                if(is_log)
+                {
+                    normalized_value = svsub_z(pg, vec_in, wrapper::svdup_n(static_cast<ScalarType>(sum)));
+                }
+                else
+                {
+                    normalized_value = svmul_z(pg, vec_in, wrapper::svdup_n(static_cast<ScalarType>(sum)));
+                }
+                svst1(pg, out_ptr + x, normalized_value);
+
+                x += wrapper::svcnt<ScalarType>();
+                pg = wrapper::svwhilelt<ScalarType>(x, input_width);
+            }
+            while(svptest_any(all_true_pg, pg));
+        }
+    },
+    in_it, max_it, out_it);
+}
+
+} // namespace cpu
+} // namespace arm_compute
+#endif /* defined(__ARM_FEATURE_SVE) */
+
+#endif /* SRC_CORE_SVE_KERNELS_SOFTMAX_LIST_H */
--
cgit v1.2.1
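
Note: the functions added above are free functions templated on the element type; the tensors, beta, the log flag, the temporary buffer and the window are all passed per call, which is what makes the kernels stateless. The snippet below is a minimal sketch of how a caller could select and invoke one of them by data type. The SoftmaxKernelEntry struct, the table name and its contents are illustrative assumptions, not part of this patch.

    // Illustrative only: a possible type-based dispatch to the stateless
    // softmax functions introduced in this patch.
    #include "arm_compute/core/ITensor.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/core/Window.h"
    #include "src/core/cpu/kernels/softmax/impl/NEON/list.h"

    namespace arm_compute
    {
    namespace cpu
    {
    // Every argument is supplied at call time, so the kernel object that owns
    // such a table needs no member state.
    using SoftmaxFn = void (*)(const ITensor *, const ITensor *, void *const, ITensor *, float, bool, const Window &);

    struct SoftmaxKernelEntry // hypothetical name, for illustration only
    {
        DataType  dt; // element type handled by this entry
        SoftmaxFn fn; // stateless free function from list.h
    };

    const SoftmaxKernelEntry softmax_kernels[] =
    {
        { DataType::F32, &neon_softmax_logits_1d_float<float> },
        { DataType::QASYMM8, &neon_softmax_logits_1d_quantized<qasymm8_t> },
    };
    } // namespace cpu
    } // namespace arm_compute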