| author | Pablo Marquez Tello <pablo.tello@arm.com> | 2023-09-19 14:46:07 +0100 |
|---|---|---|
| committer | Pablo Marquez Tello <pablo.tello@arm.com> | 2023-09-21 10:20:28 +0000 |
| commit | f57d6ec5ff4305d2e388730f6dad004908e6e97a (patch) | |
| tree | 5efc93e5699e649057c57660b717b726cf607a7b /src/cpu/kernels/gemm_matrix_add/generic/neon/fp16.cpp | |
| parent | e071b5e31004b29afefaa96907032bfd2b4e5a43 (diff) | |
| download | ComputeLibrary-f57d6ec5ff4305d2e388730f6dad004908e6e97a.tar.gz | |
Gemm changes to enable fp16 in armv8a multi_isa builds
* Code guarded with __ARM_FEATURE_FP16_VECTOR_ARITHMETIC needs
to be moved to an fp16.cpp file to allow compilation with
-march=armv8.2-a+fp16
* fp16.cpp needs to use the templates vector_matrix_multiply_f16() and
matrix_matrix_multiply_f16(), which had to be moved from impl.cpp to fp16.cpp
* Partially resolves MLCE-1102
Change-Id: Ic87440797d6f1653c815ab6565972206f5afd0ad
Signed-off-by: Pablo Marquez Tello <pablo.tello@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10345
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
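The first bullet of the commit message describes the multi_isa pattern: only fp16.cpp translation units are compiled with -march=armv8.2-a+fp16, so any FP16 vector intrinsics must live in those files behind __ARM_FEATURE_FP16_VECTOR_ARITHMETIC, while the rest of the library targets baseline armv8-a. A minimal sketch of that pattern, assuming an FP16-capable toolchain; scale_f16 is a hypothetical helper, not Compute Library API:

// Hypothetical fp16.cpp-style translation unit. In a multi_isa build this
// file alone is built with -march=armv8.2-a+fp16, so the compiler defines
// __ARM_FEATURE_FP16_VECTOR_ARITHMETIC here and the intrinsics are legal.
#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
#include <arm_neon.h>

// Scale a buffer in place: buf[i] *= scale (illustrative only).
void scale_f16(float16_t *buf, int n, float16_t scale)
{
    const float16x8_t vscale = vdupq_n_f16(scale);
    int i = 0;
    // Vector loop: 8 half-precision lanes per iteration
    for(; i <= n - 8; i += 8)
    {
        vst1q_f16(buf + i, vmulq_f16(vld1q_f16(buf + i), vscale));
    }
    // Scalar tail for the remaining elements
    for(; i < n; ++i)
    {
        buf[i] *= scale;
    }
}
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC

Callers in files built without +fp16 would reach this code through a runtime-dispatched function pointer rather than by including the intrinsics directly.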
Diffstat (limited to 'src/cpu/kernels/gemm_matrix_add/generic/neon/fp16.cpp')
-rw-r--r-- | src/cpu/kernels/gemm_matrix_add/generic/neon/fp16.cpp | 47 |
1 file changed, 46 insertions, 1 deletion
diff --git a/src/cpu/kernels/gemm_matrix_add/generic/neon/fp16.cpp b/src/cpu/kernels/gemm_matrix_add/generic/neon/fp16.cpp
index 2d61b72078..505a37174e 100644
--- a/src/cpu/kernels/gemm_matrix_add/generic/neon/fp16.cpp
+++ b/src/cpu/kernels/gemm_matrix_add/generic/neon/fp16.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -25,10 +25,55 @@
 #include "src/cpu/kernels/gemm_matrix_add/generic/neon/impl.h"
 
+#include <arm_neon.h>
+
 namespace arm_compute
 {
 namespace cpu
 {
+namespace
+{
+void matrix_addition_f16(const ITensor *src, ITensor *dst, const Window &window, float beta)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+    const float16x8_t beta_f16 = vdupq_n_f16(beta);
+
+    constexpr int window_step_x  = 16;
+    const auto    window_start_x = static_cast<int>(window.x().start());
+    const auto    window_end_x   = static_cast<int>(window.x().end());
+
+    Window win = window.collapse_if_possible(window, Window::DimZ);
+    win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+    Iterator in(src, win);
+    Iterator out(dst, win);
+
+    execute_window_loop(win, [&](const Coordinates &)
+    {
+        const auto in_ptr  = reinterpret_cast<const float16_t *>(in.ptr());
+        const auto out_ptr = reinterpret_cast<float16_t *>(out.ptr());
+
+        int x = window_start_x;
+        for(; x < (window_end_x - window_step_x); x += window_step_x)
+        {
+            float16x8x2_t       alpha_ab = vld2q_f16(out_ptr + x);
+            const float16x8x2_t c        = vld2q_f16(in_ptr + x);
+            // Multiply matrix C by its weight and accumulate
+            alpha_ab.val[0] = vaddq_f16(alpha_ab.val[0], vmulq_f16(c.val[0], beta_f16));
+            alpha_ab.val[1] = vaddq_f16(alpha_ab.val[1], vmulq_f16(c.val[1], beta_f16));
+
+            vst2q_f16(out_ptr + x, alpha_ab);
+        }
+
+        // Left-over loop
+        for(; x < window_end_x; ++x)
+        {
+            *(out_ptr + x) += *(in_ptr + x) * static_cast<float16_t>(beta);
+        }
+    },
+    in, out);
+}
+} // namespace
 void neon_fp16_gemm_matrix_add(const ITensor *src, ITensor *dst, const Window &window, float beta)
 {
     return matrix_addition_f16(src, dst, window, beta);
 }
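For readers less familiar with NEON, the kernel added above is a vectorized dst += beta * src over each row of the execution window: vld2q_f16/vst2q_f16 move 16 half-precision values per iteration as two interleaved float16x8_t registers, and the scalar left-over loop handles the tail. A plain scalar reference of the same arithmetic, with illustrative names that are not Compute Library API:

#include <arm_neon.h> // for float16_t; assumes an FP16-capable Arm toolchain

// Scalar reference for one row: dst[x] += beta * src[x] for x in [0, len).
// This matches both the vector loop and the left-over loop in the kernel.
void matrix_addition_f16_ref(const float16_t *src, float16_t *dst, int len, float beta)
{
    const float16_t b = static_cast<float16_t>(beta);
    for(int x = 0; x < len; ++x)
    {
        dst[x] += src[x] * b;
    }
}

The two-register vld2q_f16 load is simply a way to consume 16 contiguous elements per iteration; since addition and scaling are elementwise, the deinterleaving has no effect on the result.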