Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_4VLx4/generic.cpp')
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_4VLx4/generic.cpp  954
1 file changed, 528 insertions(+), 426 deletions(-)
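
This patch drops the kernel's beta/beta0 scaling path in favour of a bias pointer, an Activation descriptor and an append flag: accumulators are now seeded from the bias row (or from the existing C values when appending), and results are clamped against min/max bounds derived from the activation before being stored. A minimal scalar sketch of those semantics follows; the function name reference_kernel is hypothetical, and B is indexed in plain row-major order here purely to show the arithmetic, whereas the real kernel consumes a packed, vector-length-dependent layout.

#include <algorithm>
#include <limits>

struct Activation {
    enum class Type { None, ReLU, BoundedReLU };
    Type type = Type::None;
    float param1 = 0.0f;   // upper bound used by BoundedReLU
};

// Hypothetical scalar reference for the per-element semantics only.
void reference_kernel(const float *A, int lda, const float *B, float *C, int ldc,
                      int M, int N, int K, const float *bias, Activation act,
                      bool append) {
    // Clamp bounds mirror the switch(act.type) added in the patch.
    float minval = -std::numeric_limits<float>::infinity();
    float maxval =  std::numeric_limits<float>::infinity();
    switch (act.type) {
        default:
        case Activation::Type::None:
            break;
        case Activation::Type::BoundedReLU:
            maxval = act.param1;
            /* fall through */
        case Activation::Type::ReLU:
            minval = 0.0f;
            break;
    }
    for (int y = 0; y < M; y++) {
        for (int x = 0; x < N; x++) {
            // append accumulates onto existing C (replacing the old beta path);
            // otherwise the accumulator is seeded from the bias (zero if absent).
            float acc = append ? C[y * ldc + x] : (bias ? bias[x] : 0.0f);
            for (int k = 0; k < K; k++) {
                // NOTE: row-major B indexing is an illustration; the real kernel
                // reads B from a packed panel starting at B + K_stride * x0.
                acc += A[y * lda + k] * B[k * N + x];
            }
            C[y * ldc + x] = std::min(std::max(acc, minval), maxval);
        }
    }
}

In the SVE code below, the same clamp appears as the ld1rw loads of [%[minptr]] and [%[maxptr]] followed by fmax/fmin on each accumulator register just before the st1w stores.
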
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_4VLx4/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_4VLx4/generic.cpp
index 1b2d7852a8..855d27a151 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_4VLx4/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_4VLx4/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 Arm Limited.
+ * Copyright (c) 2018-2019 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,37 +25,58 @@
#include <algorithm>
+#include "arm_gemm.hpp"
#include "../../asmlib.hpp"
#include "../../utils.hpp"
namespace arm_gemm {
-void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C, int ldc, float beta, int M, int N, int K) {
- const long beta0 = (beta == 0.0f);
+void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool append) {
const int K_stride = K;
const long loops_count = ((K + 4) / 8) - 1;
K -= loops_count * 8;
const long regs_count = (K / 4) - 1;
K -= (regs_count + 1) * 4;
const long leftovers = K;
+ float nullbias[256];
+ if (!append && !bias) {
+ memset(nullbias, 0, (4 * get_vector_length<float>() * sizeof(float)));
+ }
+ float minval = - static_cast<float>(std::numeric_limits<float>::infinity());
+ float maxval = static_cast<float>(std::numeric_limits<float>::infinity());
+ const float * const minptr = &minval;
+ const float * const maxptr = &maxval;
+
+ switch(act.type)
+ {
+ default:
+ case Activation::Type::None:
+ break;
+ case Activation::Type::BoundedReLU:
+ maxval = static_cast<float>(act.param1);
+ /* fall through */
+ case Activation::Type::ReLU:
+ minval = 0.0f;
+ break;
+ }
for (int y=0; y<M; y+=4) {
const float * const a_ptr0_base = A + (y * lda);
const unsigned long ldab = lda * sizeof(float);
float *c_ptr0 = C + (y * ldc);
- const unsigned long ldcb = ldc * sizeof(float);
for (int x0=0; x0<N; x0+=(4 * get_vector_length<float>())) {
const long width = std::min((unsigned long)N-x0, (4 * get_vector_length<float>()));
- const float *betaptr = &beta;
long loops = loops_count;
long regs = regs_count;
long temp = 0;
long blocks = leftovers;
const float *a_ptr0 = a_ptr0_base;
const float *b_ptr0 = B + (K_stride * x0);
+ const unsigned long ldcb = ldc * sizeof(float);
+ const float *biasptr = bias ? bias+x0 : nullbias;
switch(M-y) {
case 1:
@@ -69,269 +90,283 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"whilelt p2.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p3.s, %[temp], %[width]\n"
- "cbz %[beta0], 1f\n"
- "mov z16.s, #0\n"
+ "cbnz %[append], 1f\n"
+ "ld1w z16.s, p0/z, [%[biasptr]]\n"
+ "ld1w z17.s, p1/z, [%[biasptr], #1, MUL VL]\n"
+ "ld1w z18.s, p2/z, [%[biasptr], #2, MUL VL]\n"
+ "ld1w z19.s, p3/z, [%[biasptr], #3, MUL VL]\n"
"ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
- "mov z17.s, #0\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
"ld1w z8.s, p7/z, [%[b_ptr0]]\n"
- "mov z18.s, #0\n"
"ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
- "mov z19.s, #0\n"
"ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
"ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "b 2f\n"
+ "ld1w z12.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1w z13.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z14.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "b 3f\n"
"1:\n"
- "ld1rw z15.s, p7/z, [%[betaptr]]\n"
"ld1w z16.s, p0/z, [%[c_ptr0]]\n"
"ld1w z17.s, p1/z, [%[c_ptr0], #1, MUL VL]\n"
"ld1w z18.s, p2/z, [%[c_ptr0], #2, MUL VL]\n"
"ld1w z19.s, p3/z, [%[c_ptr0], #3, MUL VL]\n"
"ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
"add %[a_ptr0], %[a_ptr0], #0x10\n"
- "fmul z16.s, p7/m, z16.s, z15.s\n"
"ld1w z8.s, p7/z, [%[b_ptr0]]\n"
- "fmul z17.s, p7/m, z17.s, z15.s\n"
"ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmul z18.s, p7/m, z18.s, z15.s\n"
"ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmul z19.s, p7/m, z19.s, z15.s\n"
"ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
- "2:\n"
"ld1w z12.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
"ld1w z13.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
"ld1w z14.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
- "ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
"addvl %[b_ptr0], %[b_ptr0], #8\n"
- "cbz %[loops], 3f\n"
- "4:\n"
+ "cbz %[loops], 2f\n"
+ "3:\n"
"fmla z16.s, z8.s, z0.s[0]\n"
- "ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
+ "ld1w z15.s, p7/z, [%[b_ptr0], #-1, MUL VL]\n"
"fmla z17.s, z9.s, z0.s[0]\n"
- "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
+ "ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
"fmla z18.s, z10.s, z0.s[0]\n"
- "ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
"fmla z19.s, z11.s, z0.s[0]\n"
- "ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
+ "ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z16.s, z12.s, z0.s[1]\n"
- "ld1w z12.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z17.s, z13.s, z0.s[1]\n"
- "ld1w z13.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z18.s, z14.s, z0.s[1]\n"
- "ld1w z14.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1w z12.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
"fmla z19.s, z15.s, z0.s[1]\n"
- "ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
- "addvl %[b_ptr0], %[b_ptr0], #16\n"
+ "ld1w z13.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
"fmla z16.s, z8.s, z0.s[2]\n"
- "subs %[loops], %[loops], #0x1\n"
+ "ld1w z14.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
"fmla z17.s, z9.s, z0.s[2]\n"
+ "ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
"fmla z18.s, z10.s, z0.s[2]\n"
- "ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
+ "subs %[loops], %[loops], #0x1\n"
"fmla z19.s, z11.s, z0.s[2]\n"
- "ld1w z9.s, p7/z, [%[b_ptr0], #-7, MUL VL]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #16\n"
"fmla z16.s, z12.s, z0.s[3]\n"
- "ld1w z10.s, p7/z, [%[b_ptr0], #-6, MUL VL]\n"
- "ld1w z11.s, p7/z, [%[b_ptr0], #-5, MUL VL]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x20\n"
"fmla z17.s, z13.s, z0.s[3]\n"
- "ld1w z12.s, p7/z, [%[b_ptr0], #-4, MUL VL]\n"
+ "ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
"fmla z18.s, z14.s, z0.s[3]\n"
- "ld1w z13.s, p7/z, [%[b_ptr0], #-3, MUL VL]\n"
+ "ld1w z9.s, p7/z, [%[b_ptr0], #-7, MUL VL]\n"
"fmla z19.s, z15.s, z0.s[3]\n"
- "ld1w z14.s, p7/z, [%[b_ptr0], #-2, MUL VL]\n"
+ "ld1w z10.s, p7/z, [%[b_ptr0], #-6, MUL VL]\n"
+ "ld1w z11.s, p7/z, [%[b_ptr0], #-5, MUL VL]\n"
"fmla z16.s, z8.s, z4.s[0]\n"
- "ld1w z15.s, p7/z, [%[b_ptr0], #-1, MUL VL]\n"
- "ld1rqw z0.s, p7/z, [%[a_ptr0], #-0x10]\n"
+ "ld1w z12.s, p7/z, [%[b_ptr0], #-4, MUL VL]\n"
"fmla z17.s, z9.s, z4.s[0]\n"
- "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z13.s, p7/z, [%[b_ptr0], #-3, MUL VL]\n"
"fmla z18.s, z10.s, z4.s[0]\n"
- "ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z14.s, p7/z, [%[b_ptr0], #-2, MUL VL]\n"
"fmla z19.s, z11.s, z4.s[0]\n"
- "ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z15.s, p7/z, [%[b_ptr0], #-1, MUL VL]\n"
"fmla z16.s, z12.s, z4.s[1]\n"
- "ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
- "ld1w z12.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #-0x10]\n"
"fmla z17.s, z13.s, z4.s[1]\n"
- "ld1w z13.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
"fmla z18.s, z14.s, z4.s[1]\n"
- "ld1w z14.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z19.s, z15.s, z4.s[1]\n"
- "ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z16.s, z8.s, z4.s[2]\n"
- "addvl %[b_ptr0], %[b_ptr0], #16\n"
+ "ld1w z12.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
"fmla z17.s, z9.s, z4.s[2]\n"
+ "ld1w z13.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
"fmla z18.s, z10.s, z4.s[2]\n"
+ "ld1w z14.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
"fmla z19.s, z11.s, z4.s[2]\n"
+ "ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
"fmla z16.s, z12.s, z4.s[3]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #16\n"
+ "fmla z17.s, z13.s, z4.s[3]\n"
+ "fmla z18.s, z14.s, z4.s[3]\n"
+ "fmla z19.s, z15.s, z4.s[3]\n"
"ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
"ld1w z9.s, p7/z, [%[b_ptr0], #-7, MUL VL]\n"
- "fmla z17.s, z13.s, z4.s[3]\n"
"ld1w z10.s, p7/z, [%[b_ptr0], #-6, MUL VL]\n"
- "fmla z18.s, z14.s, z4.s[3]\n"
"ld1w z11.s, p7/z, [%[b_ptr0], #-5, MUL VL]\n"
- "fmla z19.s, z15.s, z4.s[3]\n"
"ld1w z12.s, p7/z, [%[b_ptr0], #-4, MUL VL]\n"
"ld1w z13.s, p7/z, [%[b_ptr0], #-3, MUL VL]\n"
"ld1w z14.s, p7/z, [%[b_ptr0], #-2, MUL VL]\n"
- "ld1w z15.s, p7/z, [%[b_ptr0], #-1, MUL VL]\n"
- "b.ne 4b\n"
- "3:\n"
- "cbz %[regs], 5f\n"
+ "b.ne 3b\n"
+ "2:\n"
+ "cbz %[regs], 4f\n"
"fmla z16.s, z8.s, z0.s[0]\n"
- "ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
+ "ld1w z15.s, p7/z, [%[b_ptr0], #-1, MUL VL]\n"
"fmla z17.s, z9.s, z0.s[0]\n"
- "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
+ "ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
"fmla z18.s, z10.s, z0.s[0]\n"
- "ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
"fmla z19.s, z11.s, z0.s[0]\n"
- "ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z16.s, z12.s, z0.s[1]\n"
- "ld1w z12.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z17.s, z13.s, z0.s[1]\n"
- "ld1w z13.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z18.s, z14.s, z0.s[1]\n"
- "ld1w z14.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1w z12.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
"fmla z19.s, z15.s, z0.s[1]\n"
- "ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
- "addvl %[b_ptr0], %[b_ptr0], #16\n"
+ "ld1w z13.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
"fmla z16.s, z8.s, z0.s[2]\n"
+ "ld1w z14.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
"fmla z17.s, z9.s, z0.s[2]\n"
+ "ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
"fmla z18.s, z10.s, z0.s[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #16\n"
"fmla z19.s, z11.s, z0.s[2]\n"
- "ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
- "ld1w z9.s, p7/z, [%[b_ptr0], #-7, MUL VL]\n"
"fmla z16.s, z12.s, z0.s[3]\n"
- "ld1w z10.s, p7/z, [%[b_ptr0], #-6, MUL VL]\n"
"fmla z17.s, z13.s, z0.s[3]\n"
- "ld1w z11.s, p7/z, [%[b_ptr0], #-5, MUL VL]\n"
+ "ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
"fmla z18.s, z14.s, z0.s[3]\n"
- "ld1w z12.s, p7/z, [%[b_ptr0], #-4, MUL VL]\n"
+ "ld1w z9.s, p7/z, [%[b_ptr0], #-7, MUL VL]\n"
"fmla z19.s, z15.s, z0.s[3]\n"
- "ld1w z13.s, p7/z, [%[b_ptr0], #-3, MUL VL]\n"
- "ld1w z14.s, p7/z, [%[b_ptr0], #-2, MUL VL]\n"
+ "ld1w z10.s, p7/z, [%[b_ptr0], #-6, MUL VL]\n"
+ "ld1w z11.s, p7/z, [%[b_ptr0], #-5, MUL VL]\n"
"fmla z16.s, z8.s, z4.s[0]\n"
- "ld1w z15.s, p7/z, [%[b_ptr0], #-1, MUL VL]\n"
+ "ld1w z12.s, p7/z, [%[b_ptr0], #-4, MUL VL]\n"
"fmla z17.s, z9.s, z4.s[0]\n"
- "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x10]\n"
+ "ld1w z13.s, p7/z, [%[b_ptr0], #-3, MUL VL]\n"
"fmla z18.s, z10.s, z4.s[0]\n"
- "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
+ "ld1w z14.s, p7/z, [%[b_ptr0], #-2, MUL VL]\n"
"fmla z19.s, z11.s, z4.s[0]\n"
- "ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z15.s, p7/z, [%[b_ptr0], #-1, MUL VL]\n"
"fmla z16.s, z12.s, z4.s[1]\n"
- "ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
"fmla z17.s, z13.s, z4.s[1]\n"
- "ld1w z12.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z14.s, z4.s[1]\n"
- "ld1w z13.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z15.s, z4.s[1]\n"
- "ld1w z14.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
- "ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z16.s, z8.s, z4.s[2]\n"
+ "ld1w z12.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
"fmla z17.s, z9.s, z4.s[2]\n"
+ "ld1w z13.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
"fmla z18.s, z10.s, z4.s[2]\n"
+ "ld1w z14.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
"fmla z19.s, z11.s, z4.s[2]\n"
+ "ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
"fmla z16.s, z12.s, z4.s[3]\n"
+ "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x10]\n"
"fmla z17.s, z13.s, z4.s[3]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
"fmla z18.s, z14.s, z4.s[3]\n"
+ "addvl %[a_ptr0], %[a_ptr0], #2\n"
"fmla z19.s, z15.s, z4.s[3]\n"
- "cbz %[blocks], 6f\n"
- "addvl %[b_ptr0], %[b_ptr0], #16\n"
+ "cbz %[blocks], 5f\n"
+ "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
"subs %[blocks], %[blocks], #0x1\n"
- "ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
- "ld1w z9.s, p7/z, [%[b_ptr0], #-7, MUL VL]\n"
- "ld1w z10.s, p7/z, [%[b_ptr0], #-6, MUL VL]\n"
- "ld1w z11.s, p7/z, [%[b_ptr0], #-5, MUL VL]\n"
+ "ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z16.s, z8.s, z0.s[0]\n"
+ "ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z17.s, z9.s, z0.s[0]\n"
"fmla z18.s, z10.s, z0.s[0]\n"
"fmla z19.s, z11.s, z0.s[0]\n"
- "b.eq 6f\n"
- "ld1w z12.s, p7/z, [%[b_ptr0], #-4, MUL VL]\n"
+ "b.eq 5f\n"
+ "ld1w z12.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
"subs %[blocks], %[blocks], #0x1\n"
- "ld1w z13.s, p7/z, [%[b_ptr0], #-3, MUL VL]\n"
- "ld1w z14.s, p7/z, [%[b_ptr0], #-2, MUL VL]\n"
- "ld1w z15.s, p7/z, [%[b_ptr0], #-1, MUL VL]\n"
+ "ld1w z13.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z14.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
"fmla z16.s, z12.s, z0.s[1]\n"
+ "ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
"fmla z17.s, z13.s, z0.s[1]\n"
"fmla z18.s, z14.s, z0.s[1]\n"
"fmla z19.s, z15.s, z0.s[1]\n"
- "b.eq 6f\n"
- "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
- "ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "b.eq 5f\n"
+ "addvl %[b_ptr0], %[b_ptr0], #16\n"
+ "ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
+ "ld1w z9.s, p7/z, [%[b_ptr0], #-7, MUL VL]\n"
+ "ld1w z10.s, p7/z, [%[b_ptr0], #-6, MUL VL]\n"
+ "ld1w z11.s, p7/z, [%[b_ptr0], #-5, MUL VL]\n"
"fmla z16.s, z8.s, z0.s[2]\n"
"fmla z17.s, z9.s, z0.s[2]\n"
"fmla z18.s, z10.s, z0.s[2]\n"
"fmla z19.s, z11.s, z0.s[2]\n"
- "b 6f\n"
- "5:\n"
+ "b 5f\n"
+ "4:\n"
"fmla z16.s, z8.s, z0.s[0]\n"
- "ld1rqw z4.s, p6/z, [%[a_ptr0]]\n"
+ "ld1w z15.s, p7/z, [%[b_ptr0], #-1, MUL VL]\n"
"fmla z17.s, z9.s, z0.s[0]\n"
"ld1w z8.s, p7/z, [%[b_ptr0]]\n"
"fmla z18.s, z10.s, z0.s[0]\n"
"ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z19.s, z11.s, z0.s[0]\n"
"ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z16.s, z12.s, z0.s[1]\n"
- "ld1w z12.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
+ "ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z17.s, z13.s, z0.s[1]\n"
- "ld1w z13.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z12.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
"fmla z18.s, z14.s, z0.s[1]\n"
- "ld1w z14.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
+ "ld1w z13.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
"fmla z19.s, z15.s, z0.s[1]\n"
- "ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
+ "ld1w z14.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
"fmla z16.s, z8.s, z0.s[2]\n"
+ "ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
"fmla z17.s, z9.s, z0.s[2]\n"
+ "ld1rqw z4.s, p6/z, [%[a_ptr0]]\n"
"fmla z18.s, z10.s, z0.s[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
"fmla z19.s, z11.s, z0.s[2]\n"
+ "addvl %[a_ptr0], %[a_ptr0], #1\n"
"fmla z16.s, z12.s, z0.s[3]\n"
"fmla z17.s, z13.s, z0.s[3]\n"
"fmla z18.s, z14.s, z0.s[3]\n"
"fmla z19.s, z15.s, z0.s[3]\n"
- "cbz %[blocks], 6f\n"
- "addvl %[b_ptr0], %[b_ptr0], #16\n"
+ "cbz %[blocks], 5f\n"
+ "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
"subs %[blocks], %[blocks], #0x1\n"
- "ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
- "ld1w z9.s, p7/z, [%[b_ptr0], #-7, MUL VL]\n"
- "ld1w z10.s, p7/z, [%[b_ptr0], #-6, MUL VL]\n"
- "ld1w z11.s, p7/z, [%[b_ptr0], #-5, MUL VL]\n"
+ "ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z16.s, z8.s, z4.s[0]\n"
+ "ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z17.s, z9.s, z4.s[0]\n"
"fmla z18.s, z10.s, z4.s[0]\n"
"fmla z19.s, z11.s, z4.s[0]\n"
- "b.eq 6f\n"
- "ld1w z12.s, p7/z, [%[b_ptr0], #-4, MUL VL]\n"
+ "b.eq 5f\n"
+ "ld1w z12.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
"subs %[blocks], %[blocks], #0x1\n"
- "ld1w z13.s, p7/z, [%[b_ptr0], #-3, MUL VL]\n"
- "ld1w z14.s, p7/z, [%[b_ptr0], #-2, MUL VL]\n"
- "ld1w z15.s, p7/z, [%[b_ptr0], #-1, MUL VL]\n"
+ "ld1w z13.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z14.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
"fmla z16.s, z12.s, z4.s[1]\n"
+ "ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
"fmla z17.s, z13.s, z4.s[1]\n"
"fmla z18.s, z14.s, z4.s[1]\n"
"fmla z19.s, z15.s, z4.s[1]\n"
- "b.eq 6f\n"
- "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
- "ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "b.eq 5f\n"
+ "addvl %[b_ptr0], %[b_ptr0], #16\n"
+ "ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
+ "ld1w z9.s, p7/z, [%[b_ptr0], #-7, MUL VL]\n"
+ "ld1w z10.s, p7/z, [%[b_ptr0], #-6, MUL VL]\n"
+ "ld1w z11.s, p7/z, [%[b_ptr0], #-5, MUL VL]\n"
"fmla z16.s, z8.s, z4.s[2]\n"
"fmla z17.s, z9.s, z4.s[2]\n"
"fmla z18.s, z10.s, z4.s[2]\n"
"fmla z19.s, z11.s, z4.s[2]\n"
- "6:\n"
+ "5:\n"
+ "ld1rw z14.s, p7/z, [%[minptr]]\n"
+ "ld1rw z15.s, p7/z, [%[maxptr]]\n"
+ "fmax z16.s, p7/m, z16.s, z14.s\n"
+ "fmax z17.s, p7/m, z17.s, z14.s\n"
+ "fmax z18.s, p7/m, z18.s, z14.s\n"
+ "fmax z19.s, p7/m, z19.s, z14.s\n"
+ "fmin z16.s, p7/m, z16.s, z15.s\n"
+ "fmin z17.s, p7/m, z17.s, z15.s\n"
+ "fmin z18.s, p7/m, z18.s, z15.s\n"
+ "fmin z19.s, p7/m, z19.s, z15.s\n"
"st1w z16.s, p0, [%[c_ptr0]]\n"
"st1w z17.s, p1, [%[c_ptr0], #1, MUL VL]\n"
"st1w z18.s, p2, [%[c_ptr0], #2, MUL VL]\n"
"st1w z19.s, p3, [%[c_ptr0], #3, MUL VL]\n"
"addvl %[c_ptr0], %[c_ptr0], #4\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [betaptr] "r" (betaptr), [width] "r" (width), [beta0] "r" (beta0), [leftovers] "r" (leftovers), [lda] "r" (ldab), [ldc] "r" (ldcb)
+ : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
);
break;
@@ -350,83 +385,74 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"whilelt p2.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p3.s, %[temp], %[width]\n"
- "cbz %[beta0], 1f\n"
- "mov z16.s, #0\n"
+ "cbnz %[append], 1f\n"
+ "ld1w z16.s, p0/z, [%[biasptr]]\n"
+ "ld1w z17.s, p1/z, [%[biasptr], #1, MUL VL]\n"
+ "ld1w z18.s, p2/z, [%[biasptr], #2, MUL VL]\n"
+ "ld1w z19.s, p3/z, [%[biasptr], #3, MUL VL]\n"
+ "mov z20.d, z16.d\n"
"ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
- "mov z17.s, #0\n"
+ "mov z21.d, z17.d\n"
"ld1rqw z1.s, p7/z, [a_ptr1]\n"
- "mov z18.s, #0\n"
+ "mov z22.d, z18.d\n"
"ld1w z8.s, p7/z, [%[b_ptr0]]\n"
- "mov z19.s, #0\n"
+ "mov z23.d, z19.d\n"
"ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
- "mov z20.s, #0\n"
"ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
- "mov z21.s, #0\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
"ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
- "mov z22.s, #0\n"
+ "add a_ptr1, a_ptr1, #0x10\n"
"ld1w z12.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
- "mov z23.s, #0\n"
"ld1w z13.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
"ld1w z14.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
- "add a_ptr1, a_ptr1, #0x10\n"
- "b 2f\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "b 3f\n"
"1:\n"
- "ld1rw z15.s, p7/z, [%[betaptr]]\n"
"ld1w z16.s, p0/z, [%[c_ptr0]]\n"
"ld1w z17.s, p1/z, [%[c_ptr0], #1, MUL VL]\n"
"ld1w z18.s, p2/z, [%[c_ptr0], #2, MUL VL]\n"
"ld1w z19.s, p3/z, [%[c_ptr0], #3, MUL VL]\n"
"ld1w z20.s, p0/z, [c_ptr1]\n"
- "fmul z16.s, p7/m, z16.s, z15.s\n"
"ld1w z21.s, p1/z, [c_ptr1, #1, MUL VL]\n"
- "fmul z17.s, p7/m, z17.s, z15.s\n"
"ld1w z22.s, p2/z, [c_ptr1, #2, MUL VL]\n"
- "fmul z18.s, p7/m, z18.s, z15.s\n"
"ld1w z23.s, p3/z, [c_ptr1, #3, MUL VL]\n"
- "fmul z19.s, p7/m, z19.s, z15.s\n"
"ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
- "fmul z20.s, p7/m, z20.s, z15.s\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
"ld1rqw z1.s, p7/z, [a_ptr1]\n"
- "fmul z21.s, p7/m, z21.s, z15.s\n"
+ "add a_ptr1, a_ptr1, #0x10\n"
"ld1w z8.s, p7/z, [%[b_ptr0]]\n"
- "fmul z22.s, p7/m, z22.s, z15.s\n"
"ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmul z23.s, p7/m, z23.s, z15.s\n"
"ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
"ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
"ld1w z12.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
- "add a_ptr1, a_ptr1, #0x10\n"
"ld1w z13.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
"ld1w z14.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
- "ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
- "2:\n"
"addvl %[b_ptr0], %[b_ptr0], #8\n"
- "cbz %[loops], 3f\n"
- "4:\n"
+ "cbz %[loops], 2f\n"
+ "3:\n"
"fmla z16.s, z8.s, z0.s[0]\n"
- "ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
+ "ld1w z15.s, p7/z, [%[b_ptr0], #-1, MUL VL]\n"
"fmla z20.s, z8.s, z1.s[0]\n"
- "ld1rqw z5.s, p7/z, [a_ptr1]\n"
+ "ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
"fmla z17.s, z9.s, z0.s[0]\n"
- "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr1]\n"
"fmla z21.s, z9.s, z1.s[0]\n"
- "ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
"fmla z18.s, z10.s, z0.s[0]\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
+ "ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z22.s, z10.s, z1.s[0]\n"
"ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z11.s, z0.s[0]\n"
- "add a_ptr1, a_ptr1, #0x20\n"
+ "subs %[loops], %[loops], #0x1\n"
"fmla z23.s, z11.s, z1.s[0]\n"
"ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z16.s, z12.s, z0.s[1]\n"
- "subs %[loops], %[loops], #0x1\n"
+ "add %[a_ptr0], %[a_ptr0], #0x20\n"
"fmla z20.s, z12.s, z1.s[1]\n"
"ld1w z12.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
"fmla z17.s, z13.s, z0.s[1]\n"
+ "add a_ptr1, a_ptr1, #0x20\n"
"fmla z21.s, z13.s, z1.s[1]\n"
"ld1w z13.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
"fmla z18.s, z14.s, z0.s[1]\n"
@@ -440,12 +466,12 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z20.s, z8.s, z1.s[2]\n"
"fmla z17.s, z9.s, z0.s[2]\n"
"fmla z21.s, z9.s, z1.s[2]\n"
- "fmla z18.s, z10.s, z0.s[2]\n"
"ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
- "fmla z22.s, z10.s, z1.s[2]\n"
+ "fmla z18.s, z10.s, z0.s[2]\n"
"ld1w z9.s, p7/z, [%[b_ptr0], #-7, MUL VL]\n"
- "fmla z19.s, z11.s, z0.s[2]\n"
+ "fmla z22.s, z10.s, z1.s[2]\n"
"ld1w z10.s, p7/z, [%[b_ptr0], #-6, MUL VL]\n"
+ "fmla z19.s, z11.s, z0.s[2]\n"
"fmla z23.s, z11.s, z1.s[2]\n"
"ld1w z11.s, p7/z, [%[b_ptr0], #-5, MUL VL]\n"
"fmla z16.s, z12.s, z0.s[3]\n"
@@ -491,12 +517,12 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z20.s, z8.s, z5.s[2]\n"
"fmla z17.s, z9.s, z4.s[2]\n"
"fmla z21.s, z9.s, z5.s[2]\n"
- "fmla z18.s, z10.s, z4.s[2]\n"
"ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
- "fmla z22.s, z10.s, z5.s[2]\n"
+ "fmla z18.s, z10.s, z4.s[2]\n"
"ld1w z9.s, p7/z, [%[b_ptr0], #-7, MUL VL]\n"
- "fmla z19.s, z11.s, z4.s[2]\n"
+ "fmla z22.s, z10.s, z5.s[2]\n"
"ld1w z10.s, p7/z, [%[b_ptr0], #-6, MUL VL]\n"
+ "fmla z19.s, z11.s, z4.s[2]\n"
"fmla z23.s, z11.s, z5.s[2]\n"
"ld1w z11.s, p7/z, [%[b_ptr0], #-5, MUL VL]\n"
"fmla z16.s, z12.s, z4.s[3]\n"
@@ -510,19 +536,19 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"ld1w z14.s, p7/z, [%[b_ptr0], #-2, MUL VL]\n"
"fmla z19.s, z15.s, z4.s[3]\n"
"fmla z23.s, z15.s, z5.s[3]\n"
- "ld1w z15.s, p7/z, [%[b_ptr0], #-1, MUL VL]\n"
- "b.ne 4b\n"
- "3:\n"
- "cbz %[regs], 5f\n"
+ "b.ne 3b\n"
+ "2:\n"
+ "cbz %[regs], 4f\n"
"fmla z16.s, z8.s, z0.s[0]\n"
- "ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
+ "ld1w z15.s, p7/z, [%[b_ptr0], #-1, MUL VL]\n"
"fmla z20.s, z8.s, z1.s[0]\n"
- "ld1rqw z5.s, p7/z, [a_ptr1]\n"
+ "ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
"fmla z17.s, z9.s, z0.s[0]\n"
- "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr1]\n"
"fmla z21.s, z9.s, z1.s[0]\n"
- "ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
"fmla z18.s, z10.s, z0.s[0]\n"
+ "ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z22.s, z10.s, z1.s[0]\n"
"ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z11.s, z0.s[0]\n"
@@ -545,12 +571,12 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z20.s, z8.s, z1.s[2]\n"
"fmla z17.s, z9.s, z0.s[2]\n"
"fmla z21.s, z9.s, z1.s[2]\n"
- "fmla z18.s, z10.s, z0.s[2]\n"
"ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
- "fmla z22.s, z10.s, z1.s[2]\n"
+ "fmla z18.s, z10.s, z0.s[2]\n"
"ld1w z9.s, p7/z, [%[b_ptr0], #-7, MUL VL]\n"
- "fmla z19.s, z11.s, z0.s[2]\n"
+ "fmla z22.s, z10.s, z1.s[2]\n"
"ld1w z10.s, p7/z, [%[b_ptr0], #-6, MUL VL]\n"
+ "fmla z19.s, z11.s, z0.s[2]\n"
"fmla z23.s, z11.s, z1.s[2]\n"
"ld1w z11.s, p7/z, [%[b_ptr0], #-5, MUL VL]\n"
"fmla z16.s, z12.s, z0.s[3]\n"
@@ -571,9 +597,11 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z20.s, z8.s, z5.s[0]\n"
"ld1w z8.s, p7/z, [%[b_ptr0]]\n"
"fmla z17.s, z9.s, z4.s[0]\n"
+ "addvl %[a_ptr0], %[a_ptr0], #2\n"
"fmla z21.s, z9.s, z5.s[0]\n"
"ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z10.s, z4.s[0]\n"
+ "addvl a_ptr1, a_ptr1, #2\n"
"fmla z22.s, z10.s, z5.s[0]\n"
"ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z11.s, z4.s[0]\n"
@@ -592,6 +620,7 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z23.s, z15.s, z5.s[1]\n"
"ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
"fmla z16.s, z8.s, z4.s[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
"fmla z20.s, z8.s, z5.s[2]\n"
"fmla z17.s, z9.s, z4.s[2]\n"
"fmla z21.s, z9.s, z5.s[2]\n"
@@ -607,14 +636,13 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z22.s, z14.s, z5.s[3]\n"
"fmla z19.s, z15.s, z4.s[3]\n"
"fmla z23.s, z15.s, z5.s[3]\n"
- "cbz %[blocks], 6f\n"
- "addvl %[b_ptr0], %[b_ptr0], #16\n"
+ "cbz %[blocks], 5f\n"
+ "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
"subs %[blocks], %[blocks], #0x1\n"
- "ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
- "ld1w z9.s, p7/z, [%[b_ptr0], #-7, MUL VL]\n"
- "ld1w z10.s, p7/z, [%[b_ptr0], #-6, MUL VL]\n"
- "ld1w z11.s, p7/z, [%[b_ptr0], #-5, MUL VL]\n"
+ "ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z16.s, z8.s, z0.s[0]\n"
+ "ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z20.s, z8.s, z1.s[0]\n"
"fmla z17.s, z9.s, z0.s[0]\n"
"fmla z21.s, z9.s, z1.s[0]\n"
@@ -622,13 +650,13 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z22.s, z10.s, z1.s[0]\n"
"fmla z19.s, z11.s, z0.s[0]\n"
"fmla z23.s, z11.s, z1.s[0]\n"
- "b.eq 6f\n"
- "ld1w z12.s, p7/z, [%[b_ptr0], #-4, MUL VL]\n"
+ "b.eq 5f\n"
+ "ld1w z12.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
"subs %[blocks], %[blocks], #0x1\n"
- "ld1w z13.s, p7/z, [%[b_ptr0], #-3, MUL VL]\n"
- "ld1w z14.s, p7/z, [%[b_ptr0], #-2, MUL VL]\n"
- "ld1w z15.s, p7/z, [%[b_ptr0], #-1, MUL VL]\n"
+ "ld1w z13.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z14.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
"fmla z16.s, z12.s, z0.s[1]\n"
+ "ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
"fmla z20.s, z12.s, z1.s[1]\n"
"fmla z17.s, z13.s, z0.s[1]\n"
"fmla z21.s, z13.s, z1.s[1]\n"
@@ -636,11 +664,12 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z22.s, z14.s, z1.s[1]\n"
"fmla z19.s, z15.s, z0.s[1]\n"
"fmla z23.s, z15.s, z1.s[1]\n"
- "b.eq 6f\n"
- "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
- "ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "b.eq 5f\n"
+ "addvl %[b_ptr0], %[b_ptr0], #16\n"
+ "ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
+ "ld1w z9.s, p7/z, [%[b_ptr0], #-7, MUL VL]\n"
+ "ld1w z10.s, p7/z, [%[b_ptr0], #-6, MUL VL]\n"
+ "ld1w z11.s, p7/z, [%[b_ptr0], #-5, MUL VL]\n"
"fmla z16.s, z8.s, z0.s[2]\n"
"fmla z20.s, z8.s, z1.s[2]\n"
"fmla z17.s, z9.s, z0.s[2]\n"
@@ -649,23 +678,26 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z22.s, z10.s, z1.s[2]\n"
"fmla z19.s, z11.s, z0.s[2]\n"
"fmla z23.s, z11.s, z1.s[2]\n"
- "b 6f\n"
- "5:\n"
+ "b 5f\n"
+ "4:\n"
"fmla z16.s, z8.s, z0.s[0]\n"
- "ld1rqw z4.s, p6/z, [%[a_ptr0]]\n"
+ "ld1w z15.s, p7/z, [%[b_ptr0], #-1, MUL VL]\n"
"fmla z20.s, z8.s, z1.s[0]\n"
- "ld1rqw z5.s, p6/z, [a_ptr1]\n"
- "fmla z17.s, z9.s, z0.s[0]\n"
"ld1w z8.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z17.s, z9.s, z0.s[0]\n"
+ "ld1rqw z4.s, p6/z, [%[a_ptr0]]\n"
"fmla z21.s, z9.s, z1.s[0]\n"
"ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z10.s, z0.s[0]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr1]\n"
"fmla z22.s, z10.s, z1.s[0]\n"
"ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z11.s, z0.s[0]\n"
+ "addvl %[a_ptr0], %[a_ptr0], #1\n"
"fmla z23.s, z11.s, z1.s[0]\n"
"ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z16.s, z12.s, z0.s[1]\n"
+ "addvl a_ptr1, a_ptr1, #1\n"
"fmla z20.s, z12.s, z1.s[1]\n"
"ld1w z12.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
"fmla z17.s, z13.s, z0.s[1]\n"
@@ -678,6 +710,7 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z23.s, z15.s, z1.s[1]\n"
"ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
"fmla z16.s, z8.s, z0.s[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
"fmla z20.s, z8.s, z1.s[2]\n"
"fmla z17.s, z9.s, z0.s[2]\n"
"fmla z21.s, z9.s, z1.s[2]\n"
@@ -693,14 +726,13 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z22.s, z14.s, z1.s[3]\n"
"fmla z19.s, z15.s, z0.s[3]\n"
"fmla z23.s, z15.s, z1.s[3]\n"
- "cbz %[blocks], 6f\n"
- "addvl %[b_ptr0], %[b_ptr0], #16\n"
+ "cbz %[blocks], 5f\n"
+ "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
"subs %[blocks], %[blocks], #0x1\n"
- "ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
- "ld1w z9.s, p7/z, [%[b_ptr0], #-7, MUL VL]\n"
- "ld1w z10.s, p7/z, [%[b_ptr0], #-6, MUL VL]\n"
- "ld1w z11.s, p7/z, [%[b_ptr0], #-5, MUL VL]\n"
+ "ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z16.s, z8.s, z4.s[0]\n"
+ "ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z20.s, z8.s, z5.s[0]\n"
"fmla z17.s, z9.s, z4.s[0]\n"
"fmla z21.s, z9.s, z5.s[0]\n"
@@ -708,13 +740,13 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z22.s, z10.s, z5.s[0]\n"
"fmla z19.s, z11.s, z4.s[0]\n"
"fmla z23.s, z11.s, z5.s[0]\n"
- "b.eq 6f\n"
- "ld1w z12.s, p7/z, [%[b_ptr0], #-4, MUL VL]\n"
+ "b.eq 5f\n"
+ "ld1w z12.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
"subs %[blocks], %[blocks], #0x1\n"
- "ld1w z13.s, p7/z, [%[b_ptr0], #-3, MUL VL]\n"
- "ld1w z14.s, p7/z, [%[b_ptr0], #-2, MUL VL]\n"
- "ld1w z15.s, p7/z, [%[b_ptr0], #-1, MUL VL]\n"
+ "ld1w z13.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z14.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
"fmla z16.s, z12.s, z4.s[1]\n"
+ "ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
"fmla z20.s, z12.s, z5.s[1]\n"
"fmla z17.s, z13.s, z4.s[1]\n"
"fmla z21.s, z13.s, z5.s[1]\n"
@@ -722,11 +754,12 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z22.s, z14.s, z5.s[1]\n"
"fmla z19.s, z15.s, z4.s[1]\n"
"fmla z23.s, z15.s, z5.s[1]\n"
- "b.eq 6f\n"
- "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
- "ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "b.eq 5f\n"
+ "addvl %[b_ptr0], %[b_ptr0], #16\n"
+ "ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
+ "ld1w z9.s, p7/z, [%[b_ptr0], #-7, MUL VL]\n"
+ "ld1w z10.s, p7/z, [%[b_ptr0], #-6, MUL VL]\n"
+ "ld1w z11.s, p7/z, [%[b_ptr0], #-5, MUL VL]\n"
"fmla z16.s, z8.s, z4.s[2]\n"
"fmla z20.s, z8.s, z5.s[2]\n"
"fmla z17.s, z9.s, z4.s[2]\n"
@@ -735,10 +768,28 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z22.s, z10.s, z5.s[2]\n"
"fmla z19.s, z11.s, z4.s[2]\n"
"fmla z23.s, z11.s, z5.s[2]\n"
- "6:\n"
+ "5:\n"
+ "ld1rw z14.s, p7/z, [%[minptr]]\n"
+ "ld1rw z15.s, p7/z, [%[maxptr]]\n"
+ "fmax z16.s, p7/m, z16.s, z14.s\n"
+ "fmax z17.s, p7/m, z17.s, z14.s\n"
+ "fmax z18.s, p7/m, z18.s, z14.s\n"
+ "fmax z19.s, p7/m, z19.s, z14.s\n"
+ "fmin z16.s, p7/m, z16.s, z15.s\n"
+ "fmin z17.s, p7/m, z17.s, z15.s\n"
+ "fmin z18.s, p7/m, z18.s, z15.s\n"
+ "fmin z19.s, p7/m, z19.s, z15.s\n"
"st1w z16.s, p0, [%[c_ptr0]]\n"
+ "fmax z20.s, p7/m, z20.s, z14.s\n"
+ "fmax z21.s, p7/m, z21.s, z14.s\n"
+ "fmax z22.s, p7/m, z22.s, z14.s\n"
"st1w z17.s, p1, [%[c_ptr0], #1, MUL VL]\n"
+ "fmax z23.s, p7/m, z23.s, z14.s\n"
+ "fmin z20.s, p7/m, z20.s, z15.s\n"
+ "fmin z21.s, p7/m, z21.s, z15.s\n"
"st1w z18.s, p2, [%[c_ptr0], #2, MUL VL]\n"
+ "fmin z22.s, p7/m, z22.s, z15.s\n"
+ "fmin z23.s, p7/m, z23.s, z15.s\n"
"st1w z19.s, p3, [%[c_ptr0], #3, MUL VL]\n"
"addvl %[c_ptr0], %[c_ptr0], #4\n"
"st1w z20.s, p0, [c_ptr1]\n"
@@ -748,7 +799,7 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
".unreq a_ptr1\n"
".unreq c_ptr1\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [betaptr] "r" (betaptr), [width] "r" (width), [beta0] "r" (beta0), [leftovers] "r" (leftovers), [lda] "r" (ldab), [ldc] "r" (ldcb)
+ : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "cc", "memory"
);
break;
@@ -760,110 +811,97 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"c_ptr2 .req X3\n"
"add a_ptr1, %[a_ptr0], %[lda]\n"
"add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
"whilelt p6.s, %[temp], %[leftovers]\n"
"whilelt p0.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
- "add a_ptr2, a_ptr1, %[lda]\n"
- "add c_ptr2, c_ptr1, %[ldc]\n"
"ptrue p7.s\n"
"whilelt p1.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p2.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p3.s, %[temp], %[width]\n"
- "cbz %[beta0], 1f\n"
- "mov z16.s, #0\n"
+ "cbnz %[append], 1f\n"
+ "ld1w z16.s, p0/z, [%[biasptr]]\n"
+ "ld1w z17.s, p1/z, [%[biasptr], #1, MUL VL]\n"
+ "ld1w z18.s, p2/z, [%[biasptr], #2, MUL VL]\n"
+ "ld1w z19.s, p3/z, [%[biasptr], #3, MUL VL]\n"
+ "mov z20.d, z16.d\n"
"ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
- "mov z17.s, #0\n"
+ "mov z21.d, z17.d\n"
"ld1rqw z1.s, p7/z, [a_ptr1]\n"
- "mov z18.s, #0\n"
+ "mov z22.d, z18.d\n"
"ld1rqw z2.s, p7/z, [a_ptr2]\n"
- "mov z19.s, #0\n"
+ "mov z23.d, z19.d\n"
"ld1w z8.s, p7/z, [%[b_ptr0]]\n"
- "mov z20.s, #0\n"
+ "mov z24.d, z16.d\n"
"ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
- "mov z21.s, #0\n"
+ "mov z25.d, z17.d\n"
"ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
- "mov z22.s, #0\n"
+ "mov z26.d, z18.d\n"
"ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
- "mov z23.s, #0\n"
+ "mov z27.d, z19.d\n"
"ld1w z12.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
- "mov z24.s, #0\n"
"ld1w z13.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
- "mov z25.s, #0\n"
- "ld1w z14.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
- "mov z26.s, #0\n"
- "ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
- "mov z27.s, #0\n"
"add %[a_ptr0], %[a_ptr0], #0x10\n"
+ "ld1w z14.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
"add a_ptr1, a_ptr1, #0x10\n"
"add a_ptr2, a_ptr2, #0x10\n"
- "b 2f\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "b 3f\n"
"1:\n"
- "ld1rw z15.s, p7/z, [%[betaptr]]\n"
"ld1w z16.s, p0/z, [%[c_ptr0]]\n"
"ld1w z17.s, p1/z, [%[c_ptr0], #1, MUL VL]\n"
"ld1w z18.s, p2/z, [%[c_ptr0], #2, MUL VL]\n"
"ld1w z19.s, p3/z, [%[c_ptr0], #3, MUL VL]\n"
"ld1w z20.s, p0/z, [c_ptr1]\n"
- "fmul z16.s, p7/m, z16.s, z15.s\n"
"ld1w z21.s, p1/z, [c_ptr1, #1, MUL VL]\n"
- "fmul z17.s, p7/m, z17.s, z15.s\n"
"ld1w z22.s, p2/z, [c_ptr1, #2, MUL VL]\n"
- "fmul z18.s, p7/m, z18.s, z15.s\n"
"ld1w z23.s, p3/z, [c_ptr1, #3, MUL VL]\n"
- "fmul z19.s, p7/m, z19.s, z15.s\n"
"ld1w z24.s, p0/z, [c_ptr2]\n"
- "fmul z20.s, p7/m, z20.s, z15.s\n"
"ld1w z25.s, p1/z, [c_ptr2, #1, MUL VL]\n"
- "fmul z21.s, p7/m, z21.s, z15.s\n"
"ld1w z26.s, p2/z, [c_ptr2, #2, MUL VL]\n"
- "fmul z22.s, p7/m, z22.s, z15.s\n"
"ld1w z27.s, p3/z, [c_ptr2, #3, MUL VL]\n"
- "fmul z23.s, p7/m, z23.s, z15.s\n"
"ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
- "fmul z24.s, p7/m, z24.s, z15.s\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
"ld1rqw z1.s, p7/z, [a_ptr1]\n"
- "fmul z25.s, p7/m, z25.s, z15.s\n"
+ "add a_ptr1, a_ptr1, #0x10\n"
"ld1rqw z2.s, p7/z, [a_ptr2]\n"
- "fmul z26.s, p7/m, z26.s, z15.s\n"
+ "add a_ptr2, a_ptr2, #0x10\n"
"ld1w z8.s, p7/z, [%[b_ptr0]]\n"
- "fmul z27.s, p7/m, z27.s, z15.s\n"
"ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
"ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
"ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
- "add a_ptr1, a_ptr1, #0x10\n"
"ld1w z12.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
- "add a_ptr2, a_ptr2, #0x10\n"
"ld1w z13.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
"ld1w z14.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
- "ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
- "2:\n"
"addvl %[b_ptr0], %[b_ptr0], #8\n"
- "cbz %[loops], 3f\n"
- "4:\n"
+ "cbz %[loops], 2f\n"
+ "3:\n"
"fmla z16.s, z8.s, z0.s[0]\n"
- "ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
+ "ld1w z15.s, p7/z, [%[b_ptr0], #-1, MUL VL]\n"
"fmla z20.s, z8.s, z1.s[0]\n"
- "ld1rqw z5.s, p7/z, [a_ptr1]\n"
+ "ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
"fmla z24.s, z8.s, z2.s[0]\n"
- "ld1rqw z6.s, p7/z, [a_ptr2]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr1]\n"
"fmla z17.s, z9.s, z0.s[0]\n"
- "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr2]\n"
"fmla z21.s, z9.s, z1.s[0]\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
+ "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
"fmla z25.s, z9.s, z2.s[0]\n"
"ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z10.s, z0.s[0]\n"
- "add a_ptr1, a_ptr1, #0x20\n"
+ "subs %[loops], %[loops], #0x1\n"
"fmla z22.s, z10.s, z1.s[0]\n"
- "add a_ptr2, a_ptr2, #0x20\n"
+ "add %[a_ptr0], %[a_ptr0], #0x20\n"
"fmla z26.s, z10.s, z2.s[0]\n"
"ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z11.s, z0.s[0]\n"
- "subs %[loops], %[loops], #0x1\n"
+ "add a_ptr1, a_ptr1, #0x20\n"
"fmla z23.s, z11.s, z1.s[0]\n"
+ "add a_ptr2, a_ptr2, #0x20\n"
"fmla z27.s, z11.s, z2.s[0]\n"
"ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z16.s, z12.s, z0.s[1]\n"
@@ -887,8 +925,8 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z20.s, z8.s, z1.s[2]\n"
"fmla z24.s, z8.s, z2.s[2]\n"
"fmla z17.s, z9.s, z0.s[2]\n"
- "fmla z21.s, z9.s, z1.s[2]\n"
"ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
+ "fmla z21.s, z9.s, z1.s[2]\n"
"fmla z25.s, z9.s, z2.s[2]\n"
"ld1w z9.s, p7/z, [%[b_ptr0], #-7, MUL VL]\n"
"fmla z18.s, z10.s, z0.s[2]\n"
@@ -955,8 +993,8 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z20.s, z8.s, z5.s[2]\n"
"fmla z24.s, z8.s, z6.s[2]\n"
"fmla z17.s, z9.s, z4.s[2]\n"
- "fmla z21.s, z9.s, z5.s[2]\n"
"ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
+ "fmla z21.s, z9.s, z5.s[2]\n"
"fmla z25.s, z9.s, z6.s[2]\n"
"ld1w z9.s, p7/z, [%[b_ptr0], #-7, MUL VL]\n"
"fmla z18.s, z10.s, z4.s[2]\n"
@@ -982,19 +1020,19 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z19.s, z15.s, z4.s[3]\n"
"fmla z23.s, z15.s, z5.s[3]\n"
"fmla z27.s, z15.s, z6.s[3]\n"
- "ld1w z15.s, p7/z, [%[b_ptr0], #-1, MUL VL]\n"
- "b.ne 4b\n"
- "3:\n"
- "cbz %[regs], 5f\n"
+ "b.ne 3b\n"
+ "2:\n"
+ "cbz %[regs], 4f\n"
"fmla z16.s, z8.s, z0.s[0]\n"
- "ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
+ "ld1w z15.s, p7/z, [%[b_ptr0], #-1, MUL VL]\n"
"fmla z20.s, z8.s, z1.s[0]\n"
- "ld1rqw z5.s, p7/z, [a_ptr1]\n"
+ "ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
"fmla z24.s, z8.s, z2.s[0]\n"
- "ld1rqw z6.s, p7/z, [a_ptr2]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr1]\n"
"fmla z17.s, z9.s, z0.s[0]\n"
- "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr2]\n"
"fmla z21.s, z9.s, z1.s[0]\n"
+ "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
"fmla z25.s, z9.s, z2.s[0]\n"
"ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z10.s, z0.s[0]\n"
@@ -1026,8 +1064,8 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z20.s, z8.s, z1.s[2]\n"
"fmla z24.s, z8.s, z2.s[2]\n"
"fmla z17.s, z9.s, z0.s[2]\n"
- "fmla z21.s, z9.s, z1.s[2]\n"
"ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
+ "fmla z21.s, z9.s, z1.s[2]\n"
"fmla z25.s, z9.s, z2.s[2]\n"
"ld1w z9.s, p7/z, [%[b_ptr0], #-7, MUL VL]\n"
"fmla z18.s, z10.s, z0.s[2]\n"
@@ -1059,10 +1097,13 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z16.s, z8.s, z4.s[0]\n"
"ld1rqw z2.s, p6/z, [a_ptr2, #0x10]\n"
"fmla z20.s, z8.s, z5.s[0]\n"
+ "addvl %[a_ptr0], %[a_ptr0], #2\n"
"fmla z24.s, z8.s, z6.s[0]\n"
"ld1w z8.s, p7/z, [%[b_ptr0]]\n"
"fmla z17.s, z9.s, z4.s[0]\n"
+ "addvl a_ptr1, a_ptr1, #2\n"
"fmla z21.s, z9.s, z5.s[0]\n"
+ "addvl a_ptr2, a_ptr2, #2\n"
"fmla z25.s, z9.s, z6.s[0]\n"
"ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z10.s, z4.s[0]\n"
@@ -1090,6 +1131,7 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z27.s, z15.s, z6.s[1]\n"
"ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
"fmla z16.s, z8.s, z4.s[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
"fmla z20.s, z8.s, z5.s[2]\n"
"fmla z24.s, z8.s, z6.s[2]\n"
"fmla z17.s, z9.s, z4.s[2]\n"
@@ -1113,14 +1155,13 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z19.s, z15.s, z4.s[3]\n"
"fmla z23.s, z15.s, z5.s[3]\n"
"fmla z27.s, z15.s, z6.s[3]\n"
- "cbz %[blocks], 6f\n"
- "addvl %[b_ptr0], %[b_ptr0], #16\n"
+ "cbz %[blocks], 5f\n"
+ "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
"subs %[blocks], %[blocks], #0x1\n"
- "ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
- "ld1w z9.s, p7/z, [%[b_ptr0], #-7, MUL VL]\n"
- "ld1w z10.s, p7/z, [%[b_ptr0], #-6, MUL VL]\n"
- "ld1w z11.s, p7/z, [%[b_ptr0], #-5, MUL VL]\n"
+ "ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z16.s, z8.s, z0.s[0]\n"
+ "ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z20.s, z8.s, z1.s[0]\n"
"fmla z24.s, z8.s, z2.s[0]\n"
"fmla z17.s, z9.s, z0.s[0]\n"
@@ -1132,13 +1173,13 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z19.s, z11.s, z0.s[0]\n"
"fmla z23.s, z11.s, z1.s[0]\n"
"fmla z27.s, z11.s, z2.s[0]\n"
- "b.eq 6f\n"
- "ld1w z12.s, p7/z, [%[b_ptr0], #-4, MUL VL]\n"
+ "b.eq 5f\n"
+ "ld1w z12.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
"subs %[blocks], %[blocks], #0x1\n"
- "ld1w z13.s, p7/z, [%[b_ptr0], #-3, MUL VL]\n"
- "ld1w z14.s, p7/z, [%[b_ptr0], #-2, MUL VL]\n"
- "ld1w z15.s, p7/z, [%[b_ptr0], #-1, MUL VL]\n"
+ "ld1w z13.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z14.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
"fmla z16.s, z12.s, z0.s[1]\n"
+ "ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
"fmla z20.s, z12.s, z1.s[1]\n"
"fmla z24.s, z12.s, z2.s[1]\n"
"fmla z17.s, z13.s, z0.s[1]\n"
@@ -1150,11 +1191,12 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z19.s, z15.s, z0.s[1]\n"
"fmla z23.s, z15.s, z1.s[1]\n"
"fmla z27.s, z15.s, z2.s[1]\n"
- "b.eq 6f\n"
- "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
- "ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "b.eq 5f\n"
+ "addvl %[b_ptr0], %[b_ptr0], #16\n"
+ "ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
+ "ld1w z9.s, p7/z, [%[b_ptr0], #-7, MUL VL]\n"
+ "ld1w z10.s, p7/z, [%[b_ptr0], #-6, MUL VL]\n"
+ "ld1w z11.s, p7/z, [%[b_ptr0], #-5, MUL VL]\n"
"fmla z16.s, z8.s, z0.s[2]\n"
"fmla z20.s, z8.s, z1.s[2]\n"
"fmla z24.s, z8.s, z2.s[2]\n"
@@ -1167,24 +1209,28 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z19.s, z11.s, z0.s[2]\n"
"fmla z23.s, z11.s, z1.s[2]\n"
"fmla z27.s, z11.s, z2.s[2]\n"
- "b 6f\n"
- "5:\n"
+ "b 5f\n"
+ "4:\n"
"fmla z16.s, z8.s, z0.s[0]\n"
- "ld1rqw z4.s, p6/z, [%[a_ptr0]]\n"
+ "ld1w z15.s, p7/z, [%[b_ptr0], #-1, MUL VL]\n"
"fmla z20.s, z8.s, z1.s[0]\n"
- "ld1rqw z5.s, p6/z, [a_ptr1]\n"
+ "ld1rqw z4.s, p6/z, [%[a_ptr0]]\n"
"fmla z24.s, z8.s, z2.s[0]\n"
- "ld1rqw z6.s, p6/z, [a_ptr2]\n"
- "fmla z17.s, z9.s, z0.s[0]\n"
"ld1w z8.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z17.s, z9.s, z0.s[0]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr1]\n"
"fmla z21.s, z9.s, z1.s[0]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr2]\n"
"fmla z25.s, z9.s, z2.s[0]\n"
"ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z10.s, z0.s[0]\n"
+ "addvl %[a_ptr0], %[a_ptr0], #1\n"
"fmla z22.s, z10.s, z1.s[0]\n"
+ "addvl a_ptr1, a_ptr1, #1\n"
"fmla z26.s, z10.s, z2.s[0]\n"
"ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z11.s, z0.s[0]\n"
+ "addvl a_ptr2, a_ptr2, #1\n"
"fmla z23.s, z11.s, z1.s[0]\n"
"fmla z27.s, z11.s, z2.s[0]\n"
"ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
@@ -1205,6 +1251,7 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z27.s, z15.s, z2.s[1]\n"
"ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
"fmla z16.s, z8.s, z0.s[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
"fmla z20.s, z8.s, z1.s[2]\n"
"fmla z24.s, z8.s, z2.s[2]\n"
"fmla z17.s, z9.s, z0.s[2]\n"
@@ -1228,14 +1275,13 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z19.s, z15.s, z0.s[3]\n"
"fmla z23.s, z15.s, z1.s[3]\n"
"fmla z27.s, z15.s, z2.s[3]\n"
- "cbz %[blocks], 6f\n"
- "addvl %[b_ptr0], %[b_ptr0], #16\n"
+ "cbz %[blocks], 5f\n"
+ "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
"subs %[blocks], %[blocks], #0x1\n"
- "ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
- "ld1w z9.s, p7/z, [%[b_ptr0], #-7, MUL VL]\n"
- "ld1w z10.s, p7/z, [%[b_ptr0], #-6, MUL VL]\n"
- "ld1w z11.s, p7/z, [%[b_ptr0], #-5, MUL VL]\n"
+ "ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z16.s, z8.s, z4.s[0]\n"
+ "ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z20.s, z8.s, z5.s[0]\n"
"fmla z24.s, z8.s, z6.s[0]\n"
"fmla z17.s, z9.s, z4.s[0]\n"
@@ -1247,13 +1293,13 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z19.s, z11.s, z4.s[0]\n"
"fmla z23.s, z11.s, z5.s[0]\n"
"fmla z27.s, z11.s, z6.s[0]\n"
- "b.eq 6f\n"
- "ld1w z12.s, p7/z, [%[b_ptr0], #-4, MUL VL]\n"
+ "b.eq 5f\n"
+ "ld1w z12.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
"subs %[blocks], %[blocks], #0x1\n"
- "ld1w z13.s, p7/z, [%[b_ptr0], #-3, MUL VL]\n"
- "ld1w z14.s, p7/z, [%[b_ptr0], #-2, MUL VL]\n"
- "ld1w z15.s, p7/z, [%[b_ptr0], #-1, MUL VL]\n"
+ "ld1w z13.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z14.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
"fmla z16.s, z12.s, z4.s[1]\n"
+ "ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
"fmla z20.s, z12.s, z5.s[1]\n"
"fmla z24.s, z12.s, z6.s[1]\n"
"fmla z17.s, z13.s, z4.s[1]\n"
@@ -1265,11 +1311,12 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z19.s, z15.s, z4.s[1]\n"
"fmla z23.s, z15.s, z5.s[1]\n"
"fmla z27.s, z15.s, z6.s[1]\n"
- "b.eq 6f\n"
- "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
- "ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "b.eq 5f\n"
+ "addvl %[b_ptr0], %[b_ptr0], #16\n"
+ "ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
+ "ld1w z9.s, p7/z, [%[b_ptr0], #-7, MUL VL]\n"
+ "ld1w z10.s, p7/z, [%[b_ptr0], #-6, MUL VL]\n"
+ "ld1w z11.s, p7/z, [%[b_ptr0], #-5, MUL VL]\n"
"fmla z16.s, z8.s, z4.s[2]\n"
"fmla z20.s, z8.s, z5.s[2]\n"
"fmla z24.s, z8.s, z6.s[2]\n"
@@ -1282,14 +1329,40 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z19.s, z11.s, z4.s[2]\n"
"fmla z23.s, z11.s, z5.s[2]\n"
"fmla z27.s, z11.s, z6.s[2]\n"
- "6:\n"
+ "5:\n"
+ "ld1rw z14.s, p7/z, [%[minptr]]\n"
+ "ld1rw z15.s, p7/z, [%[maxptr]]\n"
+ "fmax z16.s, p7/m, z16.s, z14.s\n"
+ "fmax z17.s, p7/m, z17.s, z14.s\n"
+ "fmax z18.s, p7/m, z18.s, z14.s\n"
+ "fmax z19.s, p7/m, z19.s, z14.s\n"
+ "fmin z16.s, p7/m, z16.s, z15.s\n"
+ "fmin z17.s, p7/m, z17.s, z15.s\n"
+ "fmin z18.s, p7/m, z18.s, z15.s\n"
+ "fmin z19.s, p7/m, z19.s, z15.s\n"
"st1w z16.s, p0, [%[c_ptr0]]\n"
+ "fmax z20.s, p7/m, z20.s, z14.s\n"
+ "fmax z21.s, p7/m, z21.s, z14.s\n"
+ "fmax z22.s, p7/m, z22.s, z14.s\n"
"st1w z17.s, p1, [%[c_ptr0], #1, MUL VL]\n"
+ "fmax z23.s, p7/m, z23.s, z14.s\n"
+ "fmin z20.s, p7/m, z20.s, z15.s\n"
+ "fmin z21.s, p7/m, z21.s, z15.s\n"
"st1w z18.s, p2, [%[c_ptr0], #2, MUL VL]\n"
+ "fmin z22.s, p7/m, z22.s, z15.s\n"
+ "fmin z23.s, p7/m, z23.s, z15.s\n"
+ "fmax z24.s, p7/m, z24.s, z14.s\n"
"st1w z19.s, p3, [%[c_ptr0], #3, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z14.s\n"
"addvl %[c_ptr0], %[c_ptr0], #4\n"
+ "fmax z26.s, p7/m, z26.s, z14.s\n"
"st1w z20.s, p0, [c_ptr1]\n"
+ "fmin z24.s, p7/m, z24.s, z15.s\n"
+ "fmin z25.s, p7/m, z25.s, z15.s\n"
+ "fmax z27.s, p7/m, z27.s, z14.s\n"
"st1w z21.s, p1, [c_ptr1, #1, MUL VL]\n"
+ "fmin z26.s, p7/m, z26.s, z15.s\n"
+ "fmin z27.s, p7/m, z27.s, z15.s\n"
"st1w z22.s, p2, [c_ptr1, #2, MUL VL]\n"
"st1w z23.s, p3, [c_ptr1, #3, MUL VL]\n"
"st1w z24.s, p0, [c_ptr2]\n"
@@ -1301,7 +1374,7 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
".unreq c_ptr1\n"
".unreq c_ptr2\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [betaptr] "r" (betaptr), [width] "r" (width), [beta0] "r" (beta0), [leftovers] "r" (leftovers), [lda] "r" (ldab), [ldc] "r" (ldcb)
+ : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "cc", "memory"
);
break;
@@ -1316,132 +1389,115 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"c_ptr3 .req X5\n"
"add a_ptr1, %[a_ptr0], %[lda]\n"
"add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
"whilelt p6.s, %[temp], %[leftovers]\n"
"whilelt p0.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
- "add a_ptr2, a_ptr1, %[lda]\n"
- "add c_ptr2, c_ptr1, %[ldc]\n"
"ptrue p7.s\n"
"whilelt p1.s, %[temp], %[width]\n"
- "add a_ptr3, a_ptr2, %[lda]\n"
- "add c_ptr3, c_ptr2, %[ldc]\n"
"incw %[temp], all, mul #1\n"
"whilelt p2.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p3.s, %[temp], %[width]\n"
- "cbz %[beta0], 1f\n"
- "mov z16.s, #0\n"
+ "cbnz %[append], 1f\n"
+ "ld1w z16.s, p0/z, [%[biasptr]]\n"
+ "ld1w z17.s, p1/z, [%[biasptr], #1, MUL VL]\n"
+ "ld1w z18.s, p2/z, [%[biasptr], #2, MUL VL]\n"
+ "ld1w z19.s, p3/z, [%[biasptr], #3, MUL VL]\n"
+ "mov z20.d, z16.d\n"
"ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
- "mov z17.s, #0\n"
+ "mov z21.d, z17.d\n"
"ld1rqw z1.s, p7/z, [a_ptr1]\n"
- "mov z18.s, #0\n"
+ "mov z22.d, z18.d\n"
"ld1rqw z2.s, p7/z, [a_ptr2]\n"
- "mov z19.s, #0\n"
+ "mov z23.d, z19.d\n"
"ld1rqw z3.s, p7/z, [a_ptr3]\n"
- "mov z20.s, #0\n"
+ "mov z24.d, z16.d\n"
"ld1w z8.s, p7/z, [%[b_ptr0]]\n"
- "mov z21.s, #0\n"
+ "mov z25.d, z17.d\n"
"ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
- "mov z22.s, #0\n"
+ "mov z26.d, z18.d\n"
"ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
- "mov z23.s, #0\n"
+ "mov z27.d, z19.d\n"
"ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
- "mov z24.s, #0\n"
+ "mov z28.d, z16.d\n"
"ld1w z12.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
- "mov z25.s, #0\n"
+ "mov z29.d, z17.d\n"
"ld1w z13.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
- "mov z26.s, #0\n"
+ "mov z30.d, z18.d\n"
"ld1w z14.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
- "mov z27.s, #0\n"
- "ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
- "mov z28.s, #0\n"
+ "mov z31.d, z19.d\n"
"add %[a_ptr0], %[a_ptr0], #0x10\n"
- "mov z29.s, #0\n"
"add a_ptr1, a_ptr1, #0x10\n"
- "mov z30.s, #0\n"
"add a_ptr2, a_ptr2, #0x10\n"
- "mov z31.s, #0\n"
"add a_ptr3, a_ptr3, #0x10\n"
- "b 2f\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
+ "cbz %[loops], 2f\n"
+ "b 3f\n"
"1:\n"
- "ld1rw z15.s, p7/z, [%[betaptr]]\n"
"ld1w z16.s, p0/z, [%[c_ptr0]]\n"
"ld1w z17.s, p1/z, [%[c_ptr0], #1, MUL VL]\n"
"ld1w z18.s, p2/z, [%[c_ptr0], #2, MUL VL]\n"
"ld1w z19.s, p3/z, [%[c_ptr0], #3, MUL VL]\n"
"ld1w z20.s, p0/z, [c_ptr1]\n"
- "fmul z16.s, p7/m, z16.s, z15.s\n"
"ld1w z21.s, p1/z, [c_ptr1, #1, MUL VL]\n"
- "fmul z17.s, p7/m, z17.s, z15.s\n"
"ld1w z22.s, p2/z, [c_ptr1, #2, MUL VL]\n"
- "fmul z18.s, p7/m, z18.s, z15.s\n"
"ld1w z23.s, p3/z, [c_ptr1, #3, MUL VL]\n"
- "fmul z19.s, p7/m, z19.s, z15.s\n"
"ld1w z24.s, p0/z, [c_ptr2]\n"
- "fmul z20.s, p7/m, z20.s, z15.s\n"
"ld1w z25.s, p1/z, [c_ptr2, #1, MUL VL]\n"
- "fmul z21.s, p7/m, z21.s, z15.s\n"
"ld1w z26.s, p2/z, [c_ptr2, #2, MUL VL]\n"
- "fmul z22.s, p7/m, z22.s, z15.s\n"
"ld1w z27.s, p3/z, [c_ptr2, #3, MUL VL]\n"
- "fmul z23.s, p7/m, z23.s, z15.s\n"
"ld1w z28.s, p0/z, [c_ptr3]\n"
- "fmul z24.s, p7/m, z24.s, z15.s\n"
"ld1w z29.s, p1/z, [c_ptr3, #1, MUL VL]\n"
- "fmul z25.s, p7/m, z25.s, z15.s\n"
"ld1w z30.s, p2/z, [c_ptr3, #2, MUL VL]\n"
- "fmul z26.s, p7/m, z26.s, z15.s\n"
"ld1w z31.s, p3/z, [c_ptr3, #3, MUL VL]\n"
- "fmul z27.s, p7/m, z27.s, z15.s\n"
"ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
- "fmul z28.s, p7/m, z28.s, z15.s\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
"ld1rqw z1.s, p7/z, [a_ptr1]\n"
- "fmul z29.s, p7/m, z29.s, z15.s\n"
+ "add a_ptr1, a_ptr1, #0x10\n"
"ld1rqw z2.s, p7/z, [a_ptr2]\n"
- "fmul z30.s, p7/m, z30.s, z15.s\n"
+ "add a_ptr2, a_ptr2, #0x10\n"
"ld1rqw z3.s, p7/z, [a_ptr3]\n"
- "fmul z31.s, p7/m, z31.s, z15.s\n"
+ "add a_ptr3, a_ptr3, #0x10\n"
"ld1w z8.s, p7/z, [%[b_ptr0]]\n"
"ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
"ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
- "add a_ptr1, a_ptr1, #0x10\n"
"ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
- "add a_ptr2, a_ptr2, #0x10\n"
"ld1w z12.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
- "add a_ptr3, a_ptr3, #0x10\n"
"ld1w z13.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
"ld1w z14.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
- "ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
- "2:\n"
"addvl %[b_ptr0], %[b_ptr0], #8\n"
- "cbz %[loops], 3f\n"
- "4:\n"
+ "cbz %[loops], 2f\n"
+ "3:\n"
"fmla z16.s, z8.s, z0.s[0]\n"
- "ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
+ "ld1w z15.s, p7/z, [%[b_ptr0], #-1, MUL VL]\n"
"fmla z20.s, z8.s, z1.s[0]\n"
- "ld1rqw z5.s, p7/z, [a_ptr1]\n"
+ "ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
"fmla z24.s, z8.s, z2.s[0]\n"
- "ld1rqw z6.s, p7/z, [a_ptr2]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr1]\n"
"fmla z28.s, z8.s, z3.s[0]\n"
- "ld1rqw z7.s, p7/z, [a_ptr3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr2]\n"
"fmla z17.s, z9.s, z0.s[0]\n"
- "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr3]\n"
"fmla z21.s, z9.s, z1.s[0]\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
+ "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
"fmla z25.s, z9.s, z2.s[0]\n"
- "add a_ptr1, a_ptr1, #0x20\n"
+ "subs %[loops], %[loops], #0x1\n"
"fmla z29.s, z9.s, z3.s[0]\n"
"ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z10.s, z0.s[0]\n"
- "add a_ptr2, a_ptr2, #0x20\n"
+ "add %[a_ptr0], %[a_ptr0], #0x20\n"
"fmla z22.s, z10.s, z1.s[0]\n"
- "add a_ptr3, a_ptr3, #0x20\n"
+ "add a_ptr1, a_ptr1, #0x20\n"
"fmla z26.s, z10.s, z2.s[0]\n"
- "subs %[loops], %[loops], #0x1\n"
+ "add a_ptr2, a_ptr2, #0x20\n"
"fmla z30.s, z10.s, z3.s[0]\n"
"ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z11.s, z0.s[0]\n"
+ "add a_ptr3, a_ptr3, #0x20\n"
"fmla z23.s, z11.s, z1.s[0]\n"
"fmla z27.s, z11.s, z2.s[0]\n"
"fmla z31.s, z11.s, z3.s[0]\n"
@@ -1471,8 +1527,8 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z20.s, z8.s, z1.s[2]\n"
"fmla z24.s, z8.s, z2.s[2]\n"
"fmla z28.s, z8.s, z3.s[2]\n"
- "fmla z17.s, z9.s, z0.s[2]\n"
"ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
+ "fmla z17.s, z9.s, z0.s[2]\n"
"fmla z21.s, z9.s, z1.s[2]\n"
"fmla z25.s, z9.s, z2.s[2]\n"
"fmla z29.s, z9.s, z3.s[2]\n"
@@ -1556,8 +1612,8 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z20.s, z8.s, z5.s[2]\n"
"fmla z24.s, z8.s, z6.s[2]\n"
"fmla z28.s, z8.s, z7.s[2]\n"
- "fmla z17.s, z9.s, z4.s[2]\n"
"ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
+ "fmla z17.s, z9.s, z4.s[2]\n"
"fmla z21.s, z9.s, z5.s[2]\n"
"fmla z25.s, z9.s, z6.s[2]\n"
"fmla z29.s, z9.s, z7.s[2]\n"
@@ -1591,21 +1647,21 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z23.s, z15.s, z5.s[3]\n"
"fmla z27.s, z15.s, z6.s[3]\n"
"fmla z31.s, z15.s, z7.s[3]\n"
- "ld1w z15.s, p7/z, [%[b_ptr0], #-1, MUL VL]\n"
- "b.ne 4b\n"
- "3:\n"
- "cbz %[regs], 5f\n"
+ "b.ne 3b\n"
+ "2:\n"
+ "cbz %[regs], 4f\n"
"fmla z16.s, z8.s, z0.s[0]\n"
- "ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
+ "ld1w z15.s, p7/z, [%[b_ptr0], #-1, MUL VL]\n"
"fmla z20.s, z8.s, z1.s[0]\n"
- "ld1rqw z5.s, p7/z, [a_ptr1]\n"
+ "ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
"fmla z24.s, z8.s, z2.s[0]\n"
- "ld1rqw z6.s, p7/z, [a_ptr2]\n"
+ "ld1rqw z5.s, p7/z, [a_ptr1]\n"
"fmla z28.s, z8.s, z3.s[0]\n"
- "ld1rqw z7.s, p7/z, [a_ptr3]\n"
+ "ld1rqw z6.s, p7/z, [a_ptr2]\n"
"fmla z17.s, z9.s, z0.s[0]\n"
- "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
+ "ld1rqw z7.s, p7/z, [a_ptr3]\n"
"fmla z21.s, z9.s, z1.s[0]\n"
+ "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
"fmla z25.s, z9.s, z2.s[0]\n"
"fmla z29.s, z9.s, z3.s[0]\n"
"ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
@@ -1644,8 +1700,8 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z20.s, z8.s, z1.s[2]\n"
"fmla z24.s, z8.s, z2.s[2]\n"
"fmla z28.s, z8.s, z3.s[2]\n"
- "fmla z17.s, z9.s, z0.s[2]\n"
"ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
+ "fmla z17.s, z9.s, z0.s[2]\n"
"fmla z21.s, z9.s, z1.s[2]\n"
"fmla z25.s, z9.s, z2.s[2]\n"
"fmla z29.s, z9.s, z3.s[2]\n"
@@ -1686,11 +1742,15 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z16.s, z8.s, z4.s[0]\n"
"ld1rqw z3.s, p6/z, [a_ptr3, #0x10]\n"
"fmla z20.s, z8.s, z5.s[0]\n"
+ "addvl %[a_ptr0], %[a_ptr0], #2\n"
"fmla z24.s, z8.s, z6.s[0]\n"
+ "addvl a_ptr1, a_ptr1, #2\n"
"fmla z28.s, z8.s, z7.s[0]\n"
"ld1w z8.s, p7/z, [%[b_ptr0]]\n"
"fmla z17.s, z9.s, z4.s[0]\n"
+ "addvl a_ptr2, a_ptr2, #2\n"
"fmla z21.s, z9.s, z5.s[0]\n"
+ "addvl a_ptr3, a_ptr3, #2\n"
"fmla z25.s, z9.s, z6.s[0]\n"
"fmla z29.s, z9.s, z7.s[0]\n"
"ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
@@ -1725,6 +1785,7 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z31.s, z15.s, z7.s[1]\n"
"ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
"fmla z16.s, z8.s, z4.s[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
"fmla z20.s, z8.s, z5.s[2]\n"
"fmla z24.s, z8.s, z6.s[2]\n"
"fmla z28.s, z8.s, z7.s[2]\n"
@@ -1756,14 +1817,13 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z23.s, z15.s, z5.s[3]\n"
"fmla z27.s, z15.s, z6.s[3]\n"
"fmla z31.s, z15.s, z7.s[3]\n"
- "cbz %[blocks], 6f\n"
- "addvl %[b_ptr0], %[b_ptr0], #16\n"
+ "cbz %[blocks], 5f\n"
+ "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
"subs %[blocks], %[blocks], #0x1\n"
- "ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
- "ld1w z9.s, p7/z, [%[b_ptr0], #-7, MUL VL]\n"
- "ld1w z10.s, p7/z, [%[b_ptr0], #-6, MUL VL]\n"
- "ld1w z11.s, p7/z, [%[b_ptr0], #-5, MUL VL]\n"
+ "ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z16.s, z8.s, z0.s[0]\n"
+ "ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z20.s, z8.s, z1.s[0]\n"
"fmla z24.s, z8.s, z2.s[0]\n"
"fmla z28.s, z8.s, z3.s[0]\n"
@@ -1779,13 +1839,13 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z23.s, z11.s, z1.s[0]\n"
"fmla z27.s, z11.s, z2.s[0]\n"
"fmla z31.s, z11.s, z3.s[0]\n"
- "b.eq 6f\n"
- "ld1w z12.s, p7/z, [%[b_ptr0], #-4, MUL VL]\n"
+ "b.eq 5f\n"
+ "ld1w z12.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
"subs %[blocks], %[blocks], #0x1\n"
- "ld1w z13.s, p7/z, [%[b_ptr0], #-3, MUL VL]\n"
- "ld1w z14.s, p7/z, [%[b_ptr0], #-2, MUL VL]\n"
- "ld1w z15.s, p7/z, [%[b_ptr0], #-1, MUL VL]\n"
+ "ld1w z13.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z14.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
"fmla z16.s, z12.s, z0.s[1]\n"
+ "ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
"fmla z20.s, z12.s, z1.s[1]\n"
"fmla z24.s, z12.s, z2.s[1]\n"
"fmla z28.s, z12.s, z3.s[1]\n"
@@ -1801,11 +1861,12 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z23.s, z15.s, z1.s[1]\n"
"fmla z27.s, z15.s, z2.s[1]\n"
"fmla z31.s, z15.s, z3.s[1]\n"
- "b.eq 6f\n"
- "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
- "ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "b.eq 5f\n"
+ "addvl %[b_ptr0], %[b_ptr0], #16\n"
+ "ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
+ "ld1w z9.s, p7/z, [%[b_ptr0], #-7, MUL VL]\n"
+ "ld1w z10.s, p7/z, [%[b_ptr0], #-6, MUL VL]\n"
+ "ld1w z11.s, p7/z, [%[b_ptr0], #-5, MUL VL]\n"
"fmla z16.s, z8.s, z0.s[2]\n"
"fmla z20.s, z8.s, z1.s[2]\n"
"fmla z24.s, z8.s, z2.s[2]\n"
@@ -1822,25 +1883,30 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z23.s, z11.s, z1.s[2]\n"
"fmla z27.s, z11.s, z2.s[2]\n"
"fmla z31.s, z11.s, z3.s[2]\n"
- "b 6f\n"
- "5:\n"
+ "b 5f\n"
+ "4:\n"
"fmla z16.s, z8.s, z0.s[0]\n"
- "ld1rqw z4.s, p6/z, [%[a_ptr0]]\n"
+ "ld1w z15.s, p7/z, [%[b_ptr0], #-1, MUL VL]\n"
"fmla z20.s, z8.s, z1.s[0]\n"
- "ld1rqw z5.s, p6/z, [a_ptr1]\n"
+ "ld1rqw z4.s, p6/z, [%[a_ptr0]]\n"
"fmla z24.s, z8.s, z2.s[0]\n"
- "ld1rqw z6.s, p6/z, [a_ptr2]\n"
+ "ld1rqw z5.s, p6/z, [a_ptr1]\n"
"fmla z28.s, z8.s, z3.s[0]\n"
- "ld1rqw z7.s, p6/z, [a_ptr3]\n"
- "fmla z17.s, z9.s, z0.s[0]\n"
"ld1w z8.s, p7/z, [%[b_ptr0]]\n"
+ "fmla z17.s, z9.s, z0.s[0]\n"
+ "ld1rqw z6.s, p6/z, [a_ptr2]\n"
"fmla z21.s, z9.s, z1.s[0]\n"
+ "ld1rqw z7.s, p6/z, [a_ptr3]\n"
"fmla z25.s, z9.s, z2.s[0]\n"
+ "addvl %[a_ptr0], %[a_ptr0], #1\n"
"fmla z29.s, z9.s, z3.s[0]\n"
"ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z10.s, z0.s[0]\n"
+ "addvl a_ptr1, a_ptr1, #1\n"
"fmla z22.s, z10.s, z1.s[0]\n"
+ "addvl a_ptr2, a_ptr2, #1\n"
"fmla z26.s, z10.s, z2.s[0]\n"
+ "addvl a_ptr3, a_ptr3, #1\n"
"fmla z30.s, z10.s, z3.s[0]\n"
"ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z11.s, z0.s[0]\n"
@@ -1869,6 +1935,7 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z31.s, z15.s, z3.s[1]\n"
"ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
"fmla z16.s, z8.s, z0.s[2]\n"
+ "addvl %[b_ptr0], %[b_ptr0], #8\n"
"fmla z20.s, z8.s, z1.s[2]\n"
"fmla z24.s, z8.s, z2.s[2]\n"
"fmla z28.s, z8.s, z3.s[2]\n"
@@ -1900,14 +1967,13 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z23.s, z15.s, z1.s[3]\n"
"fmla z27.s, z15.s, z2.s[3]\n"
"fmla z31.s, z15.s, z3.s[3]\n"
- "cbz %[blocks], 6f\n"
- "addvl %[b_ptr0], %[b_ptr0], #16\n"
+ "cbz %[blocks], 5f\n"
+ "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
"subs %[blocks], %[blocks], #0x1\n"
- "ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
- "ld1w z9.s, p7/z, [%[b_ptr0], #-7, MUL VL]\n"
- "ld1w z10.s, p7/z, [%[b_ptr0], #-6, MUL VL]\n"
- "ld1w z11.s, p7/z, [%[b_ptr0], #-5, MUL VL]\n"
+ "ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z16.s, z8.s, z4.s[0]\n"
+ "ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z20.s, z8.s, z5.s[0]\n"
"fmla z24.s, z8.s, z6.s[0]\n"
"fmla z28.s, z8.s, z7.s[0]\n"
@@ -1923,13 +1989,13 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z23.s, z11.s, z5.s[0]\n"
"fmla z27.s, z11.s, z6.s[0]\n"
"fmla z31.s, z11.s, z7.s[0]\n"
- "b.eq 6f\n"
- "ld1w z12.s, p7/z, [%[b_ptr0], #-4, MUL VL]\n"
+ "b.eq 5f\n"
+ "ld1w z12.s, p7/z, [%[b_ptr0], #4, MUL VL]\n"
"subs %[blocks], %[blocks], #0x1\n"
- "ld1w z13.s, p7/z, [%[b_ptr0], #-3, MUL VL]\n"
- "ld1w z14.s, p7/z, [%[b_ptr0], #-2, MUL VL]\n"
- "ld1w z15.s, p7/z, [%[b_ptr0], #-1, MUL VL]\n"
+ "ld1w z13.s, p7/z, [%[b_ptr0], #5, MUL VL]\n"
+ "ld1w z14.s, p7/z, [%[b_ptr0], #6, MUL VL]\n"
"fmla z16.s, z12.s, z4.s[1]\n"
+ "ld1w z15.s, p7/z, [%[b_ptr0], #7, MUL VL]\n"
"fmla z20.s, z12.s, z5.s[1]\n"
"fmla z24.s, z12.s, z6.s[1]\n"
"fmla z28.s, z12.s, z7.s[1]\n"
@@ -1945,11 +2011,12 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z23.s, z15.s, z5.s[1]\n"
"fmla z27.s, z15.s, z6.s[1]\n"
"fmla z31.s, z15.s, z7.s[1]\n"
- "b.eq 6f\n"
- "ld1w z8.s, p7/z, [%[b_ptr0]]\n"
- "ld1w z9.s, p7/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z10.s, p7/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p7/z, [%[b_ptr0], #3, MUL VL]\n"
+ "b.eq 5f\n"
+ "addvl %[b_ptr0], %[b_ptr0], #16\n"
+ "ld1w z8.s, p7/z, [%[b_ptr0], #-8, MUL VL]\n"
+ "ld1w z9.s, p7/z, [%[b_ptr0], #-7, MUL VL]\n"
+ "ld1w z10.s, p7/z, [%[b_ptr0], #-6, MUL VL]\n"
+ "ld1w z11.s, p7/z, [%[b_ptr0], #-5, MUL VL]\n"
"fmla z16.s, z8.s, z4.s[2]\n"
"fmla z20.s, z8.s, z5.s[2]\n"
"fmla z24.s, z8.s, z6.s[2]\n"
@@ -1966,17 +2033,51 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"fmla z23.s, z11.s, z5.s[2]\n"
"fmla z27.s, z11.s, z6.s[2]\n"
"fmla z31.s, z11.s, z7.s[2]\n"
- "6:\n"
+ "5:\n"
+ "ld1rw z14.s, p7/z, [%[minptr]]\n"
+ "ld1rw z15.s, p7/z, [%[maxptr]]\n"
+ "fmax z16.s, p7/m, z16.s, z14.s\n"
+ "fmax z17.s, p7/m, z17.s, z14.s\n"
+ "fmax z18.s, p7/m, z18.s, z14.s\n"
+ "fmax z19.s, p7/m, z19.s, z14.s\n"
+ "fmin z16.s, p7/m, z16.s, z15.s\n"
+ "fmin z17.s, p7/m, z17.s, z15.s\n"
+ "fmin z18.s, p7/m, z18.s, z15.s\n"
+ "fmin z19.s, p7/m, z19.s, z15.s\n"
"st1w z16.s, p0, [%[c_ptr0]]\n"
+ "fmax z20.s, p7/m, z20.s, z14.s\n"
+ "fmax z21.s, p7/m, z21.s, z14.s\n"
+ "fmax z22.s, p7/m, z22.s, z14.s\n"
"st1w z17.s, p1, [%[c_ptr0], #1, MUL VL]\n"
+ "fmax z23.s, p7/m, z23.s, z14.s\n"
+ "fmin z20.s, p7/m, z20.s, z15.s\n"
+ "fmin z21.s, p7/m, z21.s, z15.s\n"
"st1w z18.s, p2, [%[c_ptr0], #2, MUL VL]\n"
+ "fmin z22.s, p7/m, z22.s, z15.s\n"
+ "fmin z23.s, p7/m, z23.s, z15.s\n"
+ "fmax z24.s, p7/m, z24.s, z14.s\n"
"st1w z19.s, p3, [%[c_ptr0], #3, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z14.s\n"
"addvl %[c_ptr0], %[c_ptr0], #4\n"
+ "fmax z26.s, p7/m, z26.s, z14.s\n"
"st1w z20.s, p0, [c_ptr1]\n"
+ "fmin z24.s, p7/m, z24.s, z15.s\n"
+ "fmin z25.s, p7/m, z25.s, z15.s\n"
+ "fmax z27.s, p7/m, z27.s, z14.s\n"
"st1w z21.s, p1, [c_ptr1, #1, MUL VL]\n"
+ "fmin z26.s, p7/m, z26.s, z15.s\n"
+ "fmax z28.s, p7/m, z28.s, z14.s\n"
+ "fmax z29.s, p7/m, z29.s, z14.s\n"
"st1w z22.s, p2, [c_ptr1, #2, MUL VL]\n"
+ "fmin z27.s, p7/m, z27.s, z15.s\n"
+ "fmax z30.s, p7/m, z30.s, z14.s\n"
+ "fmin z28.s, p7/m, z28.s, z15.s\n"
"st1w z23.s, p3, [c_ptr1, #3, MUL VL]\n"
+ "fmin z29.s, p7/m, z29.s, z15.s\n"
+ "fmax z31.s, p7/m, z31.s, z14.s\n"
+ "fmin z30.s, p7/m, z30.s, z15.s\n"
"st1w z24.s, p0, [c_ptr2]\n"
+ "fmin z31.s, p7/m, z31.s, z15.s\n"
"st1w z25.s, p1, [c_ptr2, #1, MUL VL]\n"
"st1w z26.s, p2, [c_ptr2, #2, MUL VL]\n"
"st1w z27.s, p3, [c_ptr2, #3, MUL VL]\n"
@@ -1991,11 +2092,12 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
".unreq c_ptr2\n"
".unreq c_ptr3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [betaptr] "r" (betaptr), [width] "r" (width), [beta0] "r" (beta0), [leftovers] "r" (leftovers), [lda] "r" (ldab), [ldc] "r" (ldcb)
+ : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "x4", "x5", "cc", "memory"
);
break;
}
+
}
}
}
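One detail in the rewritten operand list: append is passed as static_cast<uint64_t>(append) rather than as a raw bool. A plausible reading is that the asm tests it with cbnz on a full 64-bit register, so the explicit widening keeps the upper bits well defined; a sketch of that conversion:

    #include <cstdint>

    // Widening the bool explicitly guarantees the operand is exactly 0 or 1
    // across all 64 bits before the asm's cbnz inspects the whole register.
    uint64_t make_append_operand(bool append) {
        return static_cast<uint64_t>(append);
    }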