Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/kernels/sve_native_fp32_mla_4VLx4/generic.cpp')
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_native_fp32_mla_4VLx4/generic.cpp | 977
1 file changed, 483 insertions(+), 494 deletions(-)
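
The hunks below drop the old beta path (load C, scale by beta) in favour of accumulators seeded from a bias vector and a fused activation clamp: minval/maxval are derived from the new act argument (ReLU sets minval to 0, BoundedReLU additionally sets maxval to act.param1) and applied with fmax/fmin immediately before the st1w stores. As a rough orientation aid only, here is a minimal scalar sketch of that semantics for a single row of A; reference_row and its parameter names are illustrative, not part of arm_gemm, and the append case (accumulating onto existing C) is not covered.

#include <algorithm>

// Scalar sketch (assumption: one row-major row of A and native-layout B with
// leading dimension ldb), mirroring what the SVE kernel now does per row:
// accumulators start from the bias instead of beta * C, and the activation is
// fused as a min/max clamp just before the store.
static void reference_row(const float *a_row, const float *B, int ldb,
                          float *c_row, int N, int K,
                          const float *bias, float minval, float maxval) {
    for (int n = 0; n < N; n++) {
        float acc = bias ? bias[n] : 0.0f;       // bias seed (nullbias when absent)
        for (int k = 0; k < K; k++) {
            acc += a_row[k] * B[k * ldb + n];    // the fmla chain, one lane at a time
        }
        c_row[n] = std::min(std::max(acc, minval), maxval);  // fmax/fmin clamp
    }
}
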
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_native_fp32_mla_4VLx4/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_native_fp32_mla_4VLx4/generic.cpp
index 6e225669fc..3fc0e5fa36 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_native_fp32_mla_4VLx4/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_native_fp32_mla_4VLx4/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 Arm Limited.
+ * Copyright (c) 2018-2019 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,30 +25,49 @@
#include <algorithm>
+#include "arm_gemm.hpp"
#include "../../asmlib.hpp"
#include "../../utils.hpp"
namespace arm_gemm {
-void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb, float *C, int ldc, float beta, int M, int N, int K) {
- const long beta0 = (beta == 0.0f);
+void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool append) {
const long loops_count = ((K + 4) / 8) - 1;
K -= loops_count * 8;
const long regs_count = (K / 4) - 1;
K -= (regs_count + 1) * 4;
const long leftovers = K;
+ float nullbias[256];
+ if (!append && !bias) {
+ memset(nullbias, 0, (4 * get_vector_length<float>() * sizeof(float)));
+ }
+ float minval = - static_cast<float>(std::numeric_limits<float>::infinity());
+ float maxval = static_cast<float>(std::numeric_limits<float>::infinity());
+ const float * const minptr = &minval;
+ const float * const maxptr = &maxval;
+
+ switch(act.type)
+ {
+ default:
+ case Activation::Type::None:
+ break;
+ case Activation::Type::BoundedReLU:
+ maxval = static_cast<float>(act.param1);
+ /* fall through */
+ case Activation::Type::ReLU:
+ minval = 0.0f;
+ break;
+ }
for (int y=0; y<M; y+=4) {
const float * const a_ptr0_base = A + (y * lda);
const unsigned long ldab = lda * sizeof(float);
float *c_ptr0 = C + (y * ldc);
- const unsigned long ldcb = ldc * sizeof(float);
for (int x0=0; x0<N; x0+=(4 * get_vector_length<float>())) {
const long width = std::min((unsigned long)N-x0, (4 * get_vector_length<float>()));
- const float *betaptr = &beta;
long loops = loops_count;
long regs = regs_count;
long temp = 0;
@@ -56,6 +75,8 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
const float *a_ptr0 = a_ptr0_base;
const float *b_ptr0 = B + x0;
long ldbb = ldb * sizeof(float);
+ const unsigned long ldcb = ldc * sizeof(float);
+ const float *biasptr = bias ? bias+x0 : nullbias;
switch(M-y) {
case 1:
@@ -64,191 +85,173 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"whilelt p0.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"ptrue p7.s\n"
+ "ld1w z16.s, p0/z, [%[biasptr]]\n"
"whilelt p1.s, %[temp], %[width]\n"
- "incw %[temp], all, mul #1\n"
- "whilelt p2.s, %[temp], %[width]\n"
- "incw %[temp], all, mul #1\n"
- "whilelt p3.s, %[temp], %[width]\n"
- "cbz %[beta0], 1f\n"
- "mov z16.s, #0\n"
"ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
- "mov z17.s, #0\n"
+ "incw %[temp], all, mul #1\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "mov z18.s, #0\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "mov z19.s, #0\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "b 2f\n"
- "1:\n"
- "ld1rw z15.s, p7/z, [%[betaptr]]\n"
- "ld1w z16.s, p0/z, [%[c_ptr0]]\n"
- "ld1w z17.s, p1/z, [%[c_ptr0], #1, MUL VL]\n"
- "ld1w z18.s, p2/z, [%[c_ptr0], #2, MUL VL]\n"
- "ld1w z19.s, p3/z, [%[c_ptr0], #3, MUL VL]\n"
- "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
"add %[a_ptr0], %[a_ptr0], #0x10\n"
- "fmul z16.s, p7/m, z16.s, z15.s\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmul z17.s, p7/m, z17.s, z15.s\n"
+ "ld1w z17.s, p1/z, [%[biasptr], #1, MUL VL]\n"
+ "whilelt p2.s, %[temp], %[width]\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmul z18.s, p7/m, z18.s, z15.s\n"
+ "incw %[temp], all, mul #1\n"
+ "ld1w z18.s, p2/z, [%[biasptr], #2, MUL VL]\n"
"ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmul z19.s, p7/m, z19.s, z15.s\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
+ "whilelt p3.s, %[temp], %[width]\n"
+ "ld1w z19.s, p3/z, [%[biasptr], #3, MUL VL]\n"
+ "cbz %[loops], 1f\n"
"2:\n"
- "cbz %[loops], 3f\n"
- "4:\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z16.s, z8.s, z0.s[0]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z17.s, z9.s, z0.s[0]\n"
"ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
"fmla z18.s, z10.s, z0.s[0]\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z19.s, z11.s, z0.s[0]\n"
+ "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
+ "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"add %[a_ptr0], %[a_ptr0], #0x20\n"
"fmla z16.s, z12.s, z0.s[1]\n"
- "subs %[loops], %[loops], #0x1\n"
+ "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z17.s, z13.s, z0.s[1]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
+ "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z18.s, z14.s, z0.s[1]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"fmla z19.s, z15.s, z0.s[1]\n"
+ "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z16.s, z8.s, z0.s[2]\n"
- "fmla z17.s, z9.s, z0.s[2]\n"
- "fmla z18.s, z10.s, z0.s[2]\n"
- "fmla z19.s, z11.s, z0.s[2]\n"
"ld1w z12.s, p0/z, [%[b_ptr0]]\n"
+ "fmla z17.s, z9.s, z0.s[2]\n"
"ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z18.s, z10.s, z0.s[2]\n"
"ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z19.s, z11.s, z0.s[2]\n"
"ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z16.s, z12.s, z0.s[3]\n"
+ "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z17.s, z13.s, z0.s[3]\n"
- "fmla z18.s, z14.s, z0.s[3]\n"
- "fmla z19.s, z15.s, z0.s[3]\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
+ "fmla z18.s, z14.s, z0.s[3]\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z19.s, z15.s, z0.s[3]\n"
"ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z16.s, z8.s, z4.s[0]\n"
"ld1rqw z0.s, p7/z, [%[a_ptr0], #-0x10]\n"
"fmla z17.s, z9.s, z4.s[0]\n"
- "fmla z18.s, z10.s, z4.s[0]\n"
- "fmla z19.s, z11.s, z4.s[0]\n"
"ld1w z12.s, p0/z, [%[b_ptr0]]\n"
+ "fmla z18.s, z10.s, z4.s[0]\n"
"ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z19.s, z11.s, z4.s[0]\n"
"ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z16.s, z12.s, z4.s[1]\n"
- "fmla z17.s, z13.s, z4.s[1]\n"
- "fmla z18.s, z14.s, z4.s[1]\n"
- "fmla z19.s, z15.s, z4.s[1]\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
+ "fmla z17.s, z13.s, z4.s[1]\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z18.s, z14.s, z4.s[1]\n"
"ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z19.s, z15.s, z4.s[1]\n"
"ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z16.s, z8.s, z4.s[2]\n"
+ "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z17.s, z9.s, z4.s[2]\n"
- "fmla z18.s, z10.s, z4.s[2]\n"
- "fmla z19.s, z11.s, z4.s[2]\n"
"ld1w z12.s, p0/z, [%[b_ptr0]]\n"
+ "fmla z18.s, z10.s, z4.s[2]\n"
"ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z19.s, z11.s, z4.s[2]\n"
"ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z16.s, z12.s, z4.s[3]\n"
- "fmla z17.s, z13.s, z4.s[3]\n"
- "fmla z18.s, z14.s, z4.s[3]\n"
- "fmla z19.s, z15.s, z4.s[3]\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
+ "fmla z17.s, z13.s, z4.s[3]\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z18.s, z14.s, z4.s[3]\n"
"ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z19.s, z15.s, z4.s[3]\n"
+ "b.ne 2b\n"
+ "1:\n"
"ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "b.ne 4b\n"
- "3:\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"ld1w z12.s, p0/z, [%[b_ptr0]]\n"
"ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "cbz %[regs], 5f\n"
- "fmla z16.s, z8.s, z0.s[0]\n"
"ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z17.s, z9.s, z0.s[0]\n"
+ "cbz %[regs], 3f\n"
+ "fmla z16.s, z8.s, z0.s[0]\n"
"ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
- "fmla z18.s, z10.s, z0.s[0]\n"
+ "fmla z17.s, z9.s, z0.s[0]\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z19.s, z11.s, z0.s[0]\n"
- "fmla z16.s, z12.s, z0.s[1]\n"
- "fmla z17.s, z13.s, z0.s[1]\n"
- "fmla z18.s, z14.s, z0.s[1]\n"
+ "fmla z18.s, z10.s, z0.s[0]\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
+ "fmla z19.s, z11.s, z0.s[0]\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z19.s, z15.s, z0.s[1]\n"
+ "fmla z16.s, z12.s, z0.s[1]\n"
"ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z17.s, z13.s, z0.s[1]\n"
"ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z18.s, z14.s, z0.s[1]\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z16.s, z8.s, z0.s[2]\n"
- "fmla z17.s, z9.s, z0.s[2]\n"
- "fmla z18.s, z10.s, z0.s[2]\n"
- "fmla z19.s, z11.s, z0.s[2]\n"
+ "fmla z19.s, z15.s, z0.s[1]\n"
"ld1w z12.s, p0/z, [%[b_ptr0]]\n"
+ "fmla z16.s, z8.s, z0.s[2]\n"
"ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z17.s, z9.s, z0.s[2]\n"
"ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z18.s, z10.s, z0.s[2]\n"
"ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z19.s, z11.s, z0.s[2]\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z16.s, z12.s, z0.s[3]\n"
- "fmla z17.s, z13.s, z0.s[3]\n"
- "fmla z18.s, z14.s, z0.s[3]\n"
- "fmla z19.s, z15.s, z0.s[3]\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
+ "fmla z17.s, z13.s, z0.s[3]\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z18.s, z14.s, z0.s[3]\n"
"ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z19.s, z15.s, z0.s[3]\n"
"ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z16.s, z8.s, z4.s[0]\n"
"ld1rqw z0.s, p6/z, [%[a_ptr0], #0x10]\n"
"fmla z17.s, z9.s, z4.s[0]\n"
+ "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z18.s, z10.s, z4.s[0]\n"
- "fmla z19.s, z11.s, z4.s[0]\n"
"ld1w z12.s, p0/z, [%[b_ptr0]]\n"
+ "fmla z19.s, z11.s, z4.s[0]\n"
"ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
+ "addvl %[a_ptr0], %[a_ptr0], #2\n"
"fmla z16.s, z12.s, z4.s[1]\n"
+ "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z17.s, z13.s, z4.s[1]\n"
+ "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z18.s, z14.s, z4.s[1]\n"
- "fmla z19.s, z15.s, z4.s[1]\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
+ "fmla z19.s, z15.s, z4.s[1]\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z16.s, z8.s, z4.s[2]\n"
- "fmla z17.s, z9.s, z4.s[2]\n"
- "fmla z18.s, z10.s, z4.s[2]\n"
- "fmla z19.s, z11.s, z4.s[2]\n"
"ld1w z12.s, p0/z, [%[b_ptr0]]\n"
+ "fmla z17.s, z9.s, z4.s[2]\n"
"ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z18.s, z10.s, z4.s[2]\n"
"ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z19.s, z11.s, z4.s[2]\n"
"ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z16.s, z12.s, z4.s[3]\n"
"fmla z17.s, z13.s, z4.s[3]\n"
"fmla z18.s, z14.s, z4.s[3]\n"
"fmla z19.s, z15.s, z4.s[3]\n"
- "cbz %[blocks], 6f\n"
+ "cbz %[blocks], 4f\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"subs %[blocks], %[blocks], #0x1\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
@@ -259,7 +262,7 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z17.s, z9.s, z0.s[0]\n"
"fmla z18.s, z10.s, z0.s[0]\n"
"fmla z19.s, z11.s, z0.s[0]\n"
- "b.eq 6f\n"
+ "b.eq 4f\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"subs %[blocks], %[blocks], #0x1\n"
"ld1w z12.s, p0/z, [%[b_ptr0]]\n"
@@ -270,7 +273,7 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z17.s, z13.s, z0.s[1]\n"
"fmla z18.s, z14.s, z0.s[1]\n"
"fmla z19.s, z15.s, z0.s[1]\n"
- "b.eq 6f\n"
+ "b.eq 4f\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
@@ -280,37 +283,37 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z17.s, z9.s, z0.s[2]\n"
"fmla z18.s, z10.s, z0.s[2]\n"
"fmla z19.s, z11.s, z0.s[2]\n"
- "b 6f\n"
- "5:\n"
+ "b 4f\n"
+ "3:\n"
"fmla z16.s, z8.s, z0.s[0]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z17.s, z9.s, z0.s[0]\n"
"ld1rqw z4.s, p6/z, [%[a_ptr0]]\n"
- "fmla z18.s, z10.s, z0.s[0]\n"
+ "fmla z17.s, z9.s, z0.s[0]\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z19.s, z11.s, z0.s[0]\n"
- "fmla z16.s, z12.s, z0.s[1]\n"
- "fmla z17.s, z13.s, z0.s[1]\n"
- "fmla z18.s, z14.s, z0.s[1]\n"
+ "fmla z18.s, z10.s, z0.s[0]\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
+ "fmla z19.s, z11.s, z0.s[0]\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z19.s, z15.s, z0.s[1]\n"
+ "fmla z16.s, z12.s, z0.s[1]\n"
"ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z17.s, z13.s, z0.s[1]\n"
"ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z18.s, z14.s, z0.s[1]\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z16.s, z8.s, z0.s[2]\n"
- "fmla z17.s, z9.s, z0.s[2]\n"
- "fmla z18.s, z10.s, z0.s[2]\n"
- "fmla z19.s, z11.s, z0.s[2]\n"
+ "fmla z19.s, z15.s, z0.s[1]\n"
"ld1w z12.s, p0/z, [%[b_ptr0]]\n"
+ "fmla z16.s, z8.s, z0.s[2]\n"
"ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z17.s, z9.s, z0.s[2]\n"
"ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z18.s, z10.s, z0.s[2]\n"
"ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z19.s, z11.s, z0.s[2]\n"
+ "addvl %[a_ptr0], %[a_ptr0], #1\n"
"fmla z16.s, z12.s, z0.s[3]\n"
"fmla z17.s, z13.s, z0.s[3]\n"
"fmla z18.s, z14.s, z0.s[3]\n"
"fmla z19.s, z15.s, z0.s[3]\n"
- "cbz %[blocks], 6f\n"
+ "cbz %[blocks], 4f\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"subs %[blocks], %[blocks], #0x1\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
@@ -321,7 +324,7 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z17.s, z9.s, z4.s[0]\n"
"fmla z18.s, z10.s, z4.s[0]\n"
"fmla z19.s, z11.s, z4.s[0]\n"
- "b.eq 6f\n"
+ "b.eq 4f\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"subs %[blocks], %[blocks], #0x1\n"
"ld1w z12.s, p0/z, [%[b_ptr0]]\n"
@@ -332,7 +335,7 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z17.s, z13.s, z4.s[1]\n"
"fmla z18.s, z14.s, z4.s[1]\n"
"fmla z19.s, z15.s, z4.s[1]\n"
- "b.eq 6f\n"
+ "b.eq 4f\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
@@ -342,14 +345,24 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z17.s, z9.s, z4.s[2]\n"
"fmla z18.s, z10.s, z4.s[2]\n"
"fmla z19.s, z11.s, z4.s[2]\n"
- "6:\n"
+ "4:\n"
+ "ld1rw z14.s, p7/z, [%[minptr]]\n"
+ "ld1rw z15.s, p7/z, [%[maxptr]]\n"
+ "fmax z16.s, p7/m, z16.s, z14.s\n"
+ "fmax z17.s, p7/m, z17.s, z14.s\n"
+ "fmax z18.s, p7/m, z18.s, z14.s\n"
+ "fmax z19.s, p7/m, z19.s, z14.s\n"
+ "fmin z16.s, p7/m, z16.s, z15.s\n"
+ "fmin z17.s, p7/m, z17.s, z15.s\n"
+ "fmin z18.s, p7/m, z18.s, z15.s\n"
+ "fmin z19.s, p7/m, z19.s, z15.s\n"
"st1w z16.s, p0, [%[c_ptr0]]\n"
"st1w z17.s, p1, [%[c_ptr0], #1, MUL VL]\n"
"st1w z18.s, p2, [%[c_ptr0], #2, MUL VL]\n"
"st1w z19.s, p3, [%[c_ptr0], #3, MUL VL]\n"
"addvl %[c_ptr0], %[c_ptr0], #4\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [betaptr] "r" (betaptr), [width] "r" (width), [beta0] "r" (beta0), [leftovers] "r" (leftovers), [lda] "r" (ldab), [ldc] "r" (ldcb), [ldb] "r" (ldbb)
+ : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers), [ldb] "r" (ldbb)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
);
break;
@@ -363,61 +376,33 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"whilelt p0.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"ptrue p7.s\n"
+ "ld1w z16.s, p0/z, [%[biasptr]]\n"
"whilelt p1.s, %[temp], %[width]\n"
- "incw %[temp], all, mul #1\n"
- "whilelt p2.s, %[temp], %[width]\n"
- "incw %[temp], all, mul #1\n"
- "whilelt p3.s, %[temp], %[width]\n"
- "cbz %[beta0], 1f\n"
- "mov z16.s, #0\n"
"ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
- "mov z17.s, #0\n"
+ "incw %[temp], all, mul #1\n"
+ "mov z20.d, z16.d\n"
+ "ld1w z17.s, p1/z, [%[biasptr], #1, MUL VL]\n"
"ld1rqw z1.s, p7/z, [a_ptr1]\n"
- "mov z18.s, #0\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "mov z19.s, #0\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "mov z20.s, #0\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "mov z21.s, #0\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "mov z22.s, #0\n"
"add %[a_ptr0], %[a_ptr0], #0x10\n"
- "mov z23.s, #0\n"
- "b 2f\n"
- "1:\n"
- "ld1rw z15.s, p7/z, [%[betaptr]]\n"
- "ld1w z16.s, p0/z, [%[c_ptr0]]\n"
- "ld1w z17.s, p1/z, [%[c_ptr0], #1, MUL VL]\n"
- "ld1w z18.s, p2/z, [%[c_ptr0], #2, MUL VL]\n"
- "ld1w z19.s, p3/z, [%[c_ptr0], #3, MUL VL]\n"
- "ld1w z20.s, p0/z, [c_ptr1]\n"
- "fmul z16.s, p7/m, z16.s, z15.s\n"
- "ld1w z21.s, p1/z, [c_ptr1, #1, MUL VL]\n"
- "fmul z17.s, p7/m, z17.s, z15.s\n"
- "ld1w z22.s, p2/z, [c_ptr1, #2, MUL VL]\n"
- "fmul z18.s, p7/m, z18.s, z15.s\n"
- "ld1w z23.s, p3/z, [c_ptr1, #3, MUL VL]\n"
- "fmul z19.s, p7/m, z19.s, z15.s\n"
- "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
- "fmul z20.s, p7/m, z20.s, z15.s\n"
- "ld1rqw z1.s, p7/z, [a_ptr1]\n"
- "fmul z21.s, p7/m, z21.s, z15.s\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmul z22.s, p7/m, z22.s, z15.s\n"
+ "whilelt p2.s, %[temp], %[width]\n"
+ "mov z21.d, z17.d\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmul z23.s, p7/m, z23.s, z15.s\n"
+ "incw %[temp], all, mul #1\n"
+ "add a_ptr1, a_ptr1, #0x10\n"
+ "ld1w z18.s, p2/z, [%[biasptr], #2, MUL VL]\n"
"ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
+ "whilelt p3.s, %[temp], %[width]\n"
+ "mov z22.d, z18.d\n"
+ "ld1w z19.s, p3/z, [%[biasptr], #3, MUL VL]\n"
"ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "2:\n"
- "add a_ptr1, a_ptr1, #0x10\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "cbz %[loops], 3f\n"
- "4:\n"
+ "mov z23.d, z19.d\n"
"ld1w z12.s, p0/z, [%[b_ptr0]]\n"
"ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
+ "cbz %[loops], 1f\n"
+ "2:\n"
"fmla z16.s, z8.s, z0.s[0]\n"
"ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z20.s, z8.s, z1.s[0]\n"
@@ -425,200 +410,202 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z17.s, z9.s, z0.s[0]\n"
"ld1rqw z5.s, p7/z, [a_ptr1]\n"
"fmla z21.s, z9.s, z1.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
+ "subs %[loops], %[loops], #0x1\n"
"fmla z18.s, z10.s, z0.s[0]\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
+ "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z22.s, z10.s, z1.s[0]\n"
- "add a_ptr1, a_ptr1, #0x20\n"
- "fmla z19.s, z11.s, z0.s[0]\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z23.s, z11.s, z1.s[0]\n"
+ "fmla z19.s, z11.s, z0.s[0]\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z16.s, z12.s, z0.s[1]\n"
+ "fmla z23.s, z11.s, z1.s[0]\n"
"ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z20.s, z12.s, z1.s[1]\n"
+ "fmla z16.s, z12.s, z0.s[1]\n"
"ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z17.s, z13.s, z0.s[1]\n"
+ "fmla z20.s, z12.s, z1.s[1]\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
+ "fmla z17.s, z13.s, z0.s[1]\n"
+ "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
"fmla z21.s, z13.s, z1.s[1]\n"
- "subs %[loops], %[loops], #0x1\n"
+ "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z14.s, z0.s[1]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x20\n"
"fmla z22.s, z14.s, z1.s[1]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
+ "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z15.s, z0.s[1]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "add a_ptr1, a_ptr1, #0x20\n"
"fmla z23.s, z15.s, z1.s[1]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z16.s, z8.s, z0.s[2]\n"
"ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z20.s, z8.s, z1.s[2]\n"
+ "fmla z16.s, z8.s, z0.s[2]\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
+ "fmla z20.s, z8.s, z1.s[2]\n"
+ "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"fmla z17.s, z9.s, z0.s[2]\n"
"fmla z21.s, z9.s, z1.s[2]\n"
+ "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z10.s, z0.s[2]\n"
"fmla z22.s, z10.s, z1.s[2]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
+ "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z11.s, z0.s[2]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z23.s, z11.s, z1.s[2]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z16.s, z12.s, z0.s[3]\n"
"ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z20.s, z12.s, z1.s[3]\n"
+ "fmla z16.s, z12.s, z0.s[3]\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
+ "fmla z20.s, z12.s, z1.s[3]\n"
+ "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
"fmla z17.s, z13.s, z0.s[3]\n"
"fmla z21.s, z13.s, z1.s[3]\n"
+ "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z14.s, z0.s[3]\n"
"fmla z22.s, z14.s, z1.s[3]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
+ "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z15.s, z0.s[3]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1rqw z0.s, p7/z, [%[a_ptr0], #-0x10]\n"
"fmla z23.s, z15.s, z1.s[3]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z16.s, z8.s, z4.s[0]\n"
"ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
+ "fmla z16.s, z8.s, z4.s[0]\n"
+ "ld1rqw z1.s, p7/z, [a_ptr1, #-0x10]\n"
"fmla z20.s, z8.s, z5.s[0]\n"
- "ld1rqw z0.s, p7/z, [%[a_ptr0], #-0x10]\n"
+ "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z17.s, z9.s, z4.s[0]\n"
- "ld1rqw z1.s, p7/z, [a_ptr1, #-0x10]\n"
+ "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"fmla z21.s, z9.s, z5.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
+ "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z10.s, z4.s[0]\n"
"fmla z22.s, z10.s, z5.s[0]\n"
+ "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z11.s, z4.s[0]\n"
"fmla z23.s, z11.s, z5.s[0]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
+ "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z16.s, z12.s, z4.s[1]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z20.s, z12.s, z5.s[1]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
"fmla z17.s, z13.s, z4.s[1]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z21.s, z13.s, z5.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
+ "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z14.s, z4.s[1]\n"
"fmla z22.s, z14.s, z5.s[1]\n"
+ "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z15.s, z4.s[1]\n"
"fmla z23.s, z15.s, z5.s[1]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
+ "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z16.s, z8.s, z4.s[2]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z20.s, z8.s, z5.s[2]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"fmla z17.s, z9.s, z4.s[2]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z21.s, z9.s, z5.s[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
+ "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z10.s, z4.s[2]\n"
"fmla z22.s, z10.s, z5.s[2]\n"
+ "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z11.s, z4.s[2]\n"
"fmla z23.s, z11.s, z5.s[2]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
+ "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z16.s, z12.s, z4.s[3]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z20.s, z12.s, z5.s[3]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
"fmla z17.s, z13.s, z4.s[3]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z21.s, z13.s, z5.s[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
+ "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z14.s, z4.s[3]\n"
"fmla z22.s, z14.s, z5.s[3]\n"
+ "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z15.s, z4.s[3]\n"
"fmla z23.s, z15.s, z5.s[3]\n"
- "b.ne 4b\n"
- "3:\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "cbz %[regs], 5f\n"
- "fmla z16.s, z8.s, z0.s[0]\n"
+ "b.ne 2b\n"
+ "1:\n"
"ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z20.s, z8.s, z1.s[0]\n"
+ "cbz %[regs], 3f\n"
+ "fmla z16.s, z8.s, z0.s[0]\n"
"ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
- "fmla z17.s, z9.s, z0.s[0]\n"
+ "fmla z20.s, z8.s, z1.s[0]\n"
"ld1rqw z5.s, p7/z, [a_ptr1]\n"
- "fmla z21.s, z9.s, z1.s[0]\n"
+ "fmla z17.s, z9.s, z0.s[0]\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
+ "fmla z21.s, z9.s, z1.s[0]\n"
+ "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"fmla z18.s, z10.s, z0.s[0]\n"
+ "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z22.s, z10.s, z1.s[0]\n"
+ "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z11.s, z0.s[0]\n"
"fmla z23.s, z11.s, z1.s[0]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
+ "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z16.s, z12.s, z0.s[1]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z20.s, z12.s, z1.s[1]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
"fmla z17.s, z13.s, z0.s[1]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z21.s, z13.s, z1.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
+ "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z14.s, z0.s[1]\n"
"fmla z22.s, z14.s, z1.s[1]\n"
+ "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z15.s, z0.s[1]\n"
"fmla z23.s, z15.s, z1.s[1]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
+ "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z16.s, z8.s, z0.s[2]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z20.s, z8.s, z1.s[2]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"fmla z17.s, z9.s, z0.s[2]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z21.s, z9.s, z1.s[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
+ "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z10.s, z0.s[2]\n"
"fmla z22.s, z10.s, z1.s[2]\n"
+ "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z11.s, z0.s[2]\n"
"fmla z23.s, z11.s, z1.s[2]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
+ "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z16.s, z12.s, z0.s[3]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z20.s, z12.s, z1.s[3]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
"fmla z17.s, z13.s, z0.s[3]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z21.s, z13.s, z1.s[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
+ "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z14.s, z0.s[3]\n"
"fmla z22.s, z14.s, z1.s[3]\n"
+ "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z15.s, z0.s[3]\n"
"ld1rqw z0.s, p6/z, [%[a_ptr0], #0x10]\n"
"fmla z23.s, z15.s, z1.s[3]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
+ "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z16.s, z8.s, z4.s[0]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1rqw z1.s, p6/z, [a_ptr1, #0x10]\n"
"fmla z20.s, z8.s, z5.s[0]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
+ "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z17.s, z9.s, z4.s[0]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"fmla z21.s, z9.s, z5.s[0]\n"
- "ld1rqw z1.s, p6/z, [a_ptr1, #0x10]\n"
+ "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z10.s, z4.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
+ "addvl %[a_ptr0], %[a_ptr0], #2\n"
"fmla z22.s, z10.s, z5.s[0]\n"
+ "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z11.s, z4.s[0]\n"
+ "addvl a_ptr1, a_ptr1, #2\n"
"fmla z23.s, z11.s, z5.s[0]\n"
+ "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z16.s, z12.s, z4.s[1]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
+ "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z20.s, z12.s, z5.s[1]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
"fmla z17.s, z13.s, z4.s[1]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z21.s, z13.s, z5.s[1]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
+ "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z14.s, z4.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z22.s, z14.s, z5.s[1]\n"
+ "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z15.s, z4.s[1]\n"
"fmla z23.s, z15.s, z5.s[1]\n"
+ "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z16.s, z8.s, z4.s[2]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
"fmla z20.s, z8.s, z5.s[2]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z17.s, z9.s, z4.s[2]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z21.s, z9.s, z5.s[2]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z18.s, z10.s, z4.s[2]\n"
"fmla z22.s, z10.s, z5.s[2]\n"
"fmla z19.s, z11.s, z4.s[2]\n"
@@ -631,7 +618,7 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z22.s, z14.s, z5.s[3]\n"
"fmla z19.s, z15.s, z4.s[3]\n"
"fmla z23.s, z15.s, z5.s[3]\n"
- "cbz %[blocks], 6f\n"
+ "cbz %[blocks], 4f\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"subs %[blocks], %[blocks], #0x1\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
@@ -646,7 +633,7 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z22.s, z10.s, z1.s[0]\n"
"fmla z19.s, z11.s, z0.s[0]\n"
"fmla z23.s, z11.s, z1.s[0]\n"
- "b.eq 6f\n"
+ "b.eq 4f\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"subs %[blocks], %[blocks], #0x1\n"
"ld1w z12.s, p0/z, [%[b_ptr0]]\n"
@@ -661,7 +648,7 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z22.s, z14.s, z1.s[1]\n"
"fmla z19.s, z15.s, z0.s[1]\n"
"fmla z23.s, z15.s, z1.s[1]\n"
- "b.eq 6f\n"
+ "b.eq 4f\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
@@ -675,40 +662,41 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z22.s, z10.s, z1.s[2]\n"
"fmla z19.s, z11.s, z0.s[2]\n"
"fmla z23.s, z11.s, z1.s[2]\n"
- "b 6f\n"
- "5:\n"
+ "b 4f\n"
+ "3:\n"
"fmla z16.s, z8.s, z0.s[0]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z20.s, z8.s, z1.s[0]\n"
"ld1rqw z4.s, p6/z, [%[a_ptr0]]\n"
- "fmla z17.s, z9.s, z0.s[0]\n"
+ "fmla z20.s, z8.s, z1.s[0]\n"
"ld1rqw z5.s, p6/z, [a_ptr1]\n"
- "fmla z21.s, z9.s, z1.s[0]\n"
+ "fmla z17.s, z9.s, z0.s[0]\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
+ "fmla z21.s, z9.s, z1.s[0]\n"
+ "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"fmla z18.s, z10.s, z0.s[0]\n"
+ "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z22.s, z10.s, z1.s[0]\n"
+ "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z11.s, z0.s[0]\n"
+ "addvl %[a_ptr0], %[a_ptr0], #1\n"
"fmla z23.s, z11.s, z1.s[0]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
+ "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z16.s, z12.s, z0.s[1]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z20.s, z12.s, z1.s[1]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
+ "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
"fmla z17.s, z13.s, z0.s[1]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
+ "addvl a_ptr1, a_ptr1, #1\n"
"fmla z21.s, z13.s, z1.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
+ "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z14.s, z0.s[1]\n"
"fmla z22.s, z14.s, z1.s[1]\n"
+ "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z15.s, z0.s[1]\n"
"fmla z23.s, z15.s, z1.s[1]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
+ "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z16.s, z8.s, z0.s[2]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z20.s, z8.s, z1.s[2]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z17.s, z9.s, z0.s[2]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z21.s, z9.s, z1.s[2]\n"
"fmla z18.s, z10.s, z0.s[2]\n"
"fmla z22.s, z10.s, z1.s[2]\n"
@@ -722,7 +710,7 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z22.s, z14.s, z1.s[3]\n"
"fmla z19.s, z15.s, z0.s[3]\n"
"fmla z23.s, z15.s, z1.s[3]\n"
- "cbz %[blocks], 6f\n"
+ "cbz %[blocks], 4f\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"subs %[blocks], %[blocks], #0x1\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
@@ -737,7 +725,7 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z22.s, z10.s, z5.s[0]\n"
"fmla z19.s, z11.s, z4.s[0]\n"
"fmla z23.s, z11.s, z5.s[0]\n"
- "b.eq 6f\n"
+ "b.eq 4f\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"subs %[blocks], %[blocks], #0x1\n"
"ld1w z12.s, p0/z, [%[b_ptr0]]\n"
@@ -752,7 +740,7 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z22.s, z14.s, z5.s[1]\n"
"fmla z19.s, z15.s, z4.s[1]\n"
"fmla z23.s, z15.s, z5.s[1]\n"
- "b.eq 6f\n"
+ "b.eq 4f\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
@@ -766,10 +754,28 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z22.s, z10.s, z5.s[2]\n"
"fmla z19.s, z11.s, z4.s[2]\n"
"fmla z23.s, z11.s, z5.s[2]\n"
- "6:\n"
+ "4:\n"
+ "ld1rw z14.s, p7/z, [%[minptr]]\n"
+ "ld1rw z15.s, p7/z, [%[maxptr]]\n"
+ "fmax z16.s, p7/m, z16.s, z14.s\n"
+ "fmax z17.s, p7/m, z17.s, z14.s\n"
+ "fmax z18.s, p7/m, z18.s, z14.s\n"
+ "fmax z19.s, p7/m, z19.s, z14.s\n"
+ "fmin z16.s, p7/m, z16.s, z15.s\n"
+ "fmin z17.s, p7/m, z17.s, z15.s\n"
+ "fmin z18.s, p7/m, z18.s, z15.s\n"
+ "fmin z19.s, p7/m, z19.s, z15.s\n"
"st1w z16.s, p0, [%[c_ptr0]]\n"
+ "fmax z20.s, p7/m, z20.s, z14.s\n"
+ "fmax z21.s, p7/m, z21.s, z14.s\n"
+ "fmax z22.s, p7/m, z22.s, z14.s\n"
"st1w z17.s, p1, [%[c_ptr0], #1, MUL VL]\n"
+ "fmax z23.s, p7/m, z23.s, z14.s\n"
+ "fmin z20.s, p7/m, z20.s, z15.s\n"
+ "fmin z21.s, p7/m, z21.s, z15.s\n"
"st1w z18.s, p2, [%[c_ptr0], #2, MUL VL]\n"
+ "fmin z22.s, p7/m, z22.s, z15.s\n"
+ "fmin z23.s, p7/m, z23.s, z15.s\n"
"st1w z19.s, p3, [%[c_ptr0], #3, MUL VL]\n"
"addvl %[c_ptr0], %[c_ptr0], #4\n"
"st1w z20.s, p0, [c_ptr1]\n"
@@ -779,7 +785,7 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
".unreq a_ptr1\n"
".unreq c_ptr1\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [betaptr] "r" (betaptr), [width] "r" (width), [beta0] "r" (beta0), [leftovers] "r" (leftovers), [lda] "r" (ldab), [ldc] "r" (ldcb), [ldb] "r" (ldbb)
+ : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers), [ldb] "r" (ldbb)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "cc", "memory"
);
break;
@@ -791,85 +797,45 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"c_ptr2 .req X3\n"
"add a_ptr1, %[a_ptr0], %[lda]\n"
"add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
"whilelt p6.s, %[temp], %[leftovers]\n"
"whilelt p0.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
- "add a_ptr2, a_ptr1, %[lda]\n"
- "add c_ptr2, c_ptr1, %[ldc]\n"
"ptrue p7.s\n"
+ "ld1w z16.s, p0/z, [%[biasptr]]\n"
"whilelt p1.s, %[temp], %[width]\n"
- "incw %[temp], all, mul #1\n"
- "whilelt p2.s, %[temp], %[width]\n"
- "incw %[temp], all, mul #1\n"
- "whilelt p3.s, %[temp], %[width]\n"
- "cbz %[beta0], 1f\n"
- "mov z16.s, #0\n"
"ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
- "mov z17.s, #0\n"
+ "incw %[temp], all, mul #1\n"
+ "mov z20.d, z16.d\n"
+ "ld1w z17.s, p1/z, [%[biasptr], #1, MUL VL]\n"
+ "mov z24.d, z16.d\n"
"ld1rqw z1.s, p7/z, [a_ptr1]\n"
- "mov z18.s, #0\n"
"ld1rqw z2.s, p7/z, [a_ptr2]\n"
- "mov z19.s, #0\n"
+ "whilelt p2.s, %[temp], %[width]\n"
+ "mov z21.d, z17.d\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "mov z20.s, #0\n"
+ "mov z25.d, z17.d\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "mov z21.s, #0\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "mov z22.s, #0\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "mov z23.s, #0\n"
+ "incw %[temp], all, mul #1\n"
+ "ld1w z18.s, p2/z, [%[biasptr], #2, MUL VL]\n"
"add %[a_ptr0], %[a_ptr0], #0x10\n"
- "mov z24.s, #0\n"
- "add a_ptr1, a_ptr1, #0x10\n"
- "mov z25.s, #0\n"
- "add a_ptr2, a_ptr2, #0x10\n"
- "mov z26.s, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z27.s, #0\n"
- "b 2f\n"
- "1:\n"
- "ld1rw z15.s, p7/z, [%[betaptr]]\n"
- "ld1w z16.s, p0/z, [%[c_ptr0]]\n"
- "ld1w z17.s, p1/z, [%[c_ptr0], #1, MUL VL]\n"
- "ld1w z18.s, p2/z, [%[c_ptr0], #2, MUL VL]\n"
- "ld1w z19.s, p3/z, [%[c_ptr0], #3, MUL VL]\n"
- "ld1w z20.s, p0/z, [c_ptr1]\n"
- "fmul z16.s, p7/m, z16.s, z15.s\n"
- "ld1w z21.s, p1/z, [c_ptr1, #1, MUL VL]\n"
- "fmul z17.s, p7/m, z17.s, z15.s\n"
- "ld1w z22.s, p2/z, [c_ptr1, #2, MUL VL]\n"
- "fmul z18.s, p7/m, z18.s, z15.s\n"
- "ld1w z23.s, p3/z, [c_ptr1, #3, MUL VL]\n"
- "fmul z19.s, p7/m, z19.s, z15.s\n"
- "ld1w z24.s, p0/z, [c_ptr2]\n"
- "fmul z20.s, p7/m, z20.s, z15.s\n"
- "ld1w z25.s, p1/z, [c_ptr2, #1, MUL VL]\n"
- "fmul z21.s, p7/m, z21.s, z15.s\n"
- "ld1w z26.s, p2/z, [c_ptr2, #2, MUL VL]\n"
- "fmul z22.s, p7/m, z22.s, z15.s\n"
- "ld1w z27.s, p3/z, [c_ptr2, #3, MUL VL]\n"
- "fmul z23.s, p7/m, z23.s, z15.s\n"
- "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
- "fmul z24.s, p7/m, z24.s, z15.s\n"
- "ld1rqw z1.s, p7/z, [a_ptr1]\n"
- "fmul z25.s, p7/m, z25.s, z15.s\n"
- "ld1rqw z2.s, p7/z, [a_ptr2]\n"
- "fmul z26.s, p7/m, z26.s, z15.s\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmul z27.s, p7/m, z27.s, z15.s\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
+ "whilelt p3.s, %[temp], %[width]\n"
+ "mov z22.d, z18.d\n"
"add a_ptr1, a_ptr1, #0x10\n"
+ "mov z26.d, z18.d\n"
+ "ld1w z19.s, p3/z, [%[biasptr], #3, MUL VL]\n"
+ "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"add a_ptr2, a_ptr2, #0x10\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "2:\n"
+ "mov z23.d, z19.d\n"
"ld1w z12.s, p0/z, [%[b_ptr0]]\n"
+ "mov z27.d, z19.d\n"
"ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "cbz %[loops], 3f\n"
- "4:\n"
+ "cbz %[loops], 1f\n"
+ "2:\n"
"fmla z16.s, z8.s, z0.s[0]\n"
"ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z20.s, z8.s, z1.s[0]\n"
@@ -879,28 +845,28 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z17.s, z9.s, z0.s[0]\n"
"ld1rqw z6.s, p7/z, [a_ptr2]\n"
"fmla z21.s, z9.s, z1.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
+ "subs %[loops], %[loops], #0x1\n"
"fmla z25.s, z9.s, z2.s[0]\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
+ "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z18.s, z10.s, z0.s[0]\n"
- "add a_ptr1, a_ptr1, #0x20\n"
- "fmla z22.s, z10.s, z1.s[0]\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z26.s, z10.s, z2.s[0]\n"
+ "fmla z22.s, z10.s, z1.s[0]\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z19.s, z11.s, z0.s[0]\n"
+ "fmla z26.s, z10.s, z2.s[0]\n"
"ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
+ "fmla z19.s, z11.s, z0.s[0]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x20\n"
"fmla z23.s, z11.s, z1.s[0]\n"
- "add a_ptr2, a_ptr2, #0x20\n"
+ "add a_ptr1, a_ptr1, #0x20\n"
"fmla z27.s, z11.s, z2.s[0]\n"
"ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z16.s, z12.s, z0.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
+ "add a_ptr2, a_ptr2, #0x20\n"
"fmla z20.s, z12.s, z1.s[1]\n"
- "subs %[loops], %[loops], #0x1\n"
+ "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z24.s, z12.s, z2.s[1]\n"
- "fmla z17.s, z13.s, z0.s[1]\n"
"ld1w z12.s, p0/z, [%[b_ptr0]]\n"
+ "fmla z17.s, z13.s, z0.s[1]\n"
"fmla z21.s, z13.s, z1.s[1]\n"
"fmla z25.s, z13.s, z2.s[1]\n"
"ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
@@ -916,9 +882,9 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z20.s, z8.s, z1.s[2]\n"
"fmla z24.s, z8.s, z2.s[2]\n"
+ "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"fmla z17.s, z9.s, z0.s[2]\n"
"fmla z21.s, z9.s, z1.s[2]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"fmla z25.s, z9.s, z2.s[2]\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z10.s, z0.s[2]\n"
@@ -933,9 +899,9 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z20.s, z12.s, z1.s[3]\n"
"fmla z24.s, z12.s, z2.s[3]\n"
+ "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
"fmla z17.s, z13.s, z0.s[3]\n"
"fmla z21.s, z13.s, z1.s[3]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
"fmla z25.s, z13.s, z2.s[3]\n"
"ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z14.s, z0.s[3]\n"
@@ -953,12 +919,12 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z20.s, z8.s, z5.s[0]\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z24.s, z8.s, z6.s[0]\n"
+ "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"fmla z17.s, z9.s, z4.s[0]\n"
"fmla z21.s, z9.s, z5.s[0]\n"
"fmla z25.s, z9.s, z6.s[0]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z18.s, z10.s, z4.s[0]\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z18.s, z10.s, z4.s[0]\n"
"fmla z22.s, z10.s, z5.s[0]\n"
"fmla z26.s, z10.s, z6.s[0]\n"
"ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
@@ -970,9 +936,9 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z20.s, z12.s, z5.s[1]\n"
"fmla z24.s, z12.s, z6.s[1]\n"
+ "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
"fmla z17.s, z13.s, z4.s[1]\n"
"fmla z21.s, z13.s, z5.s[1]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
"fmla z25.s, z13.s, z6.s[1]\n"
"ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z14.s, z4.s[1]\n"
@@ -987,9 +953,9 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z20.s, z8.s, z5.s[2]\n"
"fmla z24.s, z8.s, z6.s[2]\n"
+ "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"fmla z17.s, z9.s, z4.s[2]\n"
"fmla z21.s, z9.s, z5.s[2]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"fmla z25.s, z9.s, z6.s[2]\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z10.s, z4.s[2]\n"
@@ -1004,9 +970,9 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z20.s, z12.s, z5.s[3]\n"
"fmla z24.s, z12.s, z6.s[3]\n"
+ "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
"fmla z17.s, z13.s, z4.s[3]\n"
"fmla z21.s, z13.s, z5.s[3]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
"fmla z25.s, z13.s, z6.s[3]\n"
"ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z14.s, z4.s[3]\n"
@@ -1016,37 +982,37 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z19.s, z15.s, z4.s[3]\n"
"fmla z23.s, z15.s, z5.s[3]\n"
"fmla z27.s, z15.s, z6.s[3]\n"
- "b.ne 4b\n"
- "3:\n"
- "cbz %[regs], 5f\n"
- "fmla z16.s, z8.s, z0.s[0]\n"
+ "b.ne 2b\n"
+ "1:\n"
"ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z20.s, z8.s, z1.s[0]\n"
+ "cbz %[regs], 3f\n"
+ "fmla z16.s, z8.s, z0.s[0]\n"
"ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
- "fmla z24.s, z8.s, z2.s[0]\n"
+ "fmla z20.s, z8.s, z1.s[0]\n"
"ld1rqw z5.s, p7/z, [a_ptr1]\n"
- "fmla z17.s, z9.s, z0.s[0]\n"
+ "fmla z24.s, z8.s, z2.s[0]\n"
"ld1rqw z6.s, p7/z, [a_ptr2]\n"
- "fmla z21.s, z9.s, z1.s[0]\n"
+ "fmla z17.s, z9.s, z0.s[0]\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
+ "fmla z21.s, z9.s, z1.s[0]\n"
+ "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"fmla z25.s, z9.s, z2.s[0]\n"
+ "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z10.s, z0.s[0]\n"
"fmla z22.s, z10.s, z1.s[0]\n"
"fmla z26.s, z10.s, z2.s[0]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
+ "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z11.s, z0.s[0]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z23.s, z11.s, z1.s[0]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z27.s, z11.s, z2.s[0]\n"
"ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z16.s, z12.s, z0.s[1]\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z20.s, z12.s, z1.s[1]\n"
"fmla z24.s, z12.s, z2.s[1]\n"
+ "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
"fmla z17.s, z13.s, z0.s[1]\n"
"fmla z21.s, z13.s, z1.s[1]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
"fmla z25.s, z13.s, z2.s[1]\n"
"ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z14.s, z0.s[1]\n"
@@ -1061,9 +1027,9 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z20.s, z8.s, z1.s[2]\n"
"fmla z24.s, z8.s, z2.s[2]\n"
+ "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"fmla z17.s, z9.s, z0.s[2]\n"
"fmla z21.s, z9.s, z1.s[2]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"fmla z25.s, z9.s, z2.s[2]\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z10.s, z0.s[2]\n"
@@ -1078,9 +1044,9 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z20.s, z12.s, z1.s[3]\n"
"fmla z24.s, z12.s, z2.s[3]\n"
+ "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
"fmla z17.s, z13.s, z0.s[3]\n"
"fmla z21.s, z13.s, z1.s[3]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
"fmla z25.s, z13.s, z2.s[3]\n"
"ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z14.s, z0.s[3]\n"
@@ -1098,12 +1064,15 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z20.s, z8.s, z5.s[0]\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z24.s, z8.s, z6.s[0]\n"
+ "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"fmla z17.s, z9.s, z4.s[0]\n"
+ "addvl %[a_ptr0], %[a_ptr0], #2\n"
"fmla z21.s, z9.s, z5.s[0]\n"
+ "addvl a_ptr1, a_ptr1, #2\n"
"fmla z25.s, z9.s, z6.s[0]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z18.s, z10.s, z4.s[0]\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z18.s, z10.s, z4.s[0]\n"
+ "addvl a_ptr2, a_ptr2, #2\n"
"fmla z22.s, z10.s, z5.s[0]\n"
"fmla z26.s, z10.s, z6.s[0]\n"
"ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
@@ -1115,9 +1084,9 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z20.s, z12.s, z5.s[1]\n"
"fmla z24.s, z12.s, z6.s[1]\n"
+ "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
"fmla z17.s, z13.s, z4.s[1]\n"
"fmla z21.s, z13.s, z5.s[1]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
"fmla z25.s, z13.s, z6.s[1]\n"
"ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z14.s, z4.s[1]\n"
@@ -1152,7 +1121,7 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z19.s, z15.s, z4.s[3]\n"
"fmla z23.s, z15.s, z5.s[3]\n"
"fmla z27.s, z15.s, z6.s[3]\n"
- "cbz %[blocks], 6f\n"
+ "cbz %[blocks], 4f\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"subs %[blocks], %[blocks], #0x1\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
@@ -1171,7 +1140,7 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z19.s, z11.s, z0.s[0]\n"
"fmla z23.s, z11.s, z1.s[0]\n"
"fmla z27.s, z11.s, z2.s[0]\n"
- "b.eq 6f\n"
+ "b.eq 4f\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"subs %[blocks], %[blocks], #0x1\n"
"ld1w z12.s, p0/z, [%[b_ptr0]]\n"
@@ -1190,7 +1159,7 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z19.s, z15.s, z0.s[1]\n"
"fmla z23.s, z15.s, z1.s[1]\n"
"fmla z27.s, z15.s, z2.s[1]\n"
- "b.eq 6f\n"
+ "b.eq 4f\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
@@ -1208,36 +1177,38 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z19.s, z11.s, z0.s[2]\n"
"fmla z23.s, z11.s, z1.s[2]\n"
"fmla z27.s, z11.s, z2.s[2]\n"
- "b 6f\n"
- "5:\n"
+ "b 4f\n"
+ "3:\n"
"fmla z16.s, z8.s, z0.s[0]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z20.s, z8.s, z1.s[0]\n"
"ld1rqw z4.s, p6/z, [%[a_ptr0]]\n"
- "fmla z24.s, z8.s, z2.s[0]\n"
+ "fmla z20.s, z8.s, z1.s[0]\n"
"ld1rqw z5.s, p6/z, [a_ptr1]\n"
- "fmla z17.s, z9.s, z0.s[0]\n"
+ "fmla z24.s, z8.s, z2.s[0]\n"
"ld1rqw z6.s, p6/z, [a_ptr2]\n"
- "fmla z21.s, z9.s, z1.s[0]\n"
+ "fmla z17.s, z9.s, z0.s[0]\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
+ "fmla z21.s, z9.s, z1.s[0]\n"
+ "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"fmla z25.s, z9.s, z2.s[0]\n"
+ "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z10.s, z0.s[0]\n"
+ "addvl %[a_ptr0], %[a_ptr0], #1\n"
"fmla z22.s, z10.s, z1.s[0]\n"
+ "addvl a_ptr1, a_ptr1, #1\n"
"fmla z26.s, z10.s, z2.s[0]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
+ "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z11.s, z0.s[0]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl a_ptr2, a_ptr2, #1\n"
"fmla z23.s, z11.s, z1.s[0]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z27.s, z11.s, z2.s[0]\n"
"ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z16.s, z12.s, z0.s[1]\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z20.s, z12.s, z1.s[1]\n"
"fmla z24.s, z12.s, z2.s[1]\n"
+ "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
"fmla z17.s, z13.s, z0.s[1]\n"
"fmla z21.s, z13.s, z1.s[1]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
"fmla z25.s, z13.s, z2.s[1]\n"
"ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z14.s, z0.s[1]\n"
@@ -1272,7 +1243,7 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z19.s, z15.s, z0.s[3]\n"
"fmla z23.s, z15.s, z1.s[3]\n"
"fmla z27.s, z15.s, z2.s[3]\n"
- "cbz %[blocks], 6f\n"
+ "cbz %[blocks], 4f\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"subs %[blocks], %[blocks], #0x1\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
@@ -1291,7 +1262,7 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z19.s, z11.s, z4.s[0]\n"
"fmla z23.s, z11.s, z5.s[0]\n"
"fmla z27.s, z11.s, z6.s[0]\n"
- "b.eq 6f\n"
+ "b.eq 4f\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"subs %[blocks], %[blocks], #0x1\n"
"ld1w z12.s, p0/z, [%[b_ptr0]]\n"
@@ -1310,7 +1281,7 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z19.s, z15.s, z4.s[1]\n"
"fmla z23.s, z15.s, z5.s[1]\n"
"fmla z27.s, z15.s, z6.s[1]\n"
- "b.eq 6f\n"
+ "b.eq 4f\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
@@ -1328,14 +1299,40 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z19.s, z11.s, z4.s[2]\n"
"fmla z23.s, z11.s, z5.s[2]\n"
"fmla z27.s, z11.s, z6.s[2]\n"
- "6:\n"
+ "4:\n"
+ "ld1rw z14.s, p7/z, [%[minptr]]\n"
+ "ld1rw z15.s, p7/z, [%[maxptr]]\n"
+ "fmax z16.s, p7/m, z16.s, z14.s\n"
+ "fmax z17.s, p7/m, z17.s, z14.s\n"
+ "fmax z18.s, p7/m, z18.s, z14.s\n"
+ "fmax z19.s, p7/m, z19.s, z14.s\n"
+ "fmin z16.s, p7/m, z16.s, z15.s\n"
+ "fmin z17.s, p7/m, z17.s, z15.s\n"
+ "fmin z18.s, p7/m, z18.s, z15.s\n"
+ "fmin z19.s, p7/m, z19.s, z15.s\n"
"st1w z16.s, p0, [%[c_ptr0]]\n"
+ "fmax z20.s, p7/m, z20.s, z14.s\n"
+ "fmax z21.s, p7/m, z21.s, z14.s\n"
+ "fmax z22.s, p7/m, z22.s, z14.s\n"
"st1w z17.s, p1, [%[c_ptr0], #1, MUL VL]\n"
+ "fmax z23.s, p7/m, z23.s, z14.s\n"
+ "fmin z20.s, p7/m, z20.s, z15.s\n"
+ "fmin z21.s, p7/m, z21.s, z15.s\n"
"st1w z18.s, p2, [%[c_ptr0], #2, MUL VL]\n"
+ "fmin z22.s, p7/m, z22.s, z15.s\n"
+ "fmin z23.s, p7/m, z23.s, z15.s\n"
+ "fmax z24.s, p7/m, z24.s, z14.s\n"
"st1w z19.s, p3, [%[c_ptr0], #3, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z14.s\n"
"addvl %[c_ptr0], %[c_ptr0], #4\n"
+ "fmax z26.s, p7/m, z26.s, z14.s\n"
"st1w z20.s, p0, [c_ptr1]\n"
+ "fmin z24.s, p7/m, z24.s, z15.s\n"
+ "fmin z25.s, p7/m, z25.s, z15.s\n"
+ "fmax z27.s, p7/m, z27.s, z14.s\n"
"st1w z21.s, p1, [c_ptr1, #1, MUL VL]\n"
+ "fmin z26.s, p7/m, z26.s, z15.s\n"
+ "fmin z27.s, p7/m, z27.s, z15.s\n"
"st1w z22.s, p2, [c_ptr1, #2, MUL VL]\n"
"st1w z23.s, p3, [c_ptr1, #3, MUL VL]\n"
"st1w z24.s, p0, [c_ptr2]\n"
@@ -1347,7 +1344,7 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
".unreq c_ptr1\n"
".unreq c_ptr2\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [betaptr] "r" (betaptr), [width] "r" (width), [beta0] "r" (beta0), [leftovers] "r" (leftovers), [lda] "r" (ldab), [ldc] "r" (ldcb), [ldb] "r" (ldbb)
+ : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers), [ldb] "r" (ldbb)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "cc", "memory"
);
break;
@@ -1362,103 +1359,53 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"c_ptr3 .req X5\n"
"add a_ptr1, %[a_ptr0], %[lda]\n"
"add c_ptr1, %[c_ptr0], %[ldc]\n"
- "whilelt p6.s, %[temp], %[leftovers]\n"
- "whilelt p0.s, %[temp], %[width]\n"
- "incw %[temp], all, mul #1\n"
"add a_ptr2, a_ptr1, %[lda]\n"
"add c_ptr2, c_ptr1, %[ldc]\n"
- "ptrue p7.s\n"
- "whilelt p1.s, %[temp], %[width]\n"
"add a_ptr3, a_ptr2, %[lda]\n"
"add c_ptr3, c_ptr2, %[ldc]\n"
+ "whilelt p6.s, %[temp], %[leftovers]\n"
+ "whilelt p0.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
- "whilelt p2.s, %[temp], %[width]\n"
- "incw %[temp], all, mul #1\n"
- "whilelt p3.s, %[temp], %[width]\n"
- "cbz %[beta0], 1f\n"
- "mov z16.s, #0\n"
+ "ptrue p7.s\n"
+ "ld1w z16.s, p0/z, [%[biasptr]]\n"
+ "whilelt p1.s, %[temp], %[width]\n"
"ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
- "mov z17.s, #0\n"
+ "incw %[temp], all, mul #1\n"
+ "mov z20.d, z16.d\n"
+ "ld1w z17.s, p1/z, [%[biasptr], #1, MUL VL]\n"
+ "mov z24.d, z16.d\n"
"ld1rqw z1.s, p7/z, [a_ptr1]\n"
- "mov z18.s, #0\n"
+ "mov z28.d, z16.d\n"
"ld1rqw z2.s, p7/z, [a_ptr2]\n"
- "mov z19.s, #0\n"
"ld1rqw z3.s, p7/z, [a_ptr3]\n"
- "mov z20.s, #0\n"
+ "whilelt p2.s, %[temp], %[width]\n"
+ "mov z21.d, z17.d\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "mov z21.s, #0\n"
+ "mov z25.d, z17.d\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "mov z22.s, #0\n"
+ "mov z29.d, z17.d\n"
+ "ld1w z18.s, p2/z, [%[biasptr], #2, MUL VL]\n"
"ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "mov z23.s, #0\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "mov z24.s, #0\n"
+ "incw %[temp], all, mul #1\n"
"add %[a_ptr0], %[a_ptr0], #0x10\n"
- "mov z25.s, #0\n"
+ "mov z22.d, z18.d\n"
"add a_ptr1, a_ptr1, #0x10\n"
- "mov z26.s, #0\n"
+ "mov z26.d, z18.d\n"
+ "whilelt p3.s, %[temp], %[width]\n"
+ "mov z30.d, z18.d\n"
"add a_ptr2, a_ptr2, #0x10\n"
- "mov z27.s, #0\n"
"add a_ptr3, a_ptr3, #0x10\n"
- "mov z28.s, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z29.s, #0\n"
- "mov z30.s, #0\n"
- "mov z31.s, #0\n"
- "b 2f\n"
- "1:\n"
- "ld1rw z15.s, p7/z, [%[betaptr]]\n"
- "ld1w z16.s, p0/z, [%[c_ptr0]]\n"
- "ld1w z17.s, p1/z, [%[c_ptr0], #1, MUL VL]\n"
- "ld1w z18.s, p2/z, [%[c_ptr0], #2, MUL VL]\n"
- "ld1w z19.s, p3/z, [%[c_ptr0], #3, MUL VL]\n"
- "ld1w z20.s, p0/z, [c_ptr1]\n"
- "fmul z16.s, p7/m, z16.s, z15.s\n"
- "ld1w z21.s, p1/z, [c_ptr1, #1, MUL VL]\n"
- "fmul z17.s, p7/m, z17.s, z15.s\n"
- "ld1w z22.s, p2/z, [c_ptr1, #2, MUL VL]\n"
- "fmul z18.s, p7/m, z18.s, z15.s\n"
- "ld1w z23.s, p3/z, [c_ptr1, #3, MUL VL]\n"
- "fmul z19.s, p7/m, z19.s, z15.s\n"
- "ld1w z24.s, p0/z, [c_ptr2]\n"
- "fmul z20.s, p7/m, z20.s, z15.s\n"
- "ld1w z25.s, p1/z, [c_ptr2, #1, MUL VL]\n"
- "fmul z21.s, p7/m, z21.s, z15.s\n"
- "ld1w z26.s, p2/z, [c_ptr2, #2, MUL VL]\n"
- "fmul z22.s, p7/m, z22.s, z15.s\n"
- "ld1w z27.s, p3/z, [c_ptr2, #3, MUL VL]\n"
- "fmul z23.s, p7/m, z23.s, z15.s\n"
- "ld1w z28.s, p0/z, [c_ptr3]\n"
- "fmul z24.s, p7/m, z24.s, z15.s\n"
- "ld1w z29.s, p1/z, [c_ptr3, #1, MUL VL]\n"
- "fmul z25.s, p7/m, z25.s, z15.s\n"
- "ld1w z30.s, p2/z, [c_ptr3, #2, MUL VL]\n"
- "fmul z26.s, p7/m, z26.s, z15.s\n"
- "ld1w z31.s, p3/z, [c_ptr3, #3, MUL VL]\n"
- "fmul z27.s, p7/m, z27.s, z15.s\n"
- "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
- "fmul z28.s, p7/m, z28.s, z15.s\n"
- "ld1rqw z1.s, p7/z, [a_ptr1]\n"
- "fmul z29.s, p7/m, z29.s, z15.s\n"
- "ld1rqw z2.s, p7/z, [a_ptr2]\n"
- "fmul z30.s, p7/m, z30.s, z15.s\n"
- "ld1rqw z3.s, p7/z, [a_ptr3]\n"
- "fmul z31.s, p7/m, z31.s, z15.s\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "add a_ptr1, a_ptr1, #0x10\n"
+ "ld1w z19.s, p3/z, [%[biasptr], #3, MUL VL]\n"
"ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add a_ptr2, a_ptr2, #0x10\n"
- "add a_ptr3, a_ptr3, #0x10\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "2:\n"
+ "mov z23.d, z19.d\n"
"ld1w z12.s, p0/z, [%[b_ptr0]]\n"
+ "mov z27.d, z19.d\n"
"ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "mov z31.d, z19.d\n"
"ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "cbz %[loops], 3f\n"
- "4:\n"
+ "cbz %[loops], 1f\n"
+ "2:\n"
"fmla z16.s, z8.s, z0.s[0]\n"
"ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
"fmla z20.s, z8.s, z1.s[0]\n"
@@ -1470,23 +1417,23 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z17.s, z9.s, z0.s[0]\n"
"ld1rqw z7.s, p7/z, [a_ptr3]\n"
"fmla z21.s, z9.s, z1.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
+ "subs %[loops], %[loops], #0x1\n"
"fmla z25.s, z9.s, z2.s[0]\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
+ "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z29.s, z9.s, z3.s[0]\n"
- "add a_ptr1, a_ptr1, #0x20\n"
- "fmla z18.s, z10.s, z0.s[0]\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z22.s, z10.s, z1.s[0]\n"
+ "fmla z18.s, z10.s, z0.s[0]\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "fmla z22.s, z10.s, z1.s[0]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x20\n"
"fmla z26.s, z10.s, z2.s[0]\n"
- "add a_ptr2, a_ptr2, #0x20\n"
+ "add a_ptr1, a_ptr1, #0x20\n"
"fmla z30.s, z10.s, z3.s[0]\n"
"ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z11.s, z0.s[0]\n"
- "add a_ptr3, a_ptr3, #0x20\n"
+ "add a_ptr2, a_ptr2, #0x20\n"
"fmla z23.s, z11.s, z1.s[0]\n"
- "subs %[loops], %[loops], #0x1\n"
+ "add a_ptr3, a_ptr3, #0x20\n"
"fmla z27.s, z11.s, z2.s[0]\n"
"fmla z31.s, z11.s, z3.s[0]\n"
"ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
@@ -1495,8 +1442,8 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z20.s, z12.s, z1.s[1]\n"
"fmla z24.s, z12.s, z2.s[1]\n"
"fmla z28.s, z12.s, z3.s[1]\n"
- "fmla z17.s, z13.s, z0.s[1]\n"
"ld1w z12.s, p0/z, [%[b_ptr0]]\n"
+ "fmla z17.s, z13.s, z0.s[1]\n"
"fmla z21.s, z13.s, z1.s[1]\n"
"fmla z25.s, z13.s, z2.s[1]\n"
"fmla z29.s, z13.s, z3.s[1]\n"
@@ -1516,8 +1463,8 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z20.s, z8.s, z1.s[2]\n"
"fmla z24.s, z8.s, z2.s[2]\n"
"fmla z28.s, z8.s, z3.s[2]\n"
- "fmla z17.s, z9.s, z0.s[2]\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
+ "fmla z17.s, z9.s, z0.s[2]\n"
"fmla z21.s, z9.s, z1.s[2]\n"
"fmla z25.s, z9.s, z2.s[2]\n"
"fmla z29.s, z9.s, z3.s[2]\n"
@@ -1537,8 +1484,8 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z20.s, z12.s, z1.s[3]\n"
"fmla z24.s, z12.s, z2.s[3]\n"
"fmla z28.s, z12.s, z3.s[3]\n"
- "fmla z17.s, z13.s, z0.s[3]\n"
"ld1w z12.s, p0/z, [%[b_ptr0]]\n"
+ "fmla z17.s, z13.s, z0.s[3]\n"
"fmla z21.s, z13.s, z1.s[3]\n"
"fmla z25.s, z13.s, z2.s[3]\n"
"fmla z29.s, z13.s, z3.s[3]\n"
@@ -1562,9 +1509,9 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z24.s, z8.s, z6.s[0]\n"
"fmla z28.s, z8.s, z7.s[0]\n"
+ "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"fmla z17.s, z9.s, z4.s[0]\n"
"fmla z21.s, z9.s, z5.s[0]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"fmla z25.s, z9.s, z6.s[0]\n"
"fmla z29.s, z9.s, z7.s[0]\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
@@ -1583,8 +1530,8 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z20.s, z12.s, z5.s[1]\n"
"fmla z24.s, z12.s, z6.s[1]\n"
"fmla z28.s, z12.s, z7.s[1]\n"
- "fmla z17.s, z13.s, z4.s[1]\n"
"ld1w z12.s, p0/z, [%[b_ptr0]]\n"
+ "fmla z17.s, z13.s, z4.s[1]\n"
"fmla z21.s, z13.s, z5.s[1]\n"
"fmla z25.s, z13.s, z6.s[1]\n"
"fmla z29.s, z13.s, z7.s[1]\n"
@@ -1604,8 +1551,8 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z20.s, z8.s, z5.s[2]\n"
"fmla z24.s, z8.s, z6.s[2]\n"
"fmla z28.s, z8.s, z7.s[2]\n"
- "fmla z17.s, z9.s, z4.s[2]\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
+ "fmla z17.s, z9.s, z4.s[2]\n"
"fmla z21.s, z9.s, z5.s[2]\n"
"fmla z25.s, z9.s, z6.s[2]\n"
"fmla z29.s, z9.s, z7.s[2]\n"
@@ -1625,8 +1572,8 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z20.s, z12.s, z5.s[3]\n"
"fmla z24.s, z12.s, z6.s[3]\n"
"fmla z28.s, z12.s, z7.s[3]\n"
- "fmla z17.s, z13.s, z4.s[3]\n"
"ld1w z12.s, p0/z, [%[b_ptr0]]\n"
+ "fmla z17.s, z13.s, z4.s[3]\n"
"fmla z21.s, z13.s, z5.s[3]\n"
"fmla z25.s, z13.s, z6.s[3]\n"
"fmla z29.s, z13.s, z7.s[3]\n"
@@ -1640,28 +1587,28 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z23.s, z15.s, z5.s[3]\n"
"fmla z27.s, z15.s, z6.s[3]\n"
"fmla z31.s, z15.s, z7.s[3]\n"
- "b.ne 4b\n"
- "3:\n"
- "cbz %[regs], 5f\n"
- "fmla z16.s, z8.s, z0.s[0]\n"
+ "b.ne 2b\n"
+ "1:\n"
"ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z20.s, z8.s, z1.s[0]\n"
+ "cbz %[regs], 3f\n"
+ "fmla z16.s, z8.s, z0.s[0]\n"
"ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
- "fmla z24.s, z8.s, z2.s[0]\n"
+ "fmla z20.s, z8.s, z1.s[0]\n"
"ld1rqw z5.s, p7/z, [a_ptr1]\n"
- "fmla z28.s, z8.s, z3.s[0]\n"
+ "fmla z24.s, z8.s, z2.s[0]\n"
"ld1rqw z6.s, p7/z, [a_ptr2]\n"
- "fmla z17.s, z9.s, z0.s[0]\n"
+ "fmla z28.s, z8.s, z3.s[0]\n"
"ld1rqw z7.s, p7/z, [a_ptr3]\n"
- "fmla z21.s, z9.s, z1.s[0]\n"
+ "fmla z17.s, z9.s, z0.s[0]\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
+ "fmla z21.s, z9.s, z1.s[0]\n"
+ "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"fmla z25.s, z9.s, z2.s[0]\n"
"fmla z29.s, z9.s, z3.s[0]\n"
+ "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z10.s, z0.s[0]\n"
"fmla z22.s, z10.s, z1.s[0]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"fmla z26.s, z10.s, z2.s[0]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z30.s, z10.s, z3.s[0]\n"
"ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z11.s, z0.s[0]\n"
@@ -1674,8 +1621,8 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z20.s, z12.s, z1.s[1]\n"
"fmla z24.s, z12.s, z2.s[1]\n"
"fmla z28.s, z12.s, z3.s[1]\n"
- "fmla z17.s, z13.s, z0.s[1]\n"
"ld1w z12.s, p0/z, [%[b_ptr0]]\n"
+ "fmla z17.s, z13.s, z0.s[1]\n"
"fmla z21.s, z13.s, z1.s[1]\n"
"fmla z25.s, z13.s, z2.s[1]\n"
"fmla z29.s, z13.s, z3.s[1]\n"
@@ -1695,8 +1642,8 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z20.s, z8.s, z1.s[2]\n"
"fmla z24.s, z8.s, z2.s[2]\n"
"fmla z28.s, z8.s, z3.s[2]\n"
- "fmla z17.s, z9.s, z0.s[2]\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
+ "fmla z17.s, z9.s, z0.s[2]\n"
"fmla z21.s, z9.s, z1.s[2]\n"
"fmla z25.s, z9.s, z2.s[2]\n"
"fmla z29.s, z9.s, z3.s[2]\n"
@@ -1716,8 +1663,8 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z20.s, z12.s, z1.s[3]\n"
"fmla z24.s, z12.s, z2.s[3]\n"
"fmla z28.s, z12.s, z3.s[3]\n"
- "fmla z17.s, z13.s, z0.s[3]\n"
"ld1w z12.s, p0/z, [%[b_ptr0]]\n"
+ "fmla z17.s, z13.s, z0.s[3]\n"
"fmla z21.s, z13.s, z1.s[3]\n"
"fmla z25.s, z13.s, z2.s[3]\n"
"fmla z29.s, z13.s, z3.s[3]\n"
@@ -1740,11 +1687,15 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z20.s, z8.s, z5.s[0]\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"fmla z24.s, z8.s, z6.s[0]\n"
+ "addvl %[a_ptr0], %[a_ptr0], #2\n"
"fmla z28.s, z8.s, z7.s[0]\n"
+ "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"fmla z17.s, z9.s, z4.s[0]\n"
+ "addvl a_ptr1, a_ptr1, #2\n"
"fmla z21.s, z9.s, z5.s[0]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
+ "addvl a_ptr2, a_ptr2, #2\n"
"fmla z25.s, z9.s, z6.s[0]\n"
+ "addvl a_ptr3, a_ptr3, #2\n"
"fmla z29.s, z9.s, z7.s[0]\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z10.s, z4.s[0]\n"
@@ -1762,8 +1713,8 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z20.s, z12.s, z5.s[1]\n"
"fmla z24.s, z12.s, z6.s[1]\n"
"fmla z28.s, z12.s, z7.s[1]\n"
- "fmla z17.s, z13.s, z4.s[1]\n"
"ld1w z12.s, p0/z, [%[b_ptr0]]\n"
+ "fmla z17.s, z13.s, z4.s[1]\n"
"fmla z21.s, z13.s, z5.s[1]\n"
"fmla z25.s, z13.s, z6.s[1]\n"
"fmla z29.s, z13.s, z7.s[1]\n"
@@ -1810,7 +1761,7 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z23.s, z15.s, z5.s[3]\n"
"fmla z27.s, z15.s, z6.s[3]\n"
"fmla z31.s, z15.s, z7.s[3]\n"
- "cbz %[blocks], 6f\n"
+ "cbz %[blocks], 4f\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"subs %[blocks], %[blocks], #0x1\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
@@ -1833,7 +1784,7 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z23.s, z11.s, z1.s[0]\n"
"fmla z27.s, z11.s, z2.s[0]\n"
"fmla z31.s, z11.s, z3.s[0]\n"
- "b.eq 6f\n"
+ "b.eq 4f\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"subs %[blocks], %[blocks], #0x1\n"
"ld1w z12.s, p0/z, [%[b_ptr0]]\n"
@@ -1856,7 +1807,7 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z23.s, z15.s, z1.s[1]\n"
"fmla z27.s, z15.s, z2.s[1]\n"
"fmla z31.s, z15.s, z3.s[1]\n"
- "b.eq 6f\n"
+ "b.eq 4f\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
@@ -1878,27 +1829,30 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z23.s, z11.s, z1.s[2]\n"
"fmla z27.s, z11.s, z2.s[2]\n"
"fmla z31.s, z11.s, z3.s[2]\n"
- "b 6f\n"
- "5:\n"
+ "b 4f\n"
+ "3:\n"
"fmla z16.s, z8.s, z0.s[0]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z20.s, z8.s, z1.s[0]\n"
"ld1rqw z4.s, p6/z, [%[a_ptr0]]\n"
- "fmla z24.s, z8.s, z2.s[0]\n"
+ "fmla z20.s, z8.s, z1.s[0]\n"
"ld1rqw z5.s, p6/z, [a_ptr1]\n"
- "fmla z28.s, z8.s, z3.s[0]\n"
+ "fmla z24.s, z8.s, z2.s[0]\n"
"ld1rqw z6.s, p6/z, [a_ptr2]\n"
- "fmla z17.s, z9.s, z0.s[0]\n"
+ "fmla z28.s, z8.s, z3.s[0]\n"
"ld1rqw z7.s, p6/z, [a_ptr3]\n"
- "fmla z21.s, z9.s, z1.s[0]\n"
+ "fmla z17.s, z9.s, z0.s[0]\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
+ "fmla z21.s, z9.s, z1.s[0]\n"
+ "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"fmla z25.s, z9.s, z2.s[0]\n"
+ "addvl %[a_ptr0], %[a_ptr0], #1\n"
"fmla z29.s, z9.s, z3.s[0]\n"
+ "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
"fmla z18.s, z10.s, z0.s[0]\n"
+ "addvl a_ptr1, a_ptr1, #1\n"
"fmla z22.s, z10.s, z1.s[0]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
+ "addvl a_ptr2, a_ptr2, #1\n"
"fmla z26.s, z10.s, z2.s[0]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
+ "addvl a_ptr3, a_ptr3, #1\n"
"fmla z30.s, z10.s, z3.s[0]\n"
"ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
"fmla z19.s, z11.s, z0.s[0]\n"
@@ -1911,8 +1865,8 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z20.s, z12.s, z1.s[1]\n"
"fmla z24.s, z12.s, z2.s[1]\n"
"fmla z28.s, z12.s, z3.s[1]\n"
- "fmla z17.s, z13.s, z0.s[1]\n"
"ld1w z12.s, p0/z, [%[b_ptr0]]\n"
+ "fmla z17.s, z13.s, z0.s[1]\n"
"fmla z21.s, z13.s, z1.s[1]\n"
"fmla z25.s, z13.s, z2.s[1]\n"
"fmla z29.s, z13.s, z3.s[1]\n"
@@ -1959,7 +1913,7 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z23.s, z15.s, z1.s[3]\n"
"fmla z27.s, z15.s, z2.s[3]\n"
"fmla z31.s, z15.s, z3.s[3]\n"
- "cbz %[blocks], 6f\n"
+ "cbz %[blocks], 4f\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"subs %[blocks], %[blocks], #0x1\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
@@ -1982,7 +1936,7 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z23.s, z11.s, z5.s[0]\n"
"fmla z27.s, z11.s, z6.s[0]\n"
"fmla z31.s, z11.s, z7.s[0]\n"
- "b.eq 6f\n"
+ "b.eq 4f\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"subs %[blocks], %[blocks], #0x1\n"
"ld1w z12.s, p0/z, [%[b_ptr0]]\n"
@@ -2005,7 +1959,7 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z23.s, z15.s, z5.s[1]\n"
"fmla z27.s, z15.s, z6.s[1]\n"
"fmla z31.s, z15.s, z7.s[1]\n"
- "b.eq 6f\n"
+ "b.eq 4f\n"
"add %[b_ptr0], %[b_ptr0], %[ldb]\n"
"ld1w z8.s, p0/z, [%[b_ptr0]]\n"
"ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
@@ -2027,17 +1981,51 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
"fmla z23.s, z11.s, z5.s[2]\n"
"fmla z27.s, z11.s, z6.s[2]\n"
"fmla z31.s, z11.s, z7.s[2]\n"
- "6:\n"
+ "4:\n"
+ "ld1rw z14.s, p7/z, [%[minptr]]\n"
+ "ld1rw z15.s, p7/z, [%[maxptr]]\n"
+ "fmax z16.s, p7/m, z16.s, z14.s\n"
+ "fmax z17.s, p7/m, z17.s, z14.s\n"
+ "fmax z18.s, p7/m, z18.s, z14.s\n"
+ "fmax z19.s, p7/m, z19.s, z14.s\n"
+ "fmin z16.s, p7/m, z16.s, z15.s\n"
+ "fmin z17.s, p7/m, z17.s, z15.s\n"
+ "fmin z18.s, p7/m, z18.s, z15.s\n"
+ "fmin z19.s, p7/m, z19.s, z15.s\n"
"st1w z16.s, p0, [%[c_ptr0]]\n"
+ "fmax z20.s, p7/m, z20.s, z14.s\n"
+ "fmax z21.s, p7/m, z21.s, z14.s\n"
+ "fmax z22.s, p7/m, z22.s, z14.s\n"
"st1w z17.s, p1, [%[c_ptr0], #1, MUL VL]\n"
+ "fmax z23.s, p7/m, z23.s, z14.s\n"
+ "fmin z20.s, p7/m, z20.s, z15.s\n"
+ "fmin z21.s, p7/m, z21.s, z15.s\n"
"st1w z18.s, p2, [%[c_ptr0], #2, MUL VL]\n"
+ "fmin z22.s, p7/m, z22.s, z15.s\n"
+ "fmin z23.s, p7/m, z23.s, z15.s\n"
+ "fmax z24.s, p7/m, z24.s, z14.s\n"
"st1w z19.s, p3, [%[c_ptr0], #3, MUL VL]\n"
+ "fmax z25.s, p7/m, z25.s, z14.s\n"
"addvl %[c_ptr0], %[c_ptr0], #4\n"
+ "fmax z26.s, p7/m, z26.s, z14.s\n"
"st1w z20.s, p0, [c_ptr1]\n"
+ "fmin z24.s, p7/m, z24.s, z15.s\n"
+ "fmin z25.s, p7/m, z25.s, z15.s\n"
+ "fmax z27.s, p7/m, z27.s, z14.s\n"
"st1w z21.s, p1, [c_ptr1, #1, MUL VL]\n"
+ "fmin z26.s, p7/m, z26.s, z15.s\n"
+ "fmax z28.s, p7/m, z28.s, z14.s\n"
+ "fmax z29.s, p7/m, z29.s, z14.s\n"
"st1w z22.s, p2, [c_ptr1, #2, MUL VL]\n"
+ "fmin z27.s, p7/m, z27.s, z15.s\n"
+ "fmax z30.s, p7/m, z30.s, z14.s\n"
+ "fmin z28.s, p7/m, z28.s, z15.s\n"
"st1w z23.s, p3, [c_ptr1, #3, MUL VL]\n"
+ "fmin z29.s, p7/m, z29.s, z15.s\n"
+ "fmax z31.s, p7/m, z31.s, z14.s\n"
+ "fmin z30.s, p7/m, z30.s, z15.s\n"
"st1w z24.s, p0, [c_ptr2]\n"
+ "fmin z31.s, p7/m, z31.s, z15.s\n"
"st1w z25.s, p1, [c_ptr2, #1, MUL VL]\n"
"st1w z26.s, p2, [c_ptr2, #2, MUL VL]\n"
"st1w z27.s, p3, [c_ptr2, #3, MUL VL]\n"
@@ -2052,11 +2040,12 @@ void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb,
".unreq c_ptr2\n"
".unreq c_ptr3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [betaptr] "r" (betaptr), [width] "r" (width), [beta0] "r" (beta0), [leftovers] "r" (leftovers), [lda] "r" (ldab), [ldc] "r" (ldcb), [ldb] "r" (ldbb)
+ : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers), [ldb] "r" (ldbb)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "x4", "x5", "cc", "memory"
);
break;
}
+
}
}
}