about summary refs log tree commit diff
path: root/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/a64fx.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/a64fx.cpp')
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/a64fx.cpp | 679
1 files changed, 339 insertions, 340 deletions
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/a64fx.cpp
index 9ae51af59b..66481f04f9 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/a64fx.cpp
@@ -139,11 +139,11 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"6:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
"ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 7f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
"cbnz x28, 8f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
"add x26, x26, x20, LSL #2\n"
@@ -159,12 +159,12 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"9:" // Height 1: Multiply loop: Main loop
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
- "ld1w { z6.s }, p4/Z, [x10, #2, MUL VL]\n"
- "ld1w { z7.s }, p4/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z17.s }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
"add x26, x26, #0x4\n"
- "fmla z10.s, p4/M, z6.s, z0.s\n"
- "fmla z11.s, p4/M, z7.s, z0.s\n"
+ "fmla z10.s, p4/M, z17.s, z0.s\n"
+ "fmla z11.s, p4/M, z16.s, z0.s\n"
"subs x27, x27, #0x1\n"
"ld1rw { z0.s }, p4/Z, [x26]\n"
"ld1w { z6.s }, p4/Z, [x10]\n"
@@ -174,27 +174,27 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
- "ld1w { z6.s }, p4/Z, [x10, #2, MUL VL]\n"
- "ld1w { z7.s }, p4/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z17.s }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x10, #3, MUL VL]\n"
"add x28, x28, #0x1\n"
"cmp x28, x20\n"
- "fmla z10.s, p4/M, z6.s, z0.s\n"
- "fmla z11.s, p4/M, z7.s, z0.s\n"
+ "fmla z10.s, p4/M, z17.s, z0.s\n"
+ "fmla z11.s, p4/M, z16.s, z0.s\n"
"addvl x10, x10, #4\n"
"bne 6b\n"
"tbz %x[flags], #1, 11f\n"
"add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p4/Z, [x20]\n"
+ "ld1rw { z17.s }, p4/Z, [x20]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p4/Z, [x20]\n"
- "fmin z8.s, p4/M, z8.s, z1.s\n"
- "fmin z9.s, p4/M, z9.s, z1.s\n"
- "fmin z10.s, p4/M, z10.s, z1.s\n"
- "fmin z11.s, p4/M, z11.s, z1.s\n"
- "fmax z8.s, p4/M, z8.s, z0.s\n"
- "fmax z9.s, p4/M, z9.s, z0.s\n"
- "fmax z10.s, p4/M, z10.s, z0.s\n"
- "fmax z11.s, p4/M, z11.s, z0.s\n"
+ "ld1rw { z16.s }, p4/Z, [x20]\n"
+ "fmin z8.s, p4/M, z8.s, z17.s\n"
+ "fmin z9.s, p4/M, z9.s, z17.s\n"
+ "fmin z10.s, p4/M, z10.s, z17.s\n"
+ "fmin z11.s, p4/M, z11.s, z17.s\n"
+ "fmax z8.s, p4/M, z8.s, z16.s\n"
+ "fmax z9.s, p4/M, z9.s, z16.s\n"
+ "fmax z10.s, p4/M, z10.s, z16.s\n"
+ "fmax z11.s, p4/M, z11.s, z16.s\n"
"11:" // Height 1: No activation
"st1w { z8.s }, p3, [x9]\n"
"st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
@@ -234,15 +234,15 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"15:" // Height 2: no bias
"tbz %x[flags], #0, 16f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "add x20, x9, x20, LSL #2\n"
"ld1w { z8.s }, p3/Z, [x9]\n"
"ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x25]\n"
- "ld1w { z13.s }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p0/Z, [x25, #3, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x20]\n"
+ "ld1w { z13.s }, p2/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x20, #3, MUL VL]\n"
"b 17f\n"
"16:" // Height 2: no accumulate
"mov z8.b, #0x0\n"
@@ -258,12 +258,12 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"18:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
"ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 19f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
"cbnz x28, 20f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
"add x26, x26, x20, LSL #2\n"
@@ -271,7 +271,7 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"b 20f\n"
"19:" // Height 2: setup direct input
"mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
+ "add x25, x26, x21, LSL #2\n"
"20:" // Height 2: input setup done
"subs x27, x27, #0x1\n"
"ld1rw { z0.s }, p4/Z, [x26]\n"
@@ -282,18 +282,18 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"21:" // Height 2: Multiply loop: Main loop
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z12.s, p4/M, z6.s, z1.s\n"
- "ld1w { z6.s }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z17.s }, p4/Z, [x10, #2, MUL VL]\n"
"add x26, x26, #0x4\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
- "ld1w { z7.s }, p4/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
"subs x27, x27, #0x1\n"
"add x25, x25, #0x4\n"
- "fmla z10.s, p4/M, z6.s, z0.s\n"
- "fmla z14.s, p4/M, z6.s, z1.s\n"
- "fmla z11.s, p4/M, z7.s, z0.s\n"
- "fmla z15.s, p4/M, z7.s, z1.s\n"
+ "fmla z10.s, p4/M, z17.s, z0.s\n"
+ "fmla z14.s, p4/M, z17.s, z1.s\n"
+ "fmla z11.s, p4/M, z16.s, z0.s\n"
+ "fmla z15.s, p4/M, z16.s, z1.s\n"
"ld1rw { z0.s }, p4/Z, [x26]\n"
"ld1rw { z1.s }, p4/Z, [x25]\n"
"ld1w { z6.s }, p4/Z, [x10]\n"
@@ -303,41 +303,41 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z12.s, p4/M, z6.s, z1.s\n"
- "ld1w { z6.s }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z17.s }, p4/Z, [x10, #2, MUL VL]\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
- "ld1w { z7.s }, p4/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x10, #3, MUL VL]\n"
"add x28, x28, #0x1\n"
"cmp x28, x20\n"
- "fmla z10.s, p4/M, z6.s, z0.s\n"
- "fmla z14.s, p4/M, z6.s, z1.s\n"
+ "fmla z10.s, p4/M, z17.s, z0.s\n"
+ "fmla z14.s, p4/M, z17.s, z1.s\n"
"addvl x10, x10, #4\n"
- "fmla z11.s, p4/M, z7.s, z0.s\n"
- "fmla z15.s, p4/M, z7.s, z1.s\n"
+ "fmla z11.s, p4/M, z16.s, z0.s\n"
+ "fmla z15.s, p4/M, z16.s, z1.s\n"
"bne 18b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"add x25, x9, x20, LSL #2\n"
"tbz %x[flags], #1, 23f\n"
"add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p4/Z, [x20]\n"
+ "ld1rw { z17.s }, p4/Z, [x20]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p4/Z, [x20]\n"
- "fmin z8.s, p4/M, z8.s, z1.s\n"
- "fmin z9.s, p4/M, z9.s, z1.s\n"
- "fmin z10.s, p4/M, z10.s, z1.s\n"
- "fmin z11.s, p4/M, z11.s, z1.s\n"
- "fmin z12.s, p4/M, z12.s, z1.s\n"
- "fmin z13.s, p4/M, z13.s, z1.s\n"
- "fmin z14.s, p4/M, z14.s, z1.s\n"
- "fmin z15.s, p4/M, z15.s, z1.s\n"
- "fmax z8.s, p4/M, z8.s, z0.s\n"
- "fmax z9.s, p4/M, z9.s, z0.s\n"
- "fmax z10.s, p4/M, z10.s, z0.s\n"
- "fmax z11.s, p4/M, z11.s, z0.s\n"
- "fmax z12.s, p4/M, z12.s, z0.s\n"
- "fmax z13.s, p4/M, z13.s, z0.s\n"
- "fmax z14.s, p4/M, z14.s, z0.s\n"
- "fmax z15.s, p4/M, z15.s, z0.s\n"
+ "ld1rw { z16.s }, p4/Z, [x20]\n"
+ "fmin z8.s, p4/M, z8.s, z17.s\n"
+ "fmin z9.s, p4/M, z9.s, z17.s\n"
+ "fmin z10.s, p4/M, z10.s, z17.s\n"
+ "fmin z11.s, p4/M, z11.s, z17.s\n"
+ "fmin z12.s, p4/M, z12.s, z17.s\n"
+ "fmin z13.s, p4/M, z13.s, z17.s\n"
+ "fmin z14.s, p4/M, z14.s, z17.s\n"
+ "fmin z15.s, p4/M, z15.s, z17.s\n"
+ "fmax z8.s, p4/M, z8.s, z16.s\n"
+ "fmax z9.s, p4/M, z9.s, z16.s\n"
+ "fmax z10.s, p4/M, z10.s, z16.s\n"
+ "fmax z11.s, p4/M, z11.s, z16.s\n"
+ "fmax z12.s, p4/M, z12.s, z16.s\n"
+ "fmax z13.s, p4/M, z13.s, z16.s\n"
+ "fmax z14.s, p4/M, z14.s, z16.s\n"
+ "fmax z15.s, p4/M, z15.s, z16.s\n"
"23:" // Height 2: No activation
"st1w { z8.s }, p3, [x9]\n"
"st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
@@ -385,20 +385,20 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"27:" // Height 3: no bias
"tbz %x[flags], #0, 28f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x21, x9, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z8.s }, p3/Z, [x9]\n"
"ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x25]\n"
- "ld1w { z13.s }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p0/Z, [x25, #3, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x24]\n"
- "ld1w { z17.s }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1w { z18.s }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1w { z19.s }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x21]\n"
+ "ld1w { z13.s }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x21, #3, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x20]\n"
+ "ld1w { z17.s }, p2/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z19.s }, p0/Z, [x20, #3, MUL VL]\n"
"b 29f\n"
"28:" // Height 3: no accumulate
"mov z8.b, #0x0\n"
@@ -418,13 +418,13 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"30:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
"ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 31f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "ldr x24, [x20, #0x10]\n"
"cbnz x28, 32f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
"add x26, x26, x20, LSL #2\n"
@@ -433,8 +433,8 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"b 32f\n"
"31:" // Height 3: setup direct input
"mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x25, x26, x21, LSL #2\n"
+ "add x24, x25, x21, LSL #2\n"
"32:" // Height 3: input setup done
"subs x27, x27, #0x1\n"
"ld1rw { z0.s }, p4/Z, [x26]\n"
@@ -450,21 +450,21 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"subs x27, x27, #0x1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
- "ld1w { z6.s }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z21.s }, p4/Z, [x10, #2, MUL VL]\n"
"add x25, x25, #0x4\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
- "ld1w { z7.s }, p4/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z20.s }, p4/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
"add x24, x24, #0x4\n"
- "fmla z10.s, p4/M, z6.s, z0.s\n"
- "fmla z14.s, p4/M, z6.s, z1.s\n"
- "fmla z18.s, p4/M, z6.s, z2.s\n"
- "fmla z11.s, p4/M, z7.s, z0.s\n"
+ "fmla z10.s, p4/M, z21.s, z0.s\n"
+ "fmla z14.s, p4/M, z21.s, z1.s\n"
+ "fmla z18.s, p4/M, z21.s, z2.s\n"
+ "fmla z11.s, p4/M, z20.s, z0.s\n"
"ld1rw { z0.s }, p4/Z, [x26]\n"
"ld1w { z6.s }, p4/Z, [x10]\n"
- "fmla z15.s, p4/M, z7.s, z1.s\n"
- "fmla z19.s, p4/M, z7.s, z2.s\n"
+ "fmla z15.s, p4/M, z20.s, z1.s\n"
+ "fmla z19.s, p4/M, z20.s, z2.s\n"
"ld1rw { z1.s }, p4/Z, [x25]\n"
"ld1rw { z2.s }, p4/Z, [x24]\n"
"ld1w { z7.s }, p4/Z, [x10, #1, MUL VL]\n"
@@ -476,51 +476,51 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"add x28, x28, #0x1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
- "ld1w { z6.s }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z21.s }, p4/Z, [x10, #2, MUL VL]\n"
"cmp x28, x20\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
- "ld1w { z7.s }, p4/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z20.s }, p4/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
- "fmla z10.s, p4/M, z6.s, z0.s\n"
- "fmla z14.s, p4/M, z6.s, z1.s\n"
- "fmla z18.s, p4/M, z6.s, z2.s\n"
- "fmla z11.s, p4/M, z7.s, z0.s\n"
- "fmla z15.s, p4/M, z7.s, z1.s\n"
- "fmla z19.s, p4/M, z7.s, z2.s\n"
+ "fmla z10.s, p4/M, z21.s, z0.s\n"
+ "fmla z14.s, p4/M, z21.s, z1.s\n"
+ "fmla z18.s, p4/M, z21.s, z2.s\n"
+ "fmla z11.s, p4/M, z20.s, z0.s\n"
+ "fmla z15.s, p4/M, z20.s, z1.s\n"
+ "fmla z19.s, p4/M, z20.s, z2.s\n"
"bne 30b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"add x25, x9, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"tbz %x[flags], #1, 35f\n"
"add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p4/Z, [x20]\n"
+ "ld1rw { z21.s }, p4/Z, [x20]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p4/Z, [x20]\n"
- "fmin z8.s, p4/M, z8.s, z1.s\n"
- "fmin z9.s, p4/M, z9.s, z1.s\n"
- "fmin z10.s, p4/M, z10.s, z1.s\n"
- "fmin z11.s, p4/M, z11.s, z1.s\n"
- "fmin z12.s, p4/M, z12.s, z1.s\n"
- "fmin z13.s, p4/M, z13.s, z1.s\n"
- "fmin z14.s, p4/M, z14.s, z1.s\n"
- "fmin z15.s, p4/M, z15.s, z1.s\n"
- "fmin z16.s, p4/M, z16.s, z1.s\n"
- "fmin z17.s, p4/M, z17.s, z1.s\n"
- "fmin z18.s, p4/M, z18.s, z1.s\n"
- "fmin z19.s, p4/M, z19.s, z1.s\n"
- "fmax z8.s, p4/M, z8.s, z0.s\n"
- "fmax z9.s, p4/M, z9.s, z0.s\n"
- "fmax z10.s, p4/M, z10.s, z0.s\n"
- "fmax z11.s, p4/M, z11.s, z0.s\n"
- "fmax z12.s, p4/M, z12.s, z0.s\n"
- "fmax z13.s, p4/M, z13.s, z0.s\n"
- "fmax z14.s, p4/M, z14.s, z0.s\n"
- "fmax z15.s, p4/M, z15.s, z0.s\n"
- "fmax z16.s, p4/M, z16.s, z0.s\n"
- "fmax z17.s, p4/M, z17.s, z0.s\n"
- "fmax z18.s, p4/M, z18.s, z0.s\n"
- "fmax z19.s, p4/M, z19.s, z0.s\n"
+ "ld1rw { z20.s }, p4/Z, [x20]\n"
+ "fmin z8.s, p4/M, z8.s, z21.s\n"
+ "fmin z9.s, p4/M, z9.s, z21.s\n"
+ "fmin z10.s, p4/M, z10.s, z21.s\n"
+ "fmin z11.s, p4/M, z11.s, z21.s\n"
+ "fmin z12.s, p4/M, z12.s, z21.s\n"
+ "fmin z13.s, p4/M, z13.s, z21.s\n"
+ "fmin z14.s, p4/M, z14.s, z21.s\n"
+ "fmin z15.s, p4/M, z15.s, z21.s\n"
+ "fmin z16.s, p4/M, z16.s, z21.s\n"
+ "fmin z17.s, p4/M, z17.s, z21.s\n"
+ "fmin z18.s, p4/M, z18.s, z21.s\n"
+ "fmin z19.s, p4/M, z19.s, z21.s\n"
+ "fmax z8.s, p4/M, z8.s, z20.s\n"
+ "fmax z9.s, p4/M, z9.s, z20.s\n"
+ "fmax z10.s, p4/M, z10.s, z20.s\n"
+ "fmax z11.s, p4/M, z11.s, z20.s\n"
+ "fmax z12.s, p4/M, z12.s, z20.s\n"
+ "fmax z13.s, p4/M, z13.s, z20.s\n"
+ "fmax z14.s, p4/M, z14.s, z20.s\n"
+ "fmax z15.s, p4/M, z15.s, z20.s\n"
+ "fmax z16.s, p4/M, z16.s, z20.s\n"
+ "fmax z17.s, p4/M, z17.s, z20.s\n"
+ "fmax z18.s, p4/M, z18.s, z20.s\n"
+ "fmax z19.s, p4/M, z19.s, z20.s\n"
"35:" // Height 3: No activation
"st1w { z8.s }, p3, [x9]\n"
"st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
@@ -576,25 +576,25 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"39:" // Height 4: no bias
"tbz %x[flags], #0, 40f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x22, x9, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
"ld1w { z8.s }, p3/Z, [x9]\n"
- "add x23, x24, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x25]\n"
- "ld1w { z13.s }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p0/Z, [x25, #3, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x24]\n"
- "ld1w { z17.s }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1w { z18.s }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1w { z19.s }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1w { z20.s }, p3/Z, [x23]\n"
- "ld1w { z21.s }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1w { z22.s }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1w { z23.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x22]\n"
+ "ld1w { z13.s }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x21]\n"
+ "ld1w { z17.s }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z19.s }, p0/Z, [x21, #3, MUL VL]\n"
+ "ld1w { z20.s }, p3/Z, [x20]\n"
+ "ld1w { z21.s }, p2/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z22.s }, p1/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z23.s }, p0/Z, [x20, #3, MUL VL]\n"
"b 41f\n"
"40:" // Height 4: no accumulate
"mov z8.b, #0x0\n"
@@ -618,14 +618,14 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"42:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
"ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 43f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "ldr x24, [x20, #0x10]\n"
+ "ldr x23, [x20, #0x18]\n"
"cbnz x28, 44f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
"add x26, x26, x20, LSL #2\n"
@@ -635,9 +635,9 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"b 44f\n"
"43:" // Height 4: setup direct input
"mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "add x25, x26, x21, LSL #2\n"
+ "add x24, x25, x21, LSL #2\n"
+ "add x23, x24, x21, LSL #2\n"
"44:" // Height 4: input setup done
"subs x27, x27, #0x1\n"
"ld1rw { z0.s }, p4/Z, [x26]\n"
@@ -654,7 +654,7 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"subs x27, x27, #0x1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
"fmla z20.s, p4/M, z6.s, z3.s\n"
- "ld1w { z6.s }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z25.s }, p4/Z, [x10, #2, MUL VL]\n"
"add x25, x25, #0x4\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
@@ -662,19 +662,19 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"add x23, x23, #0x4\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
"fmla z21.s, p4/M, z7.s, z3.s\n"
- "ld1w { z7.s }, p4/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z24.s }, p4/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
- "fmla z10.s, p4/M, z6.s, z0.s\n"
- "fmla z14.s, p4/M, z6.s, z1.s\n"
- "fmla z18.s, p4/M, z6.s, z2.s\n"
- "fmla z22.s, p4/M, z6.s, z3.s\n"
+ "fmla z10.s, p4/M, z25.s, z0.s\n"
+ "fmla z14.s, p4/M, z25.s, z1.s\n"
+ "fmla z18.s, p4/M, z25.s, z2.s\n"
+ "fmla z22.s, p4/M, z25.s, z3.s\n"
"ld1w { z6.s }, p4/Z, [x10]\n"
- "fmla z11.s, p4/M, z7.s, z0.s\n"
- "fmla z15.s, p4/M, z7.s, z1.s\n"
+ "fmla z11.s, p4/M, z24.s, z0.s\n"
+ "fmla z15.s, p4/M, z24.s, z1.s\n"
"ld1rw { z0.s }, p4/Z, [x26]\n"
"ld1rw { z1.s }, p4/Z, [x25]\n"
- "fmla z19.s, p4/M, z7.s, z2.s\n"
- "fmla z23.s, p4/M, z7.s, z3.s\n"
+ "fmla z19.s, p4/M, z24.s, z2.s\n"
+ "fmla z23.s, p4/M, z24.s, z3.s\n"
"ld1rw { z2.s }, p4/Z, [x24]\n"
"ld1rw { z3.s }, p4/Z, [x23]\n"
"ld1w { z7.s }, p4/Z, [x10, #1, MUL VL]\n"
@@ -686,22 +686,22 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"add x28, x28, #0x1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
"fmla z20.s, p4/M, z6.s, z3.s\n"
- "ld1w { z6.s }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z25.s }, p4/Z, [x10, #2, MUL VL]\n"
"cmp x28, x20\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
"fmla z21.s, p4/M, z7.s, z3.s\n"
- "ld1w { z7.s }, p4/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z24.s }, p4/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
- "fmla z10.s, p4/M, z6.s, z0.s\n"
- "fmla z14.s, p4/M, z6.s, z1.s\n"
- "fmla z18.s, p4/M, z6.s, z2.s\n"
- "fmla z22.s, p4/M, z6.s, z3.s\n"
- "fmla z11.s, p4/M, z7.s, z0.s\n"
- "fmla z15.s, p4/M, z7.s, z1.s\n"
- "fmla z19.s, p4/M, z7.s, z2.s\n"
- "fmla z23.s, p4/M, z7.s, z3.s\n"
+ "fmla z10.s, p4/M, z25.s, z0.s\n"
+ "fmla z14.s, p4/M, z25.s, z1.s\n"
+ "fmla z18.s, p4/M, z25.s, z2.s\n"
+ "fmla z22.s, p4/M, z25.s, z3.s\n"
+ "fmla z11.s, p4/M, z24.s, z0.s\n"
+ "fmla z15.s, p4/M, z24.s, z1.s\n"
+ "fmla z19.s, p4/M, z24.s, z2.s\n"
+ "fmla z23.s, p4/M, z24.s, z3.s\n"
"bne 42b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"add x25, x9, x20, LSL #2\n"
@@ -709,41 +709,41 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"add x23, x24, x20, LSL #2\n"
"tbz %x[flags], #1, 47f\n"
"add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p4/Z, [x20]\n"
+ "ld1rw { z25.s }, p4/Z, [x20]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p4/Z, [x20]\n"
- "fmin z8.s, p4/M, z8.s, z1.s\n"
- "fmin z9.s, p4/M, z9.s, z1.s\n"
- "fmin z10.s, p4/M, z10.s, z1.s\n"
- "fmin z11.s, p4/M, z11.s, z1.s\n"
- "fmin z12.s, p4/M, z12.s, z1.s\n"
- "fmin z13.s, p4/M, z13.s, z1.s\n"
- "fmin z14.s, p4/M, z14.s, z1.s\n"
- "fmin z15.s, p4/M, z15.s, z1.s\n"
- "fmin z16.s, p4/M, z16.s, z1.s\n"
- "fmin z17.s, p4/M, z17.s, z1.s\n"
- "fmin z18.s, p4/M, z18.s, z1.s\n"
- "fmin z19.s, p4/M, z19.s, z1.s\n"
- "fmin z20.s, p4/M, z20.s, z1.s\n"
- "fmin z21.s, p4/M, z21.s, z1.s\n"
- "fmin z22.s, p4/M, z22.s, z1.s\n"
- "fmin z23.s, p4/M, z23.s, z1.s\n"
- "fmax z8.s, p4/M, z8.s, z0.s\n"
- "fmax z9.s, p4/M, z9.s, z0.s\n"
- "fmax z10.s, p4/M, z10.s, z0.s\n"
- "fmax z11.s, p4/M, z11.s, z0.s\n"
- "fmax z12.s, p4/M, z12.s, z0.s\n"
- "fmax z13.s, p4/M, z13.s, z0.s\n"
- "fmax z14.s, p4/M, z14.s, z0.s\n"
- "fmax z15.s, p4/M, z15.s, z0.s\n"
- "fmax z16.s, p4/M, z16.s, z0.s\n"
- "fmax z17.s, p4/M, z17.s, z0.s\n"
- "fmax z18.s, p4/M, z18.s, z0.s\n"
- "fmax z19.s, p4/M, z19.s, z0.s\n"
- "fmax z20.s, p4/M, z20.s, z0.s\n"
- "fmax z21.s, p4/M, z21.s, z0.s\n"
- "fmax z22.s, p4/M, z22.s, z0.s\n"
- "fmax z23.s, p4/M, z23.s, z0.s\n"
+ "ld1rw { z24.s }, p4/Z, [x20]\n"
+ "fmin z8.s, p4/M, z8.s, z25.s\n"
+ "fmin z9.s, p4/M, z9.s, z25.s\n"
+ "fmin z10.s, p4/M, z10.s, z25.s\n"
+ "fmin z11.s, p4/M, z11.s, z25.s\n"
+ "fmin z12.s, p4/M, z12.s, z25.s\n"
+ "fmin z13.s, p4/M, z13.s, z25.s\n"
+ "fmin z14.s, p4/M, z14.s, z25.s\n"
+ "fmin z15.s, p4/M, z15.s, z25.s\n"
+ "fmin z16.s, p4/M, z16.s, z25.s\n"
+ "fmin z17.s, p4/M, z17.s, z25.s\n"
+ "fmin z18.s, p4/M, z18.s, z25.s\n"
+ "fmin z19.s, p4/M, z19.s, z25.s\n"
+ "fmin z20.s, p4/M, z20.s, z25.s\n"
+ "fmin z21.s, p4/M, z21.s, z25.s\n"
+ "fmin z22.s, p4/M, z22.s, z25.s\n"
+ "fmin z23.s, p4/M, z23.s, z25.s\n"
+ "fmax z8.s, p4/M, z8.s, z24.s\n"
+ "fmax z9.s, p4/M, z9.s, z24.s\n"
+ "fmax z10.s, p4/M, z10.s, z24.s\n"
+ "fmax z11.s, p4/M, z11.s, z24.s\n"
+ "fmax z12.s, p4/M, z12.s, z24.s\n"
+ "fmax z13.s, p4/M, z13.s, z24.s\n"
+ "fmax z14.s, p4/M, z14.s, z24.s\n"
+ "fmax z15.s, p4/M, z15.s, z24.s\n"
+ "fmax z16.s, p4/M, z16.s, z24.s\n"
+ "fmax z17.s, p4/M, z17.s, z24.s\n"
+ "fmax z18.s, p4/M, z18.s, z24.s\n"
+ "fmax z19.s, p4/M, z19.s, z24.s\n"
+ "fmax z20.s, p4/M, z20.s, z24.s\n"
+ "fmax z21.s, p4/M, z21.s, z24.s\n"
+ "fmax z22.s, p4/M, z22.s, z24.s\n"
+ "fmax z23.s, p4/M, z23.s, z24.s\n"
"47:" // Height 4: No activation
"st1w { z8.s }, p3, [x9]\n"
"st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
@@ -807,30 +807,30 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"51:" // Height 5: no bias
"tbz %x[flags], #0, 52f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z8.s }, p3/Z, [x9]\n"
- "add x23, x24, x20, LSL #2\n"
+ "add x23, x9, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
+ "ld1w { z8.s }, p3/Z, [x9]\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x25]\n"
- "ld1w { z13.s }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p0/Z, [x25, #3, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x24]\n"
- "ld1w { z17.s }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1w { z18.s }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1w { z19.s }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1w { z20.s }, p3/Z, [x23]\n"
- "ld1w { z21.s }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1w { z22.s }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1w { z23.s }, p0/Z, [x23, #3, MUL VL]\n"
- "ld1w { z24.s }, p3/Z, [x22]\n"
- "ld1w { z25.s }, p2/Z, [x22, #1, MUL VL]\n"
- "ld1w { z26.s }, p1/Z, [x22, #2, MUL VL]\n"
- "ld1w { z27.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x23]\n"
+ "ld1w { z13.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x22]\n"
+ "ld1w { z17.s }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z19.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z20.s }, p3/Z, [x21]\n"
+ "ld1w { z21.s }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z22.s }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z23.s }, p0/Z, [x21, #3, MUL VL]\n"
+ "ld1w { z24.s }, p3/Z, [x20]\n"
+ "ld1w { z25.s }, p2/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z26.s }, p1/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z27.s }, p0/Z, [x20, #3, MUL VL]\n"
"b 53f\n"
"52:" // Height 5: no accumulate
"mov z8.b, #0x0\n"
@@ -858,15 +858,15 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"54:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
"ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 55f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "ldr x24, [x20, #0x10]\n"
+ "ldr x23, [x20, #0x18]\n"
+ "ldr x22, [x20, #0x20]\n"
"cbnz x28, 56f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
"add x26, x26, x20, LSL #2\n"
@@ -877,10 +877,10 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"b 56f\n"
"55:" // Height 5: setup direct input
"mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x25, x26, x21, LSL #2\n"
+ "add x24, x25, x21, LSL #2\n"
+ "add x23, x24, x21, LSL #2\n"
+ "add x22, x23, x21, LSL #2\n"
"56:" // Height 5: input setup done
"subs x27, x27, #0x1\n"
"ld1rw { z0.s }, p4/Z, [x26]\n"
@@ -902,29 +902,29 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"add x24, x24, #0x4\n"
"fmla z24.s, p4/M, z6.s, z4.s\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
- "ld1w { z6.s }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z29.s }, p4/Z, [x10, #2, MUL VL]\n"
"add x23, x23, #0x4\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
"add x22, x22, #0x4\n"
"fmla z21.s, p4/M, z7.s, z3.s\n"
"fmla z25.s, p4/M, z7.s, z4.s\n"
- "ld1w { z7.s }, p4/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z28.s }, p4/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
- "fmla z10.s, p4/M, z6.s, z0.s\n"
- "fmla z14.s, p4/M, z6.s, z1.s\n"
- "fmla z18.s, p4/M, z6.s, z2.s\n"
- "fmla z22.s, p4/M, z6.s, z3.s\n"
- "fmla z26.s, p4/M, z6.s, z4.s\n"
- "fmla z11.s, p4/M, z7.s, z0.s\n"
+ "fmla z10.s, p4/M, z29.s, z0.s\n"
+ "fmla z14.s, p4/M, z29.s, z1.s\n"
+ "fmla z18.s, p4/M, z29.s, z2.s\n"
+ "fmla z22.s, p4/M, z29.s, z3.s\n"
+ "fmla z26.s, p4/M, z29.s, z4.s\n"
+ "fmla z11.s, p4/M, z28.s, z0.s\n"
"ld1rw { z0.s }, p4/Z, [x26]\n"
"ld1w { z6.s }, p4/Z, [x10]\n"
- "fmla z15.s, p4/M, z7.s, z1.s\n"
- "fmla z19.s, p4/M, z7.s, z2.s\n"
+ "fmla z15.s, p4/M, z28.s, z1.s\n"
+ "fmla z19.s, p4/M, z28.s, z2.s\n"
"ld1rw { z1.s }, p4/Z, [x25]\n"
"ld1rw { z2.s }, p4/Z, [x24]\n"
- "fmla z23.s, p4/M, z7.s, z3.s\n"
- "fmla z27.s, p4/M, z7.s, z4.s\n"
+ "fmla z23.s, p4/M, z28.s, z3.s\n"
+ "fmla z27.s, p4/M, z28.s, z4.s\n"
"ld1rw { z3.s }, p4/Z, [x23]\n"
"ld1rw { z4.s }, p4/Z, [x22]\n"
"ld1w { z7.s }, p4/Z, [x10, #1, MUL VL]\n"
@@ -939,23 +939,23 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"cmp x28, x20\n"
"fmla z24.s, p4/M, z6.s, z4.s\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
- "ld1w { z6.s }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z29.s }, p4/Z, [x10, #2, MUL VL]\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
"fmla z21.s, p4/M, z7.s, z3.s\n"
"fmla z25.s, p4/M, z7.s, z4.s\n"
- "ld1w { z7.s }, p4/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z28.s }, p4/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
- "fmla z10.s, p4/M, z6.s, z0.s\n"
- "fmla z14.s, p4/M, z6.s, z1.s\n"
- "fmla z18.s, p4/M, z6.s, z2.s\n"
- "fmla z22.s, p4/M, z6.s, z3.s\n"
- "fmla z26.s, p4/M, z6.s, z4.s\n"
- "fmla z11.s, p4/M, z7.s, z0.s\n"
- "fmla z15.s, p4/M, z7.s, z1.s\n"
- "fmla z19.s, p4/M, z7.s, z2.s\n"
- "fmla z23.s, p4/M, z7.s, z3.s\n"
- "fmla z27.s, p4/M, z7.s, z4.s\n"
+ "fmla z10.s, p4/M, z29.s, z0.s\n"
+ "fmla z14.s, p4/M, z29.s, z1.s\n"
+ "fmla z18.s, p4/M, z29.s, z2.s\n"
+ "fmla z22.s, p4/M, z29.s, z3.s\n"
+ "fmla z26.s, p4/M, z29.s, z4.s\n"
+ "fmla z11.s, p4/M, z28.s, z0.s\n"
+ "fmla z15.s, p4/M, z28.s, z1.s\n"
+ "fmla z19.s, p4/M, z28.s, z2.s\n"
+ "fmla z23.s, p4/M, z28.s, z3.s\n"
+ "fmla z27.s, p4/M, z28.s, z4.s\n"
"bne 54b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"add x25, x9, x20, LSL #2\n"
@@ -964,49 +964,49 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"add x22, x23, x20, LSL #2\n"
"tbz %x[flags], #1, 59f\n"
"add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p4/Z, [x20]\n"
+ "ld1rw { z29.s }, p4/Z, [x20]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rw { z0.s }, p4/Z, [x20]\n"
- "fmin z8.s, p4/M, z8.s, z1.s\n"
- "fmin z9.s, p4/M, z9.s, z1.s\n"
- "fmin z10.s, p4/M, z10.s, z1.s\n"
- "fmin z11.s, p4/M, z11.s, z1.s\n"
- "fmin z12.s, p4/M, z12.s, z1.s\n"
- "fmin z13.s, p4/M, z13.s, z1.s\n"
- "fmin z14.s, p4/M, z14.s, z1.s\n"
- "fmin z15.s, p4/M, z15.s, z1.s\n"
- "fmin z16.s, p4/M, z16.s, z1.s\n"
- "fmin z17.s, p4/M, z17.s, z1.s\n"
- "fmin z18.s, p4/M, z18.s, z1.s\n"
- "fmin z19.s, p4/M, z19.s, z1.s\n"
- "fmin z20.s, p4/M, z20.s, z1.s\n"
- "fmin z21.s, p4/M, z21.s, z1.s\n"
- "fmin z22.s, p4/M, z22.s, z1.s\n"
- "fmin z23.s, p4/M, z23.s, z1.s\n"
- "fmin z24.s, p4/M, z24.s, z1.s\n"
- "fmin z25.s, p4/M, z25.s, z1.s\n"
- "fmin z26.s, p4/M, z26.s, z1.s\n"
- "fmin z27.s, p4/M, z27.s, z1.s\n"
- "fmax z8.s, p4/M, z8.s, z0.s\n"
- "fmax z9.s, p4/M, z9.s, z0.s\n"
- "fmax z10.s, p4/M, z10.s, z0.s\n"
- "fmax z11.s, p4/M, z11.s, z0.s\n"
- "fmax z12.s, p4/M, z12.s, z0.s\n"
- "fmax z13.s, p4/M, z13.s, z0.s\n"
- "fmax z14.s, p4/M, z14.s, z0.s\n"
- "fmax z15.s, p4/M, z15.s, z0.s\n"
- "fmax z16.s, p4/M, z16.s, z0.s\n"
- "fmax z17.s, p4/M, z17.s, z0.s\n"
- "fmax z18.s, p4/M, z18.s, z0.s\n"
- "fmax z19.s, p4/M, z19.s, z0.s\n"
- "fmax z20.s, p4/M, z20.s, z0.s\n"
- "fmax z21.s, p4/M, z21.s, z0.s\n"
- "fmax z22.s, p4/M, z22.s, z0.s\n"
- "fmax z23.s, p4/M, z23.s, z0.s\n"
- "fmax z24.s, p4/M, z24.s, z0.s\n"
- "fmax z25.s, p4/M, z25.s, z0.s\n"
- "fmax z26.s, p4/M, z26.s, z0.s\n"
- "fmax z27.s, p4/M, z27.s, z0.s\n"
+ "ld1rw { z28.s }, p4/Z, [x20]\n"
+ "fmin z8.s, p4/M, z8.s, z29.s\n"
+ "fmin z9.s, p4/M, z9.s, z29.s\n"
+ "fmin z10.s, p4/M, z10.s, z29.s\n"
+ "fmin z11.s, p4/M, z11.s, z29.s\n"
+ "fmin z12.s, p4/M, z12.s, z29.s\n"
+ "fmin z13.s, p4/M, z13.s, z29.s\n"
+ "fmin z14.s, p4/M, z14.s, z29.s\n"
+ "fmin z15.s, p4/M, z15.s, z29.s\n"
+ "fmin z16.s, p4/M, z16.s, z29.s\n"
+ "fmin z17.s, p4/M, z17.s, z29.s\n"
+ "fmin z18.s, p4/M, z18.s, z29.s\n"
+ "fmin z19.s, p4/M, z19.s, z29.s\n"
+ "fmin z20.s, p4/M, z20.s, z29.s\n"
+ "fmin z21.s, p4/M, z21.s, z29.s\n"
+ "fmin z22.s, p4/M, z22.s, z29.s\n"
+ "fmin z23.s, p4/M, z23.s, z29.s\n"
+ "fmin z24.s, p4/M, z24.s, z29.s\n"
+ "fmin z25.s, p4/M, z25.s, z29.s\n"
+ "fmin z26.s, p4/M, z26.s, z29.s\n"
+ "fmin z27.s, p4/M, z27.s, z29.s\n"
+ "fmax z8.s, p4/M, z8.s, z28.s\n"
+ "fmax z9.s, p4/M, z9.s, z28.s\n"
+ "fmax z10.s, p4/M, z10.s, z28.s\n"
+ "fmax z11.s, p4/M, z11.s, z28.s\n"
+ "fmax z12.s, p4/M, z12.s, z28.s\n"
+ "fmax z13.s, p4/M, z13.s, z28.s\n"
+ "fmax z14.s, p4/M, z14.s, z28.s\n"
+ "fmax z15.s, p4/M, z15.s, z28.s\n"
+ "fmax z16.s, p4/M, z16.s, z28.s\n"
+ "fmax z17.s, p4/M, z17.s, z28.s\n"
+ "fmax z18.s, p4/M, z18.s, z28.s\n"
+ "fmax z19.s, p4/M, z19.s, z28.s\n"
+ "fmax z20.s, p4/M, z20.s, z28.s\n"
+ "fmax z21.s, p4/M, z21.s, z28.s\n"
+ "fmax z22.s, p4/M, z22.s, z28.s\n"
+ "fmax z23.s, p4/M, z23.s, z28.s\n"
+ "fmax z24.s, p4/M, z24.s, z28.s\n"
+ "fmax z25.s, p4/M, z25.s, z28.s\n"
+ "fmax z26.s, p4/M, z26.s, z28.s\n"
+ "fmax z27.s, p4/M, z27.s, z28.s\n"
"59:" // Height 5: No activation
"st1w { z8.s }, p3, [x9]\n"
"st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
@@ -1081,35 +1081,35 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"63:" // Height 6: no bias
"tbz %x[flags], #0, 64f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "ld1w { z8.s }, p3/Z, [x9]\n"
+ "add x24, x9, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
+ "ld1w { z8.s }, p3/Z, [x9]\n"
"add x22, x23, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
"ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1w { z12.s }, p3/Z, [x25]\n"
- "ld1w { z13.s }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1w { z14.s }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1w { z15.s }, p0/Z, [x25, #3, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x24]\n"
- "ld1w { z17.s }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1w { z18.s }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1w { z19.s }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1w { z20.s }, p3/Z, [x23]\n"
- "ld1w { z21.s }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1w { z22.s }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1w { z23.s }, p0/Z, [x23, #3, MUL VL]\n"
- "ld1w { z24.s }, p3/Z, [x22]\n"
- "ld1w { z25.s }, p2/Z, [x22, #1, MUL VL]\n"
- "ld1w { z26.s }, p1/Z, [x22, #2, MUL VL]\n"
- "ld1w { z27.s }, p0/Z, [x22, #3, MUL VL]\n"
- "ld1w { z28.s }, p3/Z, [x21]\n"
- "ld1w { z29.s }, p2/Z, [x21, #1, MUL VL]\n"
- "ld1w { z30.s }, p1/Z, [x21, #2, MUL VL]\n"
- "ld1w { z31.s }, p0/Z, [x21, #3, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x24]\n"
+ "ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x23]\n"
+ "ld1w { z17.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p3/Z, [x22]\n"
+ "ld1w { z21.s }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z23.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z24.s }, p3/Z, [x21]\n"
+ "ld1w { z25.s }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z26.s }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z27.s }, p0/Z, [x21, #3, MUL VL]\n"
+ "ld1w { z28.s }, p3/Z, [x20]\n"
+ "ld1w { z29.s }, p2/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z30.s }, p1/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z31.s }, p0/Z, [x20, #3, MUL VL]\n"
"b 65f\n"
"64:" // Height 6: no accumulate
"mov z8.b, #0x0\n"
@@ -1141,16 +1141,16 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"66:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
"ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 67f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "ldr x24, [x20, #0x10]\n"
+ "ldr x23, [x20, #0x18]\n"
+ "ldr x22, [x20, #0x20]\n"
+ "ldr x21, [x20, #0x28]\n"
"cbnz x28, 68f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
"add x26, x26, x20, LSL #2\n"
@@ -1162,11 +1162,11 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"b 68f\n"
"67:" // Height 6: setup direct input
"mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x25, x26, x21, LSL #2\n"
+ "add x24, x25, x21, LSL #2\n"
+ "add x23, x24, x21, LSL #2\n"
+ "add x22, x23, x21, LSL #2\n"
+ "add x21, x22, x21, LSL #2\n"
"68:" // Height 6: input setup done
"subs x27, x27, #0x1\n"
"ld1rw { z0.s }, p4/Z, [x26]\n"
@@ -1355,7 +1355,6 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"74:" // Exit
-
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
@@ -1363,4 +1362,4 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
}
} // namespace arm_gemm
-#endif // ARM_COMPUTE_ENABLE_SVE
+#endif // ARM_COMPUTE_ENABLE_SVE