path: root/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/a64fx.cpp
Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/a64fx.cpp')
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/a64fx.cpp  679
1 file changed, 339 insertions(+), 340 deletions(-)
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/a64fx.cpp
index a70e66cbe4..d1a9bb4a26 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/a64fx.cpp
@@ -139,11 +139,11 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"6:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
"ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 7f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
"cbnz x28, 8f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
"add x26, x26, x20, LSL #1\n"
@@ -159,12 +159,12 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"9:" // Height 1: Multiply loop: Main loop
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
- "ld1h { z6.h }, p4/Z, [x10, #2, MUL VL]\n"
- "ld1h { z7.h }, p4/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z17.h }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z16.h }, p4/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
"add x26, x26, #0x2\n"
- "fmla z10.h, p4/M, z6.h, z0.h\n"
- "fmla z11.h, p4/M, z7.h, z0.h\n"
+ "fmla z10.h, p4/M, z17.h, z0.h\n"
+ "fmla z11.h, p4/M, z16.h, z0.h\n"
"subs x27, x27, #0x1\n"
"ld1rh { z0.h }, p4/Z, [x26]\n"
"ld1h { z6.h }, p4/Z, [x10]\n"
@@ -174,27 +174,27 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
- "ld1h { z6.h }, p4/Z, [x10, #2, MUL VL]\n"
- "ld1h { z7.h }, p4/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z17.h }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z16.h }, p4/Z, [x10, #3, MUL VL]\n"
"add x28, x28, #0x1\n"
"cmp x28, x20\n"
- "fmla z10.h, p4/M, z6.h, z0.h\n"
- "fmla z11.h, p4/M, z7.h, z0.h\n"
+ "fmla z10.h, p4/M, z17.h, z0.h\n"
+ "fmla z11.h, p4/M, z16.h, z0.h\n"
"addvl x10, x10, #4\n"
"bne 6b\n"
"tbz %x[flags], #1, 11f\n"
"add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p4/Z, [x20]\n"
+ "ld1rh { z17.h }, p4/Z, [x20]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p4/Z, [x20]\n"
- "fmin z8.h, p4/M, z8.h, z1.h\n"
- "fmin z9.h, p4/M, z9.h, z1.h\n"
- "fmin z10.h, p4/M, z10.h, z1.h\n"
- "fmin z11.h, p4/M, z11.h, z1.h\n"
- "fmax z8.h, p4/M, z8.h, z0.h\n"
- "fmax z9.h, p4/M, z9.h, z0.h\n"
- "fmax z10.h, p4/M, z10.h, z0.h\n"
- "fmax z11.h, p4/M, z11.h, z0.h\n"
+ "ld1rh { z16.h }, p4/Z, [x20]\n"
+ "fmin z8.h, p4/M, z8.h, z17.h\n"
+ "fmin z9.h, p4/M, z9.h, z17.h\n"
+ "fmin z10.h, p4/M, z10.h, z17.h\n"
+ "fmin z11.h, p4/M, z11.h, z17.h\n"
+ "fmax z8.h, p4/M, z8.h, z16.h\n"
+ "fmax z9.h, p4/M, z9.h, z16.h\n"
+ "fmax z10.h, p4/M, z10.h, z16.h\n"
+ "fmax z11.h, p4/M, z11.h, z16.h\n"
"11:" // Height 1: No activation
"st1h { z8.h }, p3, [x9]\n"
"st1h { z9.h }, p2, [x9, #1, MUL VL]\n"
@@ -234,15 +234,15 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"15:" // Height 2: no bias
"tbz %x[flags], #0, 16f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
+ "add x20, x9, x20, LSL #1\n"
"ld1h { z8.h }, p3/Z, [x9]\n"
"ld1h { z9.h }, p2/Z, [x9, #1, MUL VL]\n"
"ld1h { z10.h }, p1/Z, [x9, #2, MUL VL]\n"
"ld1h { z11.h }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1h { z12.h }, p3/Z, [x25]\n"
- "ld1h { z13.h }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p0/Z, [x25, #3, MUL VL]\n"
+ "ld1h { z12.h }, p3/Z, [x20]\n"
+ "ld1h { z13.h }, p2/Z, [x20, #1, MUL VL]\n"
+ "ld1h { z14.h }, p1/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z15.h }, p0/Z, [x20, #3, MUL VL]\n"
"b 17f\n"
"16:" // Height 2: no accumulate
"mov z8.b, #0x0\n"
@@ -258,12 +258,12 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"18:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
"ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 19f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
"cbnz x28, 20f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
"add x26, x26, x20, LSL #1\n"
@@ -271,7 +271,7 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"b 20f\n"
"19:" // Height 2: setup direct input
"mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
+ "add x25, x26, x21, LSL #1\n"
"20:" // Height 2: input setup done
"subs x27, x27, #0x1\n"
"ld1rh { z0.h }, p4/Z, [x26]\n"
@@ -282,18 +282,18 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"21:" // Height 2: Multiply loop: Main loop
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z12.h, p4/M, z6.h, z1.h\n"
- "ld1h { z6.h }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z17.h }, p4/Z, [x10, #2, MUL VL]\n"
"add x26, x26, #0x2\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
- "ld1h { z7.h }, p4/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z16.h }, p4/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
"subs x27, x27, #0x1\n"
"add x25, x25, #0x2\n"
- "fmla z10.h, p4/M, z6.h, z0.h\n"
- "fmla z14.h, p4/M, z6.h, z1.h\n"
- "fmla z11.h, p4/M, z7.h, z0.h\n"
- "fmla z15.h, p4/M, z7.h, z1.h\n"
+ "fmla z10.h, p4/M, z17.h, z0.h\n"
+ "fmla z14.h, p4/M, z17.h, z1.h\n"
+ "fmla z11.h, p4/M, z16.h, z0.h\n"
+ "fmla z15.h, p4/M, z16.h, z1.h\n"
"ld1rh { z0.h }, p4/Z, [x26]\n"
"ld1rh { z1.h }, p4/Z, [x25]\n"
"ld1h { z6.h }, p4/Z, [x10]\n"
@@ -303,41 +303,41 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z12.h, p4/M, z6.h, z1.h\n"
- "ld1h { z6.h }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z17.h }, p4/Z, [x10, #2, MUL VL]\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
- "ld1h { z7.h }, p4/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z16.h }, p4/Z, [x10, #3, MUL VL]\n"
"add x28, x28, #0x1\n"
"cmp x28, x20\n"
- "fmla z10.h, p4/M, z6.h, z0.h\n"
- "fmla z14.h, p4/M, z6.h, z1.h\n"
+ "fmla z10.h, p4/M, z17.h, z0.h\n"
+ "fmla z14.h, p4/M, z17.h, z1.h\n"
"addvl x10, x10, #4\n"
- "fmla z11.h, p4/M, z7.h, z0.h\n"
- "fmla z15.h, p4/M, z7.h, z1.h\n"
+ "fmla z11.h, p4/M, z16.h, z0.h\n"
+ "fmla z15.h, p4/M, z16.h, z1.h\n"
"bne 18b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"add x25, x9, x20, LSL #1\n"
"tbz %x[flags], #1, 23f\n"
"add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p4/Z, [x20]\n"
+ "ld1rh { z17.h }, p4/Z, [x20]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p4/Z, [x20]\n"
- "fmin z8.h, p4/M, z8.h, z1.h\n"
- "fmin z9.h, p4/M, z9.h, z1.h\n"
- "fmin z10.h, p4/M, z10.h, z1.h\n"
- "fmin z11.h, p4/M, z11.h, z1.h\n"
- "fmin z12.h, p4/M, z12.h, z1.h\n"
- "fmin z13.h, p4/M, z13.h, z1.h\n"
- "fmin z14.h, p4/M, z14.h, z1.h\n"
- "fmin z15.h, p4/M, z15.h, z1.h\n"
- "fmax z8.h, p4/M, z8.h, z0.h\n"
- "fmax z9.h, p4/M, z9.h, z0.h\n"
- "fmax z10.h, p4/M, z10.h, z0.h\n"
- "fmax z11.h, p4/M, z11.h, z0.h\n"
- "fmax z12.h, p4/M, z12.h, z0.h\n"
- "fmax z13.h, p4/M, z13.h, z0.h\n"
- "fmax z14.h, p4/M, z14.h, z0.h\n"
- "fmax z15.h, p4/M, z15.h, z0.h\n"
+ "ld1rh { z16.h }, p4/Z, [x20]\n"
+ "fmin z8.h, p4/M, z8.h, z17.h\n"
+ "fmin z9.h, p4/M, z9.h, z17.h\n"
+ "fmin z10.h, p4/M, z10.h, z17.h\n"
+ "fmin z11.h, p4/M, z11.h, z17.h\n"
+ "fmin z12.h, p4/M, z12.h, z17.h\n"
+ "fmin z13.h, p4/M, z13.h, z17.h\n"
+ "fmin z14.h, p4/M, z14.h, z17.h\n"
+ "fmin z15.h, p4/M, z15.h, z17.h\n"
+ "fmax z8.h, p4/M, z8.h, z16.h\n"
+ "fmax z9.h, p4/M, z9.h, z16.h\n"
+ "fmax z10.h, p4/M, z10.h, z16.h\n"
+ "fmax z11.h, p4/M, z11.h, z16.h\n"
+ "fmax z12.h, p4/M, z12.h, z16.h\n"
+ "fmax z13.h, p4/M, z13.h, z16.h\n"
+ "fmax z14.h, p4/M, z14.h, z16.h\n"
+ "fmax z15.h, p4/M, z15.h, z16.h\n"
"23:" // Height 2: No activation
"st1h { z8.h }, p3, [x9]\n"
"st1h { z9.h }, p2, [x9, #1, MUL VL]\n"
@@ -385,20 +385,20 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"27:" // Height 3: no bias
"tbz %x[flags], #0, 28f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "add x21, x9, x20, LSL #1\n"
+ "add x20, x21, x20, LSL #1\n"
"ld1h { z8.h }, p3/Z, [x9]\n"
"ld1h { z9.h }, p2/Z, [x9, #1, MUL VL]\n"
"ld1h { z10.h }, p1/Z, [x9, #2, MUL VL]\n"
"ld1h { z11.h }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1h { z12.h }, p3/Z, [x25]\n"
- "ld1h { z13.h }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p0/Z, [x25, #3, MUL VL]\n"
- "ld1h { z16.h }, p3/Z, [x24]\n"
- "ld1h { z17.h }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1h { z18.h }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1h { z19.h }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z12.h }, p3/Z, [x21]\n"
+ "ld1h { z13.h }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1h { z14.h }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z15.h }, p0/Z, [x21, #3, MUL VL]\n"
+ "ld1h { z16.h }, p3/Z, [x20]\n"
+ "ld1h { z17.h }, p2/Z, [x20, #1, MUL VL]\n"
+ "ld1h { z18.h }, p1/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z19.h }, p0/Z, [x20, #3, MUL VL]\n"
"b 29f\n"
"28:" // Height 3: no accumulate
"mov z8.b, #0x0\n"
@@ -418,13 +418,13 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"30:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
"ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 31f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "ldr x24, [x20, #0x10]\n"
"cbnz x28, 32f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
"add x26, x26, x20, LSL #1\n"
@@ -433,8 +433,8 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"b 32f\n"
"31:" // Height 3: setup direct input
"mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "add x25, x26, x21, LSL #1\n"
+ "add x24, x25, x21, LSL #1\n"
"32:" // Height 3: input setup done
"subs x27, x27, #0x1\n"
"ld1rh { z0.h }, p4/Z, [x26]\n"
@@ -450,21 +450,21 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"subs x27, x27, #0x1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
- "ld1h { z6.h }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z21.h }, p4/Z, [x10, #2, MUL VL]\n"
"add x25, x25, #0x2\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
- "ld1h { z7.h }, p4/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z20.h }, p4/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
"add x24, x24, #0x2\n"
- "fmla z10.h, p4/M, z6.h, z0.h\n"
- "fmla z14.h, p4/M, z6.h, z1.h\n"
- "fmla z18.h, p4/M, z6.h, z2.h\n"
- "fmla z11.h, p4/M, z7.h, z0.h\n"
+ "fmla z10.h, p4/M, z21.h, z0.h\n"
+ "fmla z14.h, p4/M, z21.h, z1.h\n"
+ "fmla z18.h, p4/M, z21.h, z2.h\n"
+ "fmla z11.h, p4/M, z20.h, z0.h\n"
"ld1rh { z0.h }, p4/Z, [x26]\n"
"ld1h { z6.h }, p4/Z, [x10]\n"
- "fmla z15.h, p4/M, z7.h, z1.h\n"
- "fmla z19.h, p4/M, z7.h, z2.h\n"
+ "fmla z15.h, p4/M, z20.h, z1.h\n"
+ "fmla z19.h, p4/M, z20.h, z2.h\n"
"ld1rh { z1.h }, p4/Z, [x25]\n"
"ld1rh { z2.h }, p4/Z, [x24]\n"
"ld1h { z7.h }, p4/Z, [x10, #1, MUL VL]\n"
@@ -476,51 +476,51 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"add x28, x28, #0x1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
- "ld1h { z6.h }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z21.h }, p4/Z, [x10, #2, MUL VL]\n"
"cmp x28, x20\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
- "ld1h { z7.h }, p4/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z20.h }, p4/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
- "fmla z10.h, p4/M, z6.h, z0.h\n"
- "fmla z14.h, p4/M, z6.h, z1.h\n"
- "fmla z18.h, p4/M, z6.h, z2.h\n"
- "fmla z11.h, p4/M, z7.h, z0.h\n"
- "fmla z15.h, p4/M, z7.h, z1.h\n"
- "fmla z19.h, p4/M, z7.h, z2.h\n"
+ "fmla z10.h, p4/M, z21.h, z0.h\n"
+ "fmla z14.h, p4/M, z21.h, z1.h\n"
+ "fmla z18.h, p4/M, z21.h, z2.h\n"
+ "fmla z11.h, p4/M, z20.h, z0.h\n"
+ "fmla z15.h, p4/M, z20.h, z1.h\n"
+ "fmla z19.h, p4/M, z20.h, z2.h\n"
"bne 30b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"add x25, x9, x20, LSL #1\n"
"add x24, x25, x20, LSL #1\n"
"tbz %x[flags], #1, 35f\n"
"add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p4/Z, [x20]\n"
+ "ld1rh { z21.h }, p4/Z, [x20]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p4/Z, [x20]\n"
- "fmin z8.h, p4/M, z8.h, z1.h\n"
- "fmin z9.h, p4/M, z9.h, z1.h\n"
- "fmin z10.h, p4/M, z10.h, z1.h\n"
- "fmin z11.h, p4/M, z11.h, z1.h\n"
- "fmin z12.h, p4/M, z12.h, z1.h\n"
- "fmin z13.h, p4/M, z13.h, z1.h\n"
- "fmin z14.h, p4/M, z14.h, z1.h\n"
- "fmin z15.h, p4/M, z15.h, z1.h\n"
- "fmin z16.h, p4/M, z16.h, z1.h\n"
- "fmin z17.h, p4/M, z17.h, z1.h\n"
- "fmin z18.h, p4/M, z18.h, z1.h\n"
- "fmin z19.h, p4/M, z19.h, z1.h\n"
- "fmax z8.h, p4/M, z8.h, z0.h\n"
- "fmax z9.h, p4/M, z9.h, z0.h\n"
- "fmax z10.h, p4/M, z10.h, z0.h\n"
- "fmax z11.h, p4/M, z11.h, z0.h\n"
- "fmax z12.h, p4/M, z12.h, z0.h\n"
- "fmax z13.h, p4/M, z13.h, z0.h\n"
- "fmax z14.h, p4/M, z14.h, z0.h\n"
- "fmax z15.h, p4/M, z15.h, z0.h\n"
- "fmax z16.h, p4/M, z16.h, z0.h\n"
- "fmax z17.h, p4/M, z17.h, z0.h\n"
- "fmax z18.h, p4/M, z18.h, z0.h\n"
- "fmax z19.h, p4/M, z19.h, z0.h\n"
+ "ld1rh { z20.h }, p4/Z, [x20]\n"
+ "fmin z8.h, p4/M, z8.h, z21.h\n"
+ "fmin z9.h, p4/M, z9.h, z21.h\n"
+ "fmin z10.h, p4/M, z10.h, z21.h\n"
+ "fmin z11.h, p4/M, z11.h, z21.h\n"
+ "fmin z12.h, p4/M, z12.h, z21.h\n"
+ "fmin z13.h, p4/M, z13.h, z21.h\n"
+ "fmin z14.h, p4/M, z14.h, z21.h\n"
+ "fmin z15.h, p4/M, z15.h, z21.h\n"
+ "fmin z16.h, p4/M, z16.h, z21.h\n"
+ "fmin z17.h, p4/M, z17.h, z21.h\n"
+ "fmin z18.h, p4/M, z18.h, z21.h\n"
+ "fmin z19.h, p4/M, z19.h, z21.h\n"
+ "fmax z8.h, p4/M, z8.h, z20.h\n"
+ "fmax z9.h, p4/M, z9.h, z20.h\n"
+ "fmax z10.h, p4/M, z10.h, z20.h\n"
+ "fmax z11.h, p4/M, z11.h, z20.h\n"
+ "fmax z12.h, p4/M, z12.h, z20.h\n"
+ "fmax z13.h, p4/M, z13.h, z20.h\n"
+ "fmax z14.h, p4/M, z14.h, z20.h\n"
+ "fmax z15.h, p4/M, z15.h, z20.h\n"
+ "fmax z16.h, p4/M, z16.h, z20.h\n"
+ "fmax z17.h, p4/M, z17.h, z20.h\n"
+ "fmax z18.h, p4/M, z18.h, z20.h\n"
+ "fmax z19.h, p4/M, z19.h, z20.h\n"
"35:" // Height 3: No activation
"st1h { z8.h }, p3, [x9]\n"
"st1h { z9.h }, p2, [x9, #1, MUL VL]\n"
@@ -576,25 +576,25 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"39:" // Height 4: no bias
"tbz %x[flags], #0, 40f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "add x22, x9, x20, LSL #1\n"
+ "add x21, x22, x20, LSL #1\n"
"ld1h { z8.h }, p3/Z, [x9]\n"
- "add x23, x24, x20, LSL #1\n"
+ "add x20, x21, x20, LSL #1\n"
"ld1h { z9.h }, p2/Z, [x9, #1, MUL VL]\n"
"ld1h { z10.h }, p1/Z, [x9, #2, MUL VL]\n"
"ld1h { z11.h }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1h { z12.h }, p3/Z, [x25]\n"
- "ld1h { z13.h }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p0/Z, [x25, #3, MUL VL]\n"
- "ld1h { z16.h }, p3/Z, [x24]\n"
- "ld1h { z17.h }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1h { z18.h }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1h { z19.h }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1h { z20.h }, p3/Z, [x23]\n"
- "ld1h { z21.h }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1h { z22.h }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1h { z23.h }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z12.h }, p3/Z, [x22]\n"
+ "ld1h { z13.h }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z14.h }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z15.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1h { z16.h }, p3/Z, [x21]\n"
+ "ld1h { z17.h }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1h { z18.h }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z19.h }, p0/Z, [x21, #3, MUL VL]\n"
+ "ld1h { z20.h }, p3/Z, [x20]\n"
+ "ld1h { z21.h }, p2/Z, [x20, #1, MUL VL]\n"
+ "ld1h { z22.h }, p1/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z23.h }, p0/Z, [x20, #3, MUL VL]\n"
"b 41f\n"
"40:" // Height 4: no accumulate
"mov z8.b, #0x0\n"
@@ -618,14 +618,14 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"42:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
"ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 43f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "ldr x24, [x20, #0x10]\n"
+ "ldr x23, [x20, #0x18]\n"
"cbnz x28, 44f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
"add x26, x26, x20, LSL #1\n"
@@ -635,9 +635,9 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"b 44f\n"
"43:" // Height 4: setup direct input
"mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
+ "add x25, x26, x21, LSL #1\n"
+ "add x24, x25, x21, LSL #1\n"
+ "add x23, x24, x21, LSL #1\n"
"44:" // Height 4: input setup done
"subs x27, x27, #0x1\n"
"ld1rh { z0.h }, p4/Z, [x26]\n"
@@ -654,7 +654,7 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"subs x27, x27, #0x1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z20.h, p4/M, z6.h, z3.h\n"
- "ld1h { z6.h }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z25.h }, p4/Z, [x10, #2, MUL VL]\n"
"add x25, x25, #0x2\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
@@ -662,19 +662,19 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"add x23, x23, #0x2\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
"fmla z21.h, p4/M, z7.h, z3.h\n"
- "ld1h { z7.h }, p4/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z24.h }, p4/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
- "fmla z10.h, p4/M, z6.h, z0.h\n"
- "fmla z14.h, p4/M, z6.h, z1.h\n"
- "fmla z18.h, p4/M, z6.h, z2.h\n"
- "fmla z22.h, p4/M, z6.h, z3.h\n"
+ "fmla z10.h, p4/M, z25.h, z0.h\n"
+ "fmla z14.h, p4/M, z25.h, z1.h\n"
+ "fmla z18.h, p4/M, z25.h, z2.h\n"
+ "fmla z22.h, p4/M, z25.h, z3.h\n"
"ld1h { z6.h }, p4/Z, [x10]\n"
- "fmla z11.h, p4/M, z7.h, z0.h\n"
- "fmla z15.h, p4/M, z7.h, z1.h\n"
+ "fmla z11.h, p4/M, z24.h, z0.h\n"
+ "fmla z15.h, p4/M, z24.h, z1.h\n"
"ld1rh { z0.h }, p4/Z, [x26]\n"
"ld1rh { z1.h }, p4/Z, [x25]\n"
- "fmla z19.h, p4/M, z7.h, z2.h\n"
- "fmla z23.h, p4/M, z7.h, z3.h\n"
+ "fmla z19.h, p4/M, z24.h, z2.h\n"
+ "fmla z23.h, p4/M, z24.h, z3.h\n"
"ld1rh { z2.h }, p4/Z, [x24]\n"
"ld1rh { z3.h }, p4/Z, [x23]\n"
"ld1h { z7.h }, p4/Z, [x10, #1, MUL VL]\n"
@@ -686,22 +686,22 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"add x28, x28, #0x1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z20.h, p4/M, z6.h, z3.h\n"
- "ld1h { z6.h }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z25.h }, p4/Z, [x10, #2, MUL VL]\n"
"cmp x28, x20\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
"fmla z21.h, p4/M, z7.h, z3.h\n"
- "ld1h { z7.h }, p4/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z24.h }, p4/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
- "fmla z10.h, p4/M, z6.h, z0.h\n"
- "fmla z14.h, p4/M, z6.h, z1.h\n"
- "fmla z18.h, p4/M, z6.h, z2.h\n"
- "fmla z22.h, p4/M, z6.h, z3.h\n"
- "fmla z11.h, p4/M, z7.h, z0.h\n"
- "fmla z15.h, p4/M, z7.h, z1.h\n"
- "fmla z19.h, p4/M, z7.h, z2.h\n"
- "fmla z23.h, p4/M, z7.h, z3.h\n"
+ "fmla z10.h, p4/M, z25.h, z0.h\n"
+ "fmla z14.h, p4/M, z25.h, z1.h\n"
+ "fmla z18.h, p4/M, z25.h, z2.h\n"
+ "fmla z22.h, p4/M, z25.h, z3.h\n"
+ "fmla z11.h, p4/M, z24.h, z0.h\n"
+ "fmla z15.h, p4/M, z24.h, z1.h\n"
+ "fmla z19.h, p4/M, z24.h, z2.h\n"
+ "fmla z23.h, p4/M, z24.h, z3.h\n"
"bne 42b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"add x25, x9, x20, LSL #1\n"
@@ -709,41 +709,41 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"add x23, x24, x20, LSL #1\n"
"tbz %x[flags], #1, 47f\n"
"add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p4/Z, [x20]\n"
+ "ld1rh { z25.h }, p4/Z, [x20]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p4/Z, [x20]\n"
- "fmin z8.h, p4/M, z8.h, z1.h\n"
- "fmin z9.h, p4/M, z9.h, z1.h\n"
- "fmin z10.h, p4/M, z10.h, z1.h\n"
- "fmin z11.h, p4/M, z11.h, z1.h\n"
- "fmin z12.h, p4/M, z12.h, z1.h\n"
- "fmin z13.h, p4/M, z13.h, z1.h\n"
- "fmin z14.h, p4/M, z14.h, z1.h\n"
- "fmin z15.h, p4/M, z15.h, z1.h\n"
- "fmin z16.h, p4/M, z16.h, z1.h\n"
- "fmin z17.h, p4/M, z17.h, z1.h\n"
- "fmin z18.h, p4/M, z18.h, z1.h\n"
- "fmin z19.h, p4/M, z19.h, z1.h\n"
- "fmin z20.h, p4/M, z20.h, z1.h\n"
- "fmin z21.h, p4/M, z21.h, z1.h\n"
- "fmin z22.h, p4/M, z22.h, z1.h\n"
- "fmin z23.h, p4/M, z23.h, z1.h\n"
- "fmax z8.h, p4/M, z8.h, z0.h\n"
- "fmax z9.h, p4/M, z9.h, z0.h\n"
- "fmax z10.h, p4/M, z10.h, z0.h\n"
- "fmax z11.h, p4/M, z11.h, z0.h\n"
- "fmax z12.h, p4/M, z12.h, z0.h\n"
- "fmax z13.h, p4/M, z13.h, z0.h\n"
- "fmax z14.h, p4/M, z14.h, z0.h\n"
- "fmax z15.h, p4/M, z15.h, z0.h\n"
- "fmax z16.h, p4/M, z16.h, z0.h\n"
- "fmax z17.h, p4/M, z17.h, z0.h\n"
- "fmax z18.h, p4/M, z18.h, z0.h\n"
- "fmax z19.h, p4/M, z19.h, z0.h\n"
- "fmax z20.h, p4/M, z20.h, z0.h\n"
- "fmax z21.h, p4/M, z21.h, z0.h\n"
- "fmax z22.h, p4/M, z22.h, z0.h\n"
- "fmax z23.h, p4/M, z23.h, z0.h\n"
+ "ld1rh { z24.h }, p4/Z, [x20]\n"
+ "fmin z8.h, p4/M, z8.h, z25.h\n"
+ "fmin z9.h, p4/M, z9.h, z25.h\n"
+ "fmin z10.h, p4/M, z10.h, z25.h\n"
+ "fmin z11.h, p4/M, z11.h, z25.h\n"
+ "fmin z12.h, p4/M, z12.h, z25.h\n"
+ "fmin z13.h, p4/M, z13.h, z25.h\n"
+ "fmin z14.h, p4/M, z14.h, z25.h\n"
+ "fmin z15.h, p4/M, z15.h, z25.h\n"
+ "fmin z16.h, p4/M, z16.h, z25.h\n"
+ "fmin z17.h, p4/M, z17.h, z25.h\n"
+ "fmin z18.h, p4/M, z18.h, z25.h\n"
+ "fmin z19.h, p4/M, z19.h, z25.h\n"
+ "fmin z20.h, p4/M, z20.h, z25.h\n"
+ "fmin z21.h, p4/M, z21.h, z25.h\n"
+ "fmin z22.h, p4/M, z22.h, z25.h\n"
+ "fmin z23.h, p4/M, z23.h, z25.h\n"
+ "fmax z8.h, p4/M, z8.h, z24.h\n"
+ "fmax z9.h, p4/M, z9.h, z24.h\n"
+ "fmax z10.h, p4/M, z10.h, z24.h\n"
+ "fmax z11.h, p4/M, z11.h, z24.h\n"
+ "fmax z12.h, p4/M, z12.h, z24.h\n"
+ "fmax z13.h, p4/M, z13.h, z24.h\n"
+ "fmax z14.h, p4/M, z14.h, z24.h\n"
+ "fmax z15.h, p4/M, z15.h, z24.h\n"
+ "fmax z16.h, p4/M, z16.h, z24.h\n"
+ "fmax z17.h, p4/M, z17.h, z24.h\n"
+ "fmax z18.h, p4/M, z18.h, z24.h\n"
+ "fmax z19.h, p4/M, z19.h, z24.h\n"
+ "fmax z20.h, p4/M, z20.h, z24.h\n"
+ "fmax z21.h, p4/M, z21.h, z24.h\n"
+ "fmax z22.h, p4/M, z22.h, z24.h\n"
+ "fmax z23.h, p4/M, z23.h, z24.h\n"
"47:" // Height 4: No activation
"st1h { z8.h }, p3, [x9]\n"
"st1h { z9.h }, p2, [x9, #1, MUL VL]\n"
@@ -807,30 +807,30 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"51:" // Height 5: no bias
"tbz %x[flags], #0, 52f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "ld1h { z8.h }, p3/Z, [x9]\n"
- "add x23, x24, x20, LSL #1\n"
+ "add x23, x9, x20, LSL #1\n"
"add x22, x23, x20, LSL #1\n"
+ "ld1h { z8.h }, p3/Z, [x9]\n"
+ "add x21, x22, x20, LSL #1\n"
+ "add x20, x21, x20, LSL #1\n"
"ld1h { z9.h }, p2/Z, [x9, #1, MUL VL]\n"
"ld1h { z10.h }, p1/Z, [x9, #2, MUL VL]\n"
"ld1h { z11.h }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1h { z12.h }, p3/Z, [x25]\n"
- "ld1h { z13.h }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p0/Z, [x25, #3, MUL VL]\n"
- "ld1h { z16.h }, p3/Z, [x24]\n"
- "ld1h { z17.h }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1h { z18.h }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1h { z19.h }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1h { z20.h }, p3/Z, [x23]\n"
- "ld1h { z21.h }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1h { z22.h }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1h { z23.h }, p0/Z, [x23, #3, MUL VL]\n"
- "ld1h { z24.h }, p3/Z, [x22]\n"
- "ld1h { z25.h }, p2/Z, [x22, #1, MUL VL]\n"
- "ld1h { z26.h }, p1/Z, [x22, #2, MUL VL]\n"
- "ld1h { z27.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1h { z12.h }, p3/Z, [x23]\n"
+ "ld1h { z13.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z14.h }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z15.h }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z16.h }, p3/Z, [x22]\n"
+ "ld1h { z17.h }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z18.h }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z19.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1h { z20.h }, p3/Z, [x21]\n"
+ "ld1h { z21.h }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1h { z22.h }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z23.h }, p0/Z, [x21, #3, MUL VL]\n"
+ "ld1h { z24.h }, p3/Z, [x20]\n"
+ "ld1h { z25.h }, p2/Z, [x20, #1, MUL VL]\n"
+ "ld1h { z26.h }, p1/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z27.h }, p0/Z, [x20, #3, MUL VL]\n"
"b 53f\n"
"52:" // Height 5: no accumulate
"mov z8.b, #0x0\n"
@@ -858,15 +858,15 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"54:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
"ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 55f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "ldr x24, [x20, #0x10]\n"
+ "ldr x23, [x20, #0x18]\n"
+ "ldr x22, [x20, #0x20]\n"
"cbnz x28, 56f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
"add x26, x26, x20, LSL #1\n"
@@ -877,10 +877,10 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"b 56f\n"
"55:" // Height 5: setup direct input
"mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
+ "add x25, x26, x21, LSL #1\n"
+ "add x24, x25, x21, LSL #1\n"
+ "add x23, x24, x21, LSL #1\n"
+ "add x22, x23, x21, LSL #1\n"
"56:" // Height 5: input setup done
"subs x27, x27, #0x1\n"
"ld1rh { z0.h }, p4/Z, [x26]\n"
@@ -902,29 +902,29 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"add x24, x24, #0x2\n"
"fmla z24.h, p4/M, z6.h, z4.h\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
- "ld1h { z6.h }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z29.h }, p4/Z, [x10, #2, MUL VL]\n"
"add x23, x23, #0x2\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
"add x22, x22, #0x2\n"
"fmla z21.h, p4/M, z7.h, z3.h\n"
"fmla z25.h, p4/M, z7.h, z4.h\n"
- "ld1h { z7.h }, p4/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z28.h }, p4/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
- "fmla z10.h, p4/M, z6.h, z0.h\n"
- "fmla z14.h, p4/M, z6.h, z1.h\n"
- "fmla z18.h, p4/M, z6.h, z2.h\n"
- "fmla z22.h, p4/M, z6.h, z3.h\n"
- "fmla z26.h, p4/M, z6.h, z4.h\n"
- "fmla z11.h, p4/M, z7.h, z0.h\n"
+ "fmla z10.h, p4/M, z29.h, z0.h\n"
+ "fmla z14.h, p4/M, z29.h, z1.h\n"
+ "fmla z18.h, p4/M, z29.h, z2.h\n"
+ "fmla z22.h, p4/M, z29.h, z3.h\n"
+ "fmla z26.h, p4/M, z29.h, z4.h\n"
+ "fmla z11.h, p4/M, z28.h, z0.h\n"
"ld1rh { z0.h }, p4/Z, [x26]\n"
"ld1h { z6.h }, p4/Z, [x10]\n"
- "fmla z15.h, p4/M, z7.h, z1.h\n"
- "fmla z19.h, p4/M, z7.h, z2.h\n"
+ "fmla z15.h, p4/M, z28.h, z1.h\n"
+ "fmla z19.h, p4/M, z28.h, z2.h\n"
"ld1rh { z1.h }, p4/Z, [x25]\n"
"ld1rh { z2.h }, p4/Z, [x24]\n"
- "fmla z23.h, p4/M, z7.h, z3.h\n"
- "fmla z27.h, p4/M, z7.h, z4.h\n"
+ "fmla z23.h, p4/M, z28.h, z3.h\n"
+ "fmla z27.h, p4/M, z28.h, z4.h\n"
"ld1rh { z3.h }, p4/Z, [x23]\n"
"ld1rh { z4.h }, p4/Z, [x22]\n"
"ld1h { z7.h }, p4/Z, [x10, #1, MUL VL]\n"
@@ -939,23 +939,23 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"cmp x28, x20\n"
"fmla z24.h, p4/M, z6.h, z4.h\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
- "ld1h { z6.h }, p4/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z29.h }, p4/Z, [x10, #2, MUL VL]\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
"fmla z21.h, p4/M, z7.h, z3.h\n"
"fmla z25.h, p4/M, z7.h, z4.h\n"
- "ld1h { z7.h }, p4/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z28.h }, p4/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
- "fmla z10.h, p4/M, z6.h, z0.h\n"
- "fmla z14.h, p4/M, z6.h, z1.h\n"
- "fmla z18.h, p4/M, z6.h, z2.h\n"
- "fmla z22.h, p4/M, z6.h, z3.h\n"
- "fmla z26.h, p4/M, z6.h, z4.h\n"
- "fmla z11.h, p4/M, z7.h, z0.h\n"
- "fmla z15.h, p4/M, z7.h, z1.h\n"
- "fmla z19.h, p4/M, z7.h, z2.h\n"
- "fmla z23.h, p4/M, z7.h, z3.h\n"
- "fmla z27.h, p4/M, z7.h, z4.h\n"
+ "fmla z10.h, p4/M, z29.h, z0.h\n"
+ "fmla z14.h, p4/M, z29.h, z1.h\n"
+ "fmla z18.h, p4/M, z29.h, z2.h\n"
+ "fmla z22.h, p4/M, z29.h, z3.h\n"
+ "fmla z26.h, p4/M, z29.h, z4.h\n"
+ "fmla z11.h, p4/M, z28.h, z0.h\n"
+ "fmla z15.h, p4/M, z28.h, z1.h\n"
+ "fmla z19.h, p4/M, z28.h, z2.h\n"
+ "fmla z23.h, p4/M, z28.h, z3.h\n"
+ "fmla z27.h, p4/M, z28.h, z4.h\n"
"bne 54b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"add x25, x9, x20, LSL #1\n"
@@ -964,49 +964,49 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"add x22, x23, x20, LSL #1\n"
"tbz %x[flags], #1, 59f\n"
"add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p4/Z, [x20]\n"
+ "ld1rh { z29.h }, p4/Z, [x20]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p4/Z, [x20]\n"
- "fmin z8.h, p4/M, z8.h, z1.h\n"
- "fmin z9.h, p4/M, z9.h, z1.h\n"
- "fmin z10.h, p4/M, z10.h, z1.h\n"
- "fmin z11.h, p4/M, z11.h, z1.h\n"
- "fmin z12.h, p4/M, z12.h, z1.h\n"
- "fmin z13.h, p4/M, z13.h, z1.h\n"
- "fmin z14.h, p4/M, z14.h, z1.h\n"
- "fmin z15.h, p4/M, z15.h, z1.h\n"
- "fmin z16.h, p4/M, z16.h, z1.h\n"
- "fmin z17.h, p4/M, z17.h, z1.h\n"
- "fmin z18.h, p4/M, z18.h, z1.h\n"
- "fmin z19.h, p4/M, z19.h, z1.h\n"
- "fmin z20.h, p4/M, z20.h, z1.h\n"
- "fmin z21.h, p4/M, z21.h, z1.h\n"
- "fmin z22.h, p4/M, z22.h, z1.h\n"
- "fmin z23.h, p4/M, z23.h, z1.h\n"
- "fmin z24.h, p4/M, z24.h, z1.h\n"
- "fmin z25.h, p4/M, z25.h, z1.h\n"
- "fmin z26.h, p4/M, z26.h, z1.h\n"
- "fmin z27.h, p4/M, z27.h, z1.h\n"
- "fmax z8.h, p4/M, z8.h, z0.h\n"
- "fmax z9.h, p4/M, z9.h, z0.h\n"
- "fmax z10.h, p4/M, z10.h, z0.h\n"
- "fmax z11.h, p4/M, z11.h, z0.h\n"
- "fmax z12.h, p4/M, z12.h, z0.h\n"
- "fmax z13.h, p4/M, z13.h, z0.h\n"
- "fmax z14.h, p4/M, z14.h, z0.h\n"
- "fmax z15.h, p4/M, z15.h, z0.h\n"
- "fmax z16.h, p4/M, z16.h, z0.h\n"
- "fmax z17.h, p4/M, z17.h, z0.h\n"
- "fmax z18.h, p4/M, z18.h, z0.h\n"
- "fmax z19.h, p4/M, z19.h, z0.h\n"
- "fmax z20.h, p4/M, z20.h, z0.h\n"
- "fmax z21.h, p4/M, z21.h, z0.h\n"
- "fmax z22.h, p4/M, z22.h, z0.h\n"
- "fmax z23.h, p4/M, z23.h, z0.h\n"
- "fmax z24.h, p4/M, z24.h, z0.h\n"
- "fmax z25.h, p4/M, z25.h, z0.h\n"
- "fmax z26.h, p4/M, z26.h, z0.h\n"
- "fmax z27.h, p4/M, z27.h, z0.h\n"
+ "ld1rh { z28.h }, p4/Z, [x20]\n"
+ "fmin z8.h, p4/M, z8.h, z29.h\n"
+ "fmin z9.h, p4/M, z9.h, z29.h\n"
+ "fmin z10.h, p4/M, z10.h, z29.h\n"
+ "fmin z11.h, p4/M, z11.h, z29.h\n"
+ "fmin z12.h, p4/M, z12.h, z29.h\n"
+ "fmin z13.h, p4/M, z13.h, z29.h\n"
+ "fmin z14.h, p4/M, z14.h, z29.h\n"
+ "fmin z15.h, p4/M, z15.h, z29.h\n"
+ "fmin z16.h, p4/M, z16.h, z29.h\n"
+ "fmin z17.h, p4/M, z17.h, z29.h\n"
+ "fmin z18.h, p4/M, z18.h, z29.h\n"
+ "fmin z19.h, p4/M, z19.h, z29.h\n"
+ "fmin z20.h, p4/M, z20.h, z29.h\n"
+ "fmin z21.h, p4/M, z21.h, z29.h\n"
+ "fmin z22.h, p4/M, z22.h, z29.h\n"
+ "fmin z23.h, p4/M, z23.h, z29.h\n"
+ "fmin z24.h, p4/M, z24.h, z29.h\n"
+ "fmin z25.h, p4/M, z25.h, z29.h\n"
+ "fmin z26.h, p4/M, z26.h, z29.h\n"
+ "fmin z27.h, p4/M, z27.h, z29.h\n"
+ "fmax z8.h, p4/M, z8.h, z28.h\n"
+ "fmax z9.h, p4/M, z9.h, z28.h\n"
+ "fmax z10.h, p4/M, z10.h, z28.h\n"
+ "fmax z11.h, p4/M, z11.h, z28.h\n"
+ "fmax z12.h, p4/M, z12.h, z28.h\n"
+ "fmax z13.h, p4/M, z13.h, z28.h\n"
+ "fmax z14.h, p4/M, z14.h, z28.h\n"
+ "fmax z15.h, p4/M, z15.h, z28.h\n"
+ "fmax z16.h, p4/M, z16.h, z28.h\n"
+ "fmax z17.h, p4/M, z17.h, z28.h\n"
+ "fmax z18.h, p4/M, z18.h, z28.h\n"
+ "fmax z19.h, p4/M, z19.h, z28.h\n"
+ "fmax z20.h, p4/M, z20.h, z28.h\n"
+ "fmax z21.h, p4/M, z21.h, z28.h\n"
+ "fmax z22.h, p4/M, z22.h, z28.h\n"
+ "fmax z23.h, p4/M, z23.h, z28.h\n"
+ "fmax z24.h, p4/M, z24.h, z28.h\n"
+ "fmax z25.h, p4/M, z25.h, z28.h\n"
+ "fmax z26.h, p4/M, z26.h, z28.h\n"
+ "fmax z27.h, p4/M, z27.h, z28.h\n"
"59:" // Height 5: No activation
"st1h { z8.h }, p3, [x9]\n"
"st1h { z9.h }, p2, [x9, #1, MUL VL]\n"
@@ -1081,35 +1081,35 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"63:" // Height 6: no bias
"tbz %x[flags], #0, 64f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "ld1h { z8.h }, p3/Z, [x9]\n"
+ "add x24, x9, x20, LSL #1\n"
"add x23, x24, x20, LSL #1\n"
+ "ld1h { z8.h }, p3/Z, [x9]\n"
"add x22, x23, x20, LSL #1\n"
+ "add x21, x22, x20, LSL #1\n"
"ld1h { z9.h }, p2/Z, [x9, #1, MUL VL]\n"
"ld1h { z10.h }, p1/Z, [x9, #2, MUL VL]\n"
- "add x21, x22, x20, LSL #1\n"
+ "add x20, x21, x20, LSL #1\n"
"ld1h { z11.h }, p0/Z, [x9, #3, MUL VL]\n"
- "ld1h { z12.h }, p3/Z, [x25]\n"
- "ld1h { z13.h }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p0/Z, [x25, #3, MUL VL]\n"
- "ld1h { z16.h }, p3/Z, [x24]\n"
- "ld1h { z17.h }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1h { z18.h }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1h { z19.h }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1h { z20.h }, p3/Z, [x23]\n"
- "ld1h { z21.h }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1h { z22.h }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1h { z23.h }, p0/Z, [x23, #3, MUL VL]\n"
- "ld1h { z24.h }, p3/Z, [x22]\n"
- "ld1h { z25.h }, p2/Z, [x22, #1, MUL VL]\n"
- "ld1h { z26.h }, p1/Z, [x22, #2, MUL VL]\n"
- "ld1h { z27.h }, p0/Z, [x22, #3, MUL VL]\n"
- "ld1h { z28.h }, p3/Z, [x21]\n"
- "ld1h { z29.h }, p2/Z, [x21, #1, MUL VL]\n"
- "ld1h { z30.h }, p1/Z, [x21, #2, MUL VL]\n"
- "ld1h { z31.h }, p0/Z, [x21, #3, MUL VL]\n"
+ "ld1h { z12.h }, p3/Z, [x24]\n"
+ "ld1h { z13.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z16.h }, p3/Z, [x23]\n"
+ "ld1h { z17.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z18.h }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z19.h }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z20.h }, p3/Z, [x22]\n"
+ "ld1h { z21.h }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z22.h }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z23.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1h { z24.h }, p3/Z, [x21]\n"
+ "ld1h { z25.h }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1h { z26.h }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z27.h }, p0/Z, [x21, #3, MUL VL]\n"
+ "ld1h { z28.h }, p3/Z, [x20]\n"
+ "ld1h { z29.h }, p2/Z, [x20, #1, MUL VL]\n"
+ "ld1h { z30.h }, p1/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z31.h }, p0/Z, [x20, #3, MUL VL]\n"
"b 65f\n"
"64:" // Height 6: no accumulate
"mov z8.b, #0x0\n"
@@ -1141,16 +1141,16 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"66:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
"ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 67f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "ldr x24, [x20, #0x10]\n"
+ "ldr x23, [x20, #0x18]\n"
+ "ldr x22, [x20, #0x20]\n"
+ "ldr x21, [x20, #0x28]\n"
"cbnz x28, 68f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
"add x26, x26, x20, LSL #1\n"
@@ -1162,11 +1162,11 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"b 68f\n"
"67:" // Height 6: setup direct input
"mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
+ "add x25, x26, x21, LSL #1\n"
+ "add x24, x25, x21, LSL #1\n"
+ "add x23, x24, x21, LSL #1\n"
+ "add x22, x23, x21, LSL #1\n"
+ "add x21, x22, x21, LSL #1\n"
"68:" // Height 6: input setup done
"subs x27, x27, #0x1\n"
"ld1rh { z0.h }, p4/Z, [x26]\n"
@@ -1355,7 +1355,6 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"74:" // Exit
-
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
@@ -1363,4 +1362,4 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
}
} // namespace arm_gemm
-#endif // ARM_COMPUTE_ENABLE_SVE
+#endif // ARM_COMPUTE_ENABLE_SVE