Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/a64fx.cpp')
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/a64fx.cpp  678
1 file changed, 339 insertions, 339 deletions
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/a64fx.cpp
index 5f093bf08a..66601bd312 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/a64fx.cpp
@@ -163,11 +163,11 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"7:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
"ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 8f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
"cbnz x28, 9f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
"add x26, x26, x20, LSL #1\n"
@@ -183,12 +183,12 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"10:" // Height 1: Multiply loop: Main loop
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
- "ld1h { z7.h }, p4/Z, [x9]\n"
+ "ld1h { z17.h }, p4/Z, [x10]\n"
+ "ld1h { z16.h }, p4/Z, [x9]\n"
"addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
- "fmla z10.h, p4/M, z6.h, z0.h\n"
- "fmla z11.h, p4/M, z7.h, z0.h\n"
+ "fmla z10.h, p4/M, z17.h, z0.h\n"
+ "fmla z11.h, p4/M, z16.h, z0.h\n"
"add x26, x26, #0x2\n"
"subs x27, x27, #0x1\n"
"ld1rh { z0.h }, p4/Z, [x26]\n"
@@ -201,12 +201,12 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
- "ld1h { z7.h }, p4/Z, [x9]\n"
+ "ld1h { z17.h }, p4/Z, [x10]\n"
+ "ld1h { z16.h }, p4/Z, [x9]\n"
"add x28, x28, #0x1\n"
"cmp x28, x20\n"
- "fmla z10.h, p4/M, z6.h, z0.h\n"
- "fmla z11.h, p4/M, z7.h, z0.h\n"
+ "fmla z10.h, p4/M, z17.h, z0.h\n"
+ "fmla z11.h, p4/M, z16.h, z0.h\n"
"addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
@@ -214,17 +214,17 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"bne 7b\n"
"tbz %x[flags], #1, 12f\n"
"add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p4/Z, [x20]\n"
+ "ld1rh { z17.h }, p4/Z, [x20]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p4/Z, [x20]\n"
- "fmin z8.h, p4/M, z8.h, z1.h\n"
- "fmin z9.h, p4/M, z9.h, z1.h\n"
- "fmin z10.h, p4/M, z10.h, z1.h\n"
- "fmin z11.h, p4/M, z11.h, z1.h\n"
- "fmax z8.h, p4/M, z8.h, z0.h\n"
- "fmax z9.h, p4/M, z9.h, z0.h\n"
- "fmax z10.h, p4/M, z10.h, z0.h\n"
- "fmax z11.h, p4/M, z11.h, z0.h\n"
+ "ld1rh { z16.h }, p4/Z, [x20]\n"
+ "fmin z8.h, p4/M, z8.h, z17.h\n"
+ "fmin z9.h, p4/M, z9.h, z17.h\n"
+ "fmin z10.h, p4/M, z10.h, z17.h\n"
+ "fmin z11.h, p4/M, z11.h, z17.h\n"
+ "fmax z8.h, p4/M, z8.h, z16.h\n"
+ "fmax z9.h, p4/M, z9.h, z16.h\n"
+ "fmax z10.h, p4/M, z10.h, z16.h\n"
+ "fmax z11.h, p4/M, z11.h, z16.h\n"
"12:" // Height 1: No activation
"st1h { z8.h }, p3, [x13]\n"
"st1h { z9.h }, p2, [x13, #1, MUL VL]\n"
@@ -285,15 +285,15 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"17:" // Height 2: no bias
"tbz %x[flags], #0, 18f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
+ "add x20, x13, x20, LSL #1\n"
"ld1h { z8.h }, p3/Z, [x13]\n"
"ld1h { z9.h }, p2/Z, [x13, #1, MUL VL]\n"
"ld1h { z10.h }, p1/Z, [x13, #2, MUL VL]\n"
"ld1h { z11.h }, p0/Z, [x13, #3, MUL VL]\n"
- "ld1h { z12.h }, p3/Z, [x25]\n"
- "ld1h { z13.h }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p0/Z, [x25, #3, MUL VL]\n"
+ "ld1h { z12.h }, p3/Z, [x20]\n"
+ "ld1h { z13.h }, p2/Z, [x20, #1, MUL VL]\n"
+ "ld1h { z14.h }, p1/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z15.h }, p0/Z, [x20, #3, MUL VL]\n"
"b 19f\n"
"18:" // Height 2: no accumulate
"mov z8.b, #0x0\n"
@@ -309,12 +309,12 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"20:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
"ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 21f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
"cbnz x28, 22f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
"add x26, x26, x20, LSL #1\n"
@@ -322,7 +322,7 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"b 22f\n"
"21:" // Height 2: setup direct input
"mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
+ "add x25, x26, x21, LSL #1\n"
"22:" // Height 2: input setup done
"subs x27, x27, #0x1\n"
"ld1rh { z0.h }, p4/Z, [x26]\n"
@@ -333,19 +333,19 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"23:" // Height 2: Multiply loop: Main loop
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z12.h, p4/M, z6.h, z1.h\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
+ "ld1h { z17.h }, p4/Z, [x10]\n"
"addvl x12, x12, #1\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
- "ld1h { z7.h }, p4/Z, [x9]\n"
+ "ld1h { z16.h }, p4/Z, [x9]\n"
"addvl x11, x11, #1\n"
"add x26, x26, #0x2\n"
"subs x27, x27, #0x1\n"
- "fmla z10.h, p4/M, z6.h, z0.h\n"
- "fmla z14.h, p4/M, z6.h, z1.h\n"
+ "fmla z10.h, p4/M, z17.h, z0.h\n"
+ "fmla z14.h, p4/M, z17.h, z1.h\n"
"add x25, x25, #0x2\n"
- "fmla z11.h, p4/M, z7.h, z0.h\n"
- "fmla z15.h, p4/M, z7.h, z1.h\n"
+ "fmla z11.h, p4/M, z16.h, z0.h\n"
+ "fmla z15.h, p4/M, z16.h, z1.h\n"
"addvl x10, x10, #1\n"
"ld1rh { z0.h }, p4/Z, [x26]\n"
"ld1rh { z1.h }, p4/Z, [x25]\n"
@@ -357,18 +357,18 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z12.h, p4/M, z6.h, z1.h\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
+ "ld1h { z17.h }, p4/Z, [x10]\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
- "ld1h { z7.h }, p4/Z, [x9]\n"
+ "ld1h { z16.h }, p4/Z, [x9]\n"
"add x28, x28, #0x1\n"
"cmp x28, x20\n"
- "fmla z10.h, p4/M, z6.h, z0.h\n"
- "fmla z14.h, p4/M, z6.h, z1.h\n"
+ "fmla z10.h, p4/M, z17.h, z0.h\n"
+ "fmla z14.h, p4/M, z17.h, z1.h\n"
"addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
- "fmla z11.h, p4/M, z7.h, z0.h\n"
- "fmla z15.h, p4/M, z7.h, z1.h\n"
+ "fmla z11.h, p4/M, z16.h, z0.h\n"
+ "fmla z15.h, p4/M, z16.h, z1.h\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
"bne 20b\n"
@@ -376,25 +376,25 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"add x25, x13, x20, LSL #1\n"
"tbz %x[flags], #1, 25f\n"
"add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p4/Z, [x20]\n"
+ "ld1rh { z17.h }, p4/Z, [x20]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p4/Z, [x20]\n"
- "fmin z8.h, p4/M, z8.h, z1.h\n"
- "fmin z9.h, p4/M, z9.h, z1.h\n"
- "fmin z10.h, p4/M, z10.h, z1.h\n"
- "fmin z11.h, p4/M, z11.h, z1.h\n"
- "fmin z12.h, p4/M, z12.h, z1.h\n"
- "fmin z13.h, p4/M, z13.h, z1.h\n"
- "fmin z14.h, p4/M, z14.h, z1.h\n"
- "fmin z15.h, p4/M, z15.h, z1.h\n"
- "fmax z8.h, p4/M, z8.h, z0.h\n"
- "fmax z9.h, p4/M, z9.h, z0.h\n"
- "fmax z10.h, p4/M, z10.h, z0.h\n"
- "fmax z11.h, p4/M, z11.h, z0.h\n"
- "fmax z12.h, p4/M, z12.h, z0.h\n"
- "fmax z13.h, p4/M, z13.h, z0.h\n"
- "fmax z14.h, p4/M, z14.h, z0.h\n"
- "fmax z15.h, p4/M, z15.h, z0.h\n"
+ "ld1rh { z16.h }, p4/Z, [x20]\n"
+ "fmin z8.h, p4/M, z8.h, z17.h\n"
+ "fmin z9.h, p4/M, z9.h, z17.h\n"
+ "fmin z10.h, p4/M, z10.h, z17.h\n"
+ "fmin z11.h, p4/M, z11.h, z17.h\n"
+ "fmin z12.h, p4/M, z12.h, z17.h\n"
+ "fmin z13.h, p4/M, z13.h, z17.h\n"
+ "fmin z14.h, p4/M, z14.h, z17.h\n"
+ "fmin z15.h, p4/M, z15.h, z17.h\n"
+ "fmax z8.h, p4/M, z8.h, z16.h\n"
+ "fmax z9.h, p4/M, z9.h, z16.h\n"
+ "fmax z10.h, p4/M, z10.h, z16.h\n"
+ "fmax z11.h, p4/M, z11.h, z16.h\n"
+ "fmax z12.h, p4/M, z12.h, z16.h\n"
+ "fmax z13.h, p4/M, z13.h, z16.h\n"
+ "fmax z14.h, p4/M, z14.h, z16.h\n"
+ "fmax z15.h, p4/M, z15.h, z16.h\n"
"25:" // Height 2: No activation
"st1h { z8.h }, p3, [x13]\n"
"st1h { z9.h }, p2, [x13, #1, MUL VL]\n"
@@ -463,20 +463,20 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"30:" // Height 3: no bias
"tbz %x[flags], #0, 31f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "add x21, x13, x20, LSL #1\n"
+ "add x20, x21, x20, LSL #1\n"
"ld1h { z8.h }, p3/Z, [x13]\n"
"ld1h { z9.h }, p2/Z, [x13, #1, MUL VL]\n"
"ld1h { z10.h }, p1/Z, [x13, #2, MUL VL]\n"
"ld1h { z11.h }, p0/Z, [x13, #3, MUL VL]\n"
- "ld1h { z12.h }, p3/Z, [x25]\n"
- "ld1h { z13.h }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p0/Z, [x25, #3, MUL VL]\n"
- "ld1h { z16.h }, p3/Z, [x24]\n"
- "ld1h { z17.h }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1h { z18.h }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1h { z19.h }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z12.h }, p3/Z, [x21]\n"
+ "ld1h { z13.h }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1h { z14.h }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z15.h }, p0/Z, [x21, #3, MUL VL]\n"
+ "ld1h { z16.h }, p3/Z, [x20]\n"
+ "ld1h { z17.h }, p2/Z, [x20, #1, MUL VL]\n"
+ "ld1h { z18.h }, p1/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z19.h }, p0/Z, [x20, #3, MUL VL]\n"
"b 32f\n"
"31:" // Height 3: no accumulate
"mov z8.b, #0x0\n"
@@ -496,13 +496,13 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"33:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
"ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 34f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "ldr x24, [x20, #0x10]\n"
"cbnz x28, 35f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
"add x26, x26, x20, LSL #1\n"
@@ -511,8 +511,8 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"b 35f\n"
"34:" // Height 3: setup direct input
"mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "add x25, x26, x21, LSL #1\n"
+ "add x24, x25, x21, LSL #1\n"
"35:" // Height 3: input setup done
"subs x27, x27, #0x1\n"
"ld1rh { z0.h }, p4/Z, [x26]\n"
@@ -528,22 +528,22 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"addvl x11, x11, #1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
+ "ld1h { z21.h }, p4/Z, [x10]\n"
"add x26, x26, #0x2\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
- "ld1h { z7.h }, p4/Z, [x9]\n"
+ "ld1h { z20.h }, p4/Z, [x9]\n"
"subs x27, x27, #0x1\n"
"add x25, x25, #0x2\n"
"add x24, x24, #0x2\n"
- "fmla z10.h, p4/M, z6.h, z0.h\n"
- "fmla z14.h, p4/M, z6.h, z1.h\n"
- "fmla z18.h, p4/M, z6.h, z2.h\n"
- "fmla z11.h, p4/M, z7.h, z0.h\n"
+ "fmla z10.h, p4/M, z21.h, z0.h\n"
+ "fmla z14.h, p4/M, z21.h, z1.h\n"
+ "fmla z18.h, p4/M, z21.h, z2.h\n"
+ "fmla z11.h, p4/M, z20.h, z0.h\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
- "fmla z15.h, p4/M, z7.h, z1.h\n"
- "fmla z19.h, p4/M, z7.h, z2.h\n"
+ "fmla z15.h, p4/M, z20.h, z1.h\n"
+ "fmla z19.h, p4/M, z20.h, z2.h\n"
"ld1rh { z0.h }, p4/Z, [x26]\n"
"ld1rh { z1.h }, p4/Z, [x25]\n"
"ld1rh { z2.h }, p4/Z, [x24]\n"
@@ -557,54 +557,54 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"add x28, x28, #0x1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
+ "ld1h { z21.h }, p4/Z, [x10]\n"
"cmp x28, x20\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
- "ld1h { z7.h }, p4/Z, [x9]\n"
+ "ld1h { z20.h }, p4/Z, [x9]\n"
"addvl x12, x12, #1\n"
- "fmla z10.h, p4/M, z6.h, z0.h\n"
- "fmla z14.h, p4/M, z6.h, z1.h\n"
+ "fmla z10.h, p4/M, z21.h, z0.h\n"
+ "fmla z14.h, p4/M, z21.h, z1.h\n"
"addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
- "fmla z18.h, p4/M, z6.h, z2.h\n"
- "fmla z11.h, p4/M, z7.h, z0.h\n"
+ "fmla z18.h, p4/M, z21.h, z2.h\n"
+ "fmla z11.h, p4/M, z20.h, z0.h\n"
"addvl x9, x9, #1\n"
- "fmla z15.h, p4/M, z7.h, z1.h\n"
- "fmla z19.h, p4/M, z7.h, z2.h\n"
+ "fmla z15.h, p4/M, z20.h, z1.h\n"
+ "fmla z19.h, p4/M, z20.h, z2.h\n"
"bne 33b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"add x25, x13, x20, LSL #1\n"
"add x24, x25, x20, LSL #1\n"
"tbz %x[flags], #1, 38f\n"
"add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p4/Z, [x20]\n"
+ "ld1rh { z21.h }, p4/Z, [x20]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p4/Z, [x20]\n"
- "fmin z8.h, p4/M, z8.h, z1.h\n"
- "fmin z9.h, p4/M, z9.h, z1.h\n"
- "fmin z10.h, p4/M, z10.h, z1.h\n"
- "fmin z11.h, p4/M, z11.h, z1.h\n"
- "fmin z12.h, p4/M, z12.h, z1.h\n"
- "fmin z13.h, p4/M, z13.h, z1.h\n"
- "fmin z14.h, p4/M, z14.h, z1.h\n"
- "fmin z15.h, p4/M, z15.h, z1.h\n"
- "fmin z16.h, p4/M, z16.h, z1.h\n"
- "fmin z17.h, p4/M, z17.h, z1.h\n"
- "fmin z18.h, p4/M, z18.h, z1.h\n"
- "fmin z19.h, p4/M, z19.h, z1.h\n"
- "fmax z8.h, p4/M, z8.h, z0.h\n"
- "fmax z9.h, p4/M, z9.h, z0.h\n"
- "fmax z10.h, p4/M, z10.h, z0.h\n"
- "fmax z11.h, p4/M, z11.h, z0.h\n"
- "fmax z12.h, p4/M, z12.h, z0.h\n"
- "fmax z13.h, p4/M, z13.h, z0.h\n"
- "fmax z14.h, p4/M, z14.h, z0.h\n"
- "fmax z15.h, p4/M, z15.h, z0.h\n"
- "fmax z16.h, p4/M, z16.h, z0.h\n"
- "fmax z17.h, p4/M, z17.h, z0.h\n"
- "fmax z18.h, p4/M, z18.h, z0.h\n"
- "fmax z19.h, p4/M, z19.h, z0.h\n"
+ "ld1rh { z20.h }, p4/Z, [x20]\n"
+ "fmin z8.h, p4/M, z8.h, z21.h\n"
+ "fmin z9.h, p4/M, z9.h, z21.h\n"
+ "fmin z10.h, p4/M, z10.h, z21.h\n"
+ "fmin z11.h, p4/M, z11.h, z21.h\n"
+ "fmin z12.h, p4/M, z12.h, z21.h\n"
+ "fmin z13.h, p4/M, z13.h, z21.h\n"
+ "fmin z14.h, p4/M, z14.h, z21.h\n"
+ "fmin z15.h, p4/M, z15.h, z21.h\n"
+ "fmin z16.h, p4/M, z16.h, z21.h\n"
+ "fmin z17.h, p4/M, z17.h, z21.h\n"
+ "fmin z18.h, p4/M, z18.h, z21.h\n"
+ "fmin z19.h, p4/M, z19.h, z21.h\n"
+ "fmax z8.h, p4/M, z8.h, z20.h\n"
+ "fmax z9.h, p4/M, z9.h, z20.h\n"
+ "fmax z10.h, p4/M, z10.h, z20.h\n"
+ "fmax z11.h, p4/M, z11.h, z20.h\n"
+ "fmax z12.h, p4/M, z12.h, z20.h\n"
+ "fmax z13.h, p4/M, z13.h, z20.h\n"
+ "fmax z14.h, p4/M, z14.h, z20.h\n"
+ "fmax z15.h, p4/M, z15.h, z20.h\n"
+ "fmax z16.h, p4/M, z16.h, z20.h\n"
+ "fmax z17.h, p4/M, z17.h, z20.h\n"
+ "fmax z18.h, p4/M, z18.h, z20.h\n"
+ "fmax z19.h, p4/M, z19.h, z20.h\n"
"38:" // Height 3: No activation
"st1h { z8.h }, p3, [x13]\n"
"st1h { z9.h }, p2, [x13, #1, MUL VL]\n"
@@ -681,25 +681,25 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"43:" // Height 4: no bias
"tbz %x[flags], #0, 44f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "add x22, x13, x20, LSL #1\n"
+ "add x21, x22, x20, LSL #1\n"
"ld1h { z8.h }, p3/Z, [x13]\n"
- "add x23, x24, x20, LSL #1\n"
+ "add x20, x21, x20, LSL #1\n"
"ld1h { z9.h }, p2/Z, [x13, #1, MUL VL]\n"
"ld1h { z10.h }, p1/Z, [x13, #2, MUL VL]\n"
"ld1h { z11.h }, p0/Z, [x13, #3, MUL VL]\n"
- "ld1h { z12.h }, p3/Z, [x25]\n"
- "ld1h { z13.h }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p0/Z, [x25, #3, MUL VL]\n"
- "ld1h { z16.h }, p3/Z, [x24]\n"
- "ld1h { z17.h }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1h { z18.h }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1h { z19.h }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1h { z20.h }, p3/Z, [x23]\n"
- "ld1h { z21.h }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1h { z22.h }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1h { z23.h }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z12.h }, p3/Z, [x22]\n"
+ "ld1h { z13.h }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z14.h }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z15.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1h { z16.h }, p3/Z, [x21]\n"
+ "ld1h { z17.h }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1h { z18.h }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z19.h }, p0/Z, [x21, #3, MUL VL]\n"
+ "ld1h { z20.h }, p3/Z, [x20]\n"
+ "ld1h { z21.h }, p2/Z, [x20, #1, MUL VL]\n"
+ "ld1h { z22.h }, p1/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z23.h }, p0/Z, [x20, #3, MUL VL]\n"
"b 45f\n"
"44:" // Height 4: no accumulate
"mov z8.b, #0x0\n"
@@ -723,14 +723,14 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"46:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
"ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 47f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "ldr x24, [x20, #0x10]\n"
+ "ldr x23, [x20, #0x18]\n"
"cbnz x28, 48f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
"add x26, x26, x20, LSL #1\n"
@@ -740,9 +740,9 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"b 48f\n"
"47:" // Height 4: setup direct input
"mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
+ "add x25, x26, x21, LSL #1\n"
+ "add x24, x25, x21, LSL #1\n"
+ "add x23, x24, x21, LSL #1\n"
"48:" // Height 4: input setup done
"subs x27, x27, #0x1\n"
"ld1rh { z0.h }, p4/Z, [x26]\n"
@@ -759,7 +759,7 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"addvl x11, x11, #1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z20.h, p4/M, z6.h, z3.h\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
+ "ld1h { z25.h }, p4/Z, [x10]\n"
"add x26, x26, #0x2\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
@@ -767,22 +767,22 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"add x25, x25, #0x2\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
"fmla z21.h, p4/M, z7.h, z3.h\n"
- "ld1h { z7.h }, p4/Z, [x9]\n"
+ "ld1h { z24.h }, p4/Z, [x9]\n"
"add x24, x24, #0x2\n"
"add x23, x23, #0x2\n"
- "fmla z10.h, p4/M, z6.h, z0.h\n"
- "fmla z14.h, p4/M, z6.h, z1.h\n"
+ "fmla z10.h, p4/M, z25.h, z0.h\n"
+ "fmla z14.h, p4/M, z25.h, z1.h\n"
"addvl x10, x10, #1\n"
- "fmla z18.h, p4/M, z6.h, z2.h\n"
- "fmla z22.h, p4/M, z6.h, z3.h\n"
+ "fmla z18.h, p4/M, z25.h, z2.h\n"
+ "fmla z22.h, p4/M, z25.h, z3.h\n"
"addvl x9, x9, #1\n"
"ld1h { z6.h }, p4/Z, [x12]\n"
- "fmla z11.h, p4/M, z7.h, z0.h\n"
- "fmla z15.h, p4/M, z7.h, z1.h\n"
+ "fmla z11.h, p4/M, z24.h, z0.h\n"
+ "fmla z15.h, p4/M, z24.h, z1.h\n"
"ld1rh { z0.h }, p4/Z, [x26]\n"
"ld1rh { z1.h }, p4/Z, [x25]\n"
- "fmla z19.h, p4/M, z7.h, z2.h\n"
- "fmla z23.h, p4/M, z7.h, z3.h\n"
+ "fmla z19.h, p4/M, z24.h, z2.h\n"
+ "fmla z23.h, p4/M, z24.h, z3.h\n"
"ld1rh { z2.h }, p4/Z, [x24]\n"
"ld1rh { z3.h }, p4/Z, [x23]\n"
"ld1h { z7.h }, p4/Z, [x11]\n"
@@ -794,7 +794,7 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"add x28, x28, #0x1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z20.h, p4/M, z6.h, z3.h\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
+ "ld1h { z25.h }, p4/Z, [x10]\n"
"cmp x28, x20\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
@@ -802,17 +802,17 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"addvl x11, x11, #1\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
"fmla z21.h, p4/M, z7.h, z3.h\n"
- "ld1h { z7.h }, p4/Z, [x9]\n"
+ "ld1h { z24.h }, p4/Z, [x9]\n"
"addvl x10, x10, #1\n"
- "fmla z10.h, p4/M, z6.h, z0.h\n"
- "fmla z14.h, p4/M, z6.h, z1.h\n"
+ "fmla z10.h, p4/M, z25.h, z0.h\n"
+ "fmla z14.h, p4/M, z25.h, z1.h\n"
"addvl x9, x9, #1\n"
- "fmla z18.h, p4/M, z6.h, z2.h\n"
- "fmla z22.h, p4/M, z6.h, z3.h\n"
- "fmla z11.h, p4/M, z7.h, z0.h\n"
- "fmla z15.h, p4/M, z7.h, z1.h\n"
- "fmla z19.h, p4/M, z7.h, z2.h\n"
- "fmla z23.h, p4/M, z7.h, z3.h\n"
+ "fmla z18.h, p4/M, z25.h, z2.h\n"
+ "fmla z22.h, p4/M, z25.h, z3.h\n"
+ "fmla z11.h, p4/M, z24.h, z0.h\n"
+ "fmla z15.h, p4/M, z24.h, z1.h\n"
+ "fmla z19.h, p4/M, z24.h, z2.h\n"
+ "fmla z23.h, p4/M, z24.h, z3.h\n"
"bne 46b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"add x25, x13, x20, LSL #1\n"
@@ -820,41 +820,41 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"add x23, x24, x20, LSL #1\n"
"tbz %x[flags], #1, 51f\n"
"add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p4/Z, [x20]\n"
+ "ld1rh { z25.h }, p4/Z, [x20]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p4/Z, [x20]\n"
- "fmin z8.h, p4/M, z8.h, z1.h\n"
- "fmin z9.h, p4/M, z9.h, z1.h\n"
- "fmin z10.h, p4/M, z10.h, z1.h\n"
- "fmin z11.h, p4/M, z11.h, z1.h\n"
- "fmin z12.h, p4/M, z12.h, z1.h\n"
- "fmin z13.h, p4/M, z13.h, z1.h\n"
- "fmin z14.h, p4/M, z14.h, z1.h\n"
- "fmin z15.h, p4/M, z15.h, z1.h\n"
- "fmin z16.h, p4/M, z16.h, z1.h\n"
- "fmin z17.h, p4/M, z17.h, z1.h\n"
- "fmin z18.h, p4/M, z18.h, z1.h\n"
- "fmin z19.h, p4/M, z19.h, z1.h\n"
- "fmin z20.h, p4/M, z20.h, z1.h\n"
- "fmin z21.h, p4/M, z21.h, z1.h\n"
- "fmin z22.h, p4/M, z22.h, z1.h\n"
- "fmin z23.h, p4/M, z23.h, z1.h\n"
- "fmax z8.h, p4/M, z8.h, z0.h\n"
- "fmax z9.h, p4/M, z9.h, z0.h\n"
- "fmax z10.h, p4/M, z10.h, z0.h\n"
- "fmax z11.h, p4/M, z11.h, z0.h\n"
- "fmax z12.h, p4/M, z12.h, z0.h\n"
- "fmax z13.h, p4/M, z13.h, z0.h\n"
- "fmax z14.h, p4/M, z14.h, z0.h\n"
- "fmax z15.h, p4/M, z15.h, z0.h\n"
- "fmax z16.h, p4/M, z16.h, z0.h\n"
- "fmax z17.h, p4/M, z17.h, z0.h\n"
- "fmax z18.h, p4/M, z18.h, z0.h\n"
- "fmax z19.h, p4/M, z19.h, z0.h\n"
- "fmax z20.h, p4/M, z20.h, z0.h\n"
- "fmax z21.h, p4/M, z21.h, z0.h\n"
- "fmax z22.h, p4/M, z22.h, z0.h\n"
- "fmax z23.h, p4/M, z23.h, z0.h\n"
+ "ld1rh { z24.h }, p4/Z, [x20]\n"
+ "fmin z8.h, p4/M, z8.h, z25.h\n"
+ "fmin z9.h, p4/M, z9.h, z25.h\n"
+ "fmin z10.h, p4/M, z10.h, z25.h\n"
+ "fmin z11.h, p4/M, z11.h, z25.h\n"
+ "fmin z12.h, p4/M, z12.h, z25.h\n"
+ "fmin z13.h, p4/M, z13.h, z25.h\n"
+ "fmin z14.h, p4/M, z14.h, z25.h\n"
+ "fmin z15.h, p4/M, z15.h, z25.h\n"
+ "fmin z16.h, p4/M, z16.h, z25.h\n"
+ "fmin z17.h, p4/M, z17.h, z25.h\n"
+ "fmin z18.h, p4/M, z18.h, z25.h\n"
+ "fmin z19.h, p4/M, z19.h, z25.h\n"
+ "fmin z20.h, p4/M, z20.h, z25.h\n"
+ "fmin z21.h, p4/M, z21.h, z25.h\n"
+ "fmin z22.h, p4/M, z22.h, z25.h\n"
+ "fmin z23.h, p4/M, z23.h, z25.h\n"
+ "fmax z8.h, p4/M, z8.h, z24.h\n"
+ "fmax z9.h, p4/M, z9.h, z24.h\n"
+ "fmax z10.h, p4/M, z10.h, z24.h\n"
+ "fmax z11.h, p4/M, z11.h, z24.h\n"
+ "fmax z12.h, p4/M, z12.h, z24.h\n"
+ "fmax z13.h, p4/M, z13.h, z24.h\n"
+ "fmax z14.h, p4/M, z14.h, z24.h\n"
+ "fmax z15.h, p4/M, z15.h, z24.h\n"
+ "fmax z16.h, p4/M, z16.h, z24.h\n"
+ "fmax z17.h, p4/M, z17.h, z24.h\n"
+ "fmax z18.h, p4/M, z18.h, z24.h\n"
+ "fmax z19.h, p4/M, z19.h, z24.h\n"
+ "fmax z20.h, p4/M, z20.h, z24.h\n"
+ "fmax z21.h, p4/M, z21.h, z24.h\n"
+ "fmax z22.h, p4/M, z22.h, z24.h\n"
+ "fmax z23.h, p4/M, z23.h, z24.h\n"
"51:" // Height 4: No activation
"st1h { z8.h }, p3, [x13]\n"
"st1h { z9.h }, p2, [x13, #1, MUL VL]\n"
@@ -939,30 +939,30 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"56:" // Height 5: no bias
"tbz %x[flags], #0, 57f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "ld1h { z8.h }, p3/Z, [x13]\n"
- "add x23, x24, x20, LSL #1\n"
+ "add x23, x13, x20, LSL #1\n"
"add x22, x23, x20, LSL #1\n"
+ "ld1h { z8.h }, p3/Z, [x13]\n"
+ "add x21, x22, x20, LSL #1\n"
+ "add x20, x21, x20, LSL #1\n"
"ld1h { z9.h }, p2/Z, [x13, #1, MUL VL]\n"
"ld1h { z10.h }, p1/Z, [x13, #2, MUL VL]\n"
"ld1h { z11.h }, p0/Z, [x13, #3, MUL VL]\n"
- "ld1h { z12.h }, p3/Z, [x25]\n"
- "ld1h { z13.h }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p0/Z, [x25, #3, MUL VL]\n"
- "ld1h { z16.h }, p3/Z, [x24]\n"
- "ld1h { z17.h }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1h { z18.h }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1h { z19.h }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1h { z20.h }, p3/Z, [x23]\n"
- "ld1h { z21.h }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1h { z22.h }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1h { z23.h }, p0/Z, [x23, #3, MUL VL]\n"
- "ld1h { z24.h }, p3/Z, [x22]\n"
- "ld1h { z25.h }, p2/Z, [x22, #1, MUL VL]\n"
- "ld1h { z26.h }, p1/Z, [x22, #2, MUL VL]\n"
- "ld1h { z27.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1h { z12.h }, p3/Z, [x23]\n"
+ "ld1h { z13.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z14.h }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z15.h }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z16.h }, p3/Z, [x22]\n"
+ "ld1h { z17.h }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z18.h }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z19.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1h { z20.h }, p3/Z, [x21]\n"
+ "ld1h { z21.h }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1h { z22.h }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z23.h }, p0/Z, [x21, #3, MUL VL]\n"
+ "ld1h { z24.h }, p3/Z, [x20]\n"
+ "ld1h { z25.h }, p2/Z, [x20, #1, MUL VL]\n"
+ "ld1h { z26.h }, p1/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z27.h }, p0/Z, [x20, #3, MUL VL]\n"
"b 58f\n"
"57:" // Height 5: no accumulate
"mov z8.b, #0x0\n"
@@ -990,15 +990,15 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"59:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
"ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 60f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "ldr x24, [x20, #0x10]\n"
+ "ldr x23, [x20, #0x18]\n"
+ "ldr x22, [x20, #0x20]\n"
"cbnz x28, 61f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
"add x26, x26, x20, LSL #1\n"
@@ -1009,10 +1009,10 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"b 61f\n"
"60:" // Height 5: setup direct input
"mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
+ "add x25, x26, x21, LSL #1\n"
+ "add x24, x25, x21, LSL #1\n"
+ "add x23, x24, x21, LSL #1\n"
+ "add x22, x23, x21, LSL #1\n"
"61:" // Height 5: input setup done
"subs x27, x27, #0x1\n"
"ld1rh { z0.h }, p4/Z, [x26]\n"
@@ -1034,7 +1034,7 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"subs x27, x27, #0x1\n"
"fmla z24.h, p4/M, z6.h, z4.h\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
+ "ld1h { z29.h }, p4/Z, [x10]\n"
"add x25, x25, #0x2\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
@@ -1042,24 +1042,24 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"add x23, x23, #0x2\n"
"fmla z21.h, p4/M, z7.h, z3.h\n"
"fmla z25.h, p4/M, z7.h, z4.h\n"
- "ld1h { z7.h }, p4/Z, [x9]\n"
+ "ld1h { z28.h }, p4/Z, [x9]\n"
"add x22, x22, #0x2\n"
- "fmla z10.h, p4/M, z6.h, z0.h\n"
- "fmla z14.h, p4/M, z6.h, z1.h\n"
+ "fmla z10.h, p4/M, z29.h, z0.h\n"
+ "fmla z14.h, p4/M, z29.h, z1.h\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
- "fmla z18.h, p4/M, z6.h, z2.h\n"
- "fmla z22.h, p4/M, z6.h, z3.h\n"
- "fmla z26.h, p4/M, z6.h, z4.h\n"
- "fmla z11.h, p4/M, z7.h, z0.h\n"
+ "fmla z18.h, p4/M, z29.h, z2.h\n"
+ "fmla z22.h, p4/M, z29.h, z3.h\n"
+ "fmla z26.h, p4/M, z29.h, z4.h\n"
+ "fmla z11.h, p4/M, z28.h, z0.h\n"
"ld1rh { z0.h }, p4/Z, [x26]\n"
"ld1h { z6.h }, p4/Z, [x12]\n"
- "fmla z15.h, p4/M, z7.h, z1.h\n"
- "fmla z19.h, p4/M, z7.h, z2.h\n"
+ "fmla z15.h, p4/M, z28.h, z1.h\n"
+ "fmla z19.h, p4/M, z28.h, z2.h\n"
"ld1rh { z1.h }, p4/Z, [x25]\n"
"ld1rh { z2.h }, p4/Z, [x24]\n"
- "fmla z23.h, p4/M, z7.h, z3.h\n"
- "fmla z27.h, p4/M, z7.h, z4.h\n"
+ "fmla z23.h, p4/M, z28.h, z3.h\n"
+ "fmla z27.h, p4/M, z28.h, z4.h\n"
"ld1rh { z3.h }, p4/Z, [x23]\n"
"ld1rh { z4.h }, p4/Z, [x22]\n"
"ld1h { z7.h }, p4/Z, [x11]\n"
@@ -1075,25 +1075,25 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"addvl x12, x12, #1\n"
"fmla z24.h, p4/M, z6.h, z4.h\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
+ "ld1h { z29.h }, p4/Z, [x10]\n"
"addvl x11, x11, #1\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
"addvl x10, x10, #1\n"
"fmla z21.h, p4/M, z7.h, z3.h\n"
"fmla z25.h, p4/M, z7.h, z4.h\n"
- "ld1h { z7.h }, p4/Z, [x9]\n"
+ "ld1h { z28.h }, p4/Z, [x9]\n"
"addvl x9, x9, #1\n"
- "fmla z10.h, p4/M, z6.h, z0.h\n"
- "fmla z14.h, p4/M, z6.h, z1.h\n"
- "fmla z18.h, p4/M, z6.h, z2.h\n"
- "fmla z22.h, p4/M, z6.h, z3.h\n"
- "fmla z26.h, p4/M, z6.h, z4.h\n"
- "fmla z11.h, p4/M, z7.h, z0.h\n"
- "fmla z15.h, p4/M, z7.h, z1.h\n"
- "fmla z19.h, p4/M, z7.h, z2.h\n"
- "fmla z23.h, p4/M, z7.h, z3.h\n"
- "fmla z27.h, p4/M, z7.h, z4.h\n"
+ "fmla z10.h, p4/M, z29.h, z0.h\n"
+ "fmla z14.h, p4/M, z29.h, z1.h\n"
+ "fmla z18.h, p4/M, z29.h, z2.h\n"
+ "fmla z22.h, p4/M, z29.h, z3.h\n"
+ "fmla z26.h, p4/M, z29.h, z4.h\n"
+ "fmla z11.h, p4/M, z28.h, z0.h\n"
+ "fmla z15.h, p4/M, z28.h, z1.h\n"
+ "fmla z19.h, p4/M, z28.h, z2.h\n"
+ "fmla z23.h, p4/M, z28.h, z3.h\n"
+ "fmla z27.h, p4/M, z28.h, z4.h\n"
"bne 59b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"add x25, x13, x20, LSL #1\n"
@@ -1102,49 +1102,49 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"add x22, x23, x20, LSL #1\n"
"tbz %x[flags], #1, 64f\n"
"add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p4/Z, [x20]\n"
+ "ld1rh { z29.h }, p4/Z, [x20]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
- "ld1rh { z0.h }, p4/Z, [x20]\n"
- "fmin z8.h, p4/M, z8.h, z1.h\n"
- "fmin z9.h, p4/M, z9.h, z1.h\n"
- "fmin z10.h, p4/M, z10.h, z1.h\n"
- "fmin z11.h, p4/M, z11.h, z1.h\n"
- "fmin z12.h, p4/M, z12.h, z1.h\n"
- "fmin z13.h, p4/M, z13.h, z1.h\n"
- "fmin z14.h, p4/M, z14.h, z1.h\n"
- "fmin z15.h, p4/M, z15.h, z1.h\n"
- "fmin z16.h, p4/M, z16.h, z1.h\n"
- "fmin z17.h, p4/M, z17.h, z1.h\n"
- "fmin z18.h, p4/M, z18.h, z1.h\n"
- "fmin z19.h, p4/M, z19.h, z1.h\n"
- "fmin z20.h, p4/M, z20.h, z1.h\n"
- "fmin z21.h, p4/M, z21.h, z1.h\n"
- "fmin z22.h, p4/M, z22.h, z1.h\n"
- "fmin z23.h, p4/M, z23.h, z1.h\n"
- "fmin z24.h, p4/M, z24.h, z1.h\n"
- "fmin z25.h, p4/M, z25.h, z1.h\n"
- "fmin z26.h, p4/M, z26.h, z1.h\n"
- "fmin z27.h, p4/M, z27.h, z1.h\n"
- "fmax z8.h, p4/M, z8.h, z0.h\n"
- "fmax z9.h, p4/M, z9.h, z0.h\n"
- "fmax z10.h, p4/M, z10.h, z0.h\n"
- "fmax z11.h, p4/M, z11.h, z0.h\n"
- "fmax z12.h, p4/M, z12.h, z0.h\n"
- "fmax z13.h, p4/M, z13.h, z0.h\n"
- "fmax z14.h, p4/M, z14.h, z0.h\n"
- "fmax z15.h, p4/M, z15.h, z0.h\n"
- "fmax z16.h, p4/M, z16.h, z0.h\n"
- "fmax z17.h, p4/M, z17.h, z0.h\n"
- "fmax z18.h, p4/M, z18.h, z0.h\n"
- "fmax z19.h, p4/M, z19.h, z0.h\n"
- "fmax z20.h, p4/M, z20.h, z0.h\n"
- "fmax z21.h, p4/M, z21.h, z0.h\n"
- "fmax z22.h, p4/M, z22.h, z0.h\n"
- "fmax z23.h, p4/M, z23.h, z0.h\n"
- "fmax z24.h, p4/M, z24.h, z0.h\n"
- "fmax z25.h, p4/M, z25.h, z0.h\n"
- "fmax z26.h, p4/M, z26.h, z0.h\n"
- "fmax z27.h, p4/M, z27.h, z0.h\n"
+ "ld1rh { z28.h }, p4/Z, [x20]\n"
+ "fmin z8.h, p4/M, z8.h, z29.h\n"
+ "fmin z9.h, p4/M, z9.h, z29.h\n"
+ "fmin z10.h, p4/M, z10.h, z29.h\n"
+ "fmin z11.h, p4/M, z11.h, z29.h\n"
+ "fmin z12.h, p4/M, z12.h, z29.h\n"
+ "fmin z13.h, p4/M, z13.h, z29.h\n"
+ "fmin z14.h, p4/M, z14.h, z29.h\n"
+ "fmin z15.h, p4/M, z15.h, z29.h\n"
+ "fmin z16.h, p4/M, z16.h, z29.h\n"
+ "fmin z17.h, p4/M, z17.h, z29.h\n"
+ "fmin z18.h, p4/M, z18.h, z29.h\n"
+ "fmin z19.h, p4/M, z19.h, z29.h\n"
+ "fmin z20.h, p4/M, z20.h, z29.h\n"
+ "fmin z21.h, p4/M, z21.h, z29.h\n"
+ "fmin z22.h, p4/M, z22.h, z29.h\n"
+ "fmin z23.h, p4/M, z23.h, z29.h\n"
+ "fmin z24.h, p4/M, z24.h, z29.h\n"
+ "fmin z25.h, p4/M, z25.h, z29.h\n"
+ "fmin z26.h, p4/M, z26.h, z29.h\n"
+ "fmin z27.h, p4/M, z27.h, z29.h\n"
+ "fmax z8.h, p4/M, z8.h, z28.h\n"
+ "fmax z9.h, p4/M, z9.h, z28.h\n"
+ "fmax z10.h, p4/M, z10.h, z28.h\n"
+ "fmax z11.h, p4/M, z11.h, z28.h\n"
+ "fmax z12.h, p4/M, z12.h, z28.h\n"
+ "fmax z13.h, p4/M, z13.h, z28.h\n"
+ "fmax z14.h, p4/M, z14.h, z28.h\n"
+ "fmax z15.h, p4/M, z15.h, z28.h\n"
+ "fmax z16.h, p4/M, z16.h, z28.h\n"
+ "fmax z17.h, p4/M, z17.h, z28.h\n"
+ "fmax z18.h, p4/M, z18.h, z28.h\n"
+ "fmax z19.h, p4/M, z19.h, z28.h\n"
+ "fmax z20.h, p4/M, z20.h, z28.h\n"
+ "fmax z21.h, p4/M, z21.h, z28.h\n"
+ "fmax z22.h, p4/M, z22.h, z28.h\n"
+ "fmax z23.h, p4/M, z23.h, z28.h\n"
+ "fmax z24.h, p4/M, z24.h, z28.h\n"
+ "fmax z25.h, p4/M, z25.h, z28.h\n"
+ "fmax z26.h, p4/M, z26.h, z28.h\n"
+ "fmax z27.h, p4/M, z27.h, z28.h\n"
"64:" // Height 5: No activation
"st1h { z8.h }, p3, [x13]\n"
"st1h { z9.h }, p2, [x13, #1, MUL VL]\n"
@@ -1240,35 +1240,35 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"69:" // Height 6: no bias
"tbz %x[flags], #0, 70f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "ld1h { z8.h }, p3/Z, [x13]\n"
+ "add x24, x13, x20, LSL #1\n"
"add x23, x24, x20, LSL #1\n"
+ "ld1h { z8.h }, p3/Z, [x13]\n"
"add x22, x23, x20, LSL #1\n"
+ "add x21, x22, x20, LSL #1\n"
"ld1h { z9.h }, p2/Z, [x13, #1, MUL VL]\n"
"ld1h { z10.h }, p1/Z, [x13, #2, MUL VL]\n"
- "add x21, x22, x20, LSL #1\n"
+ "add x20, x21, x20, LSL #1\n"
"ld1h { z11.h }, p0/Z, [x13, #3, MUL VL]\n"
- "ld1h { z12.h }, p3/Z, [x25]\n"
- "ld1h { z13.h }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1h { z14.h }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1h { z15.h }, p0/Z, [x25, #3, MUL VL]\n"
- "ld1h { z16.h }, p3/Z, [x24]\n"
- "ld1h { z17.h }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1h { z18.h }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1h { z19.h }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1h { z20.h }, p3/Z, [x23]\n"
- "ld1h { z21.h }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1h { z22.h }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1h { z23.h }, p0/Z, [x23, #3, MUL VL]\n"
- "ld1h { z24.h }, p3/Z, [x22]\n"
- "ld1h { z25.h }, p2/Z, [x22, #1, MUL VL]\n"
- "ld1h { z26.h }, p1/Z, [x22, #2, MUL VL]\n"
- "ld1h { z27.h }, p0/Z, [x22, #3, MUL VL]\n"
- "ld1h { z28.h }, p3/Z, [x21]\n"
- "ld1h { z29.h }, p2/Z, [x21, #1, MUL VL]\n"
- "ld1h { z30.h }, p1/Z, [x21, #2, MUL VL]\n"
- "ld1h { z31.h }, p0/Z, [x21, #3, MUL VL]\n"
+ "ld1h { z12.h }, p3/Z, [x24]\n"
+ "ld1h { z13.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z16.h }, p3/Z, [x23]\n"
+ "ld1h { z17.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z18.h }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z19.h }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z20.h }, p3/Z, [x22]\n"
+ "ld1h { z21.h }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z22.h }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z23.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1h { z24.h }, p3/Z, [x21]\n"
+ "ld1h { z25.h }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1h { z26.h }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z27.h }, p0/Z, [x21, #3, MUL VL]\n"
+ "ld1h { z28.h }, p3/Z, [x20]\n"
+ "ld1h { z29.h }, p2/Z, [x20, #1, MUL VL]\n"
+ "ld1h { z30.h }, p1/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z31.h }, p0/Z, [x20, #3, MUL VL]\n"
"b 71f\n"
"70:" // Height 6: no accumulate
"mov z8.b, #0x0\n"
@@ -1300,16 +1300,16 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"72:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
"ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 73f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "ldr x24, [x20, #0x10]\n"
+ "ldr x23, [x20, #0x18]\n"
+ "ldr x22, [x20, #0x20]\n"
+ "ldr x21, [x20, #0x28]\n"
"cbnz x28, 74f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
"add x26, x26, x20, LSL #1\n"
@@ -1321,11 +1321,11 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"b 74f\n"
"73:" // Height 6: setup direct input
"mov x26, %x[input_ptr]\n"
- "add x25, x26, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
+ "add x25, x26, x21, LSL #1\n"
+ "add x24, x25, x21, LSL #1\n"
+ "add x23, x24, x21, LSL #1\n"
+ "add x22, x23, x21, LSL #1\n"
+ "add x21, x22, x21, LSL #1\n"
"74:" // Height 6: input setup done
"subs x27, x27, #0x1\n"
"ld1rh { z0.h }, p4/Z, [x26]\n"
@@ -1527,4 +1527,4 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
}
} // namespace arm_gemm
-#endif // ARM_COMPUTE_ENABLE_SVE
+#endif // ARM_COMPUTE_ENABLE_SVE