Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_mmla_6x16/generic.cpp')
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_mmla_6x16/generic.cpp  1704
1 file changed, 851 insertions, 853 deletions
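The hunks below mostly renumber registers in the generated assembly: the q6/q7 weight temporaries and the v0/v2 interleaved-input registers move to higher-numbered vector registers (v17-v31 and similar), and the x20/x21 roles in the string-loop input setup are swapped, while the instruction sequence stays the same. The arithmetic itself is the usual 8-bit MMLA scheme: pairs of trn1/trn2 on .2d lanes pack two rows of A into each operand register, and each smmla accumulates a 2x2 int32 tile from a 2x8 by 8x2 int8 block. The following is a minimal, illustrative ACLE-intrinsics sketch of that pattern, not the kernel's own code; it assumes FEAT_I8MM support (e.g. -march=armv8.6-a+i8mm) and that the B panel is packed as this kernel expects, with eight K values per column in each 64-bit half of a q register.

/* Illustrative sketch of the trn1/trn2 + smmla tile pattern (assumptions noted above). */
#include <arm_neon.h>

static inline int32x4_t smmla_2x2_tile(int8x16_t a_row0,   /* A row 0, K = 0..15 */
                                       int8x16_t a_row1,   /* A row 1, K = 0..15 */
                                       int8x16_t b_k0_7,   /* B cols 0..1, K = 0..7  */
                                       int8x16_t b_k8_15,  /* B cols 0..1, K = 8..15 */
                                       int32x4_t acc)      /* {r0c0, r0c1, r1c0, r1c1} */
{
    /* trn1/trn2 on 64-bit lanes interleave the two A rows 8 bytes at a time,
     * mirroring the "trn1 v0.2d, v1.2d, v2.2d" / "trn2 v1.2d, v1.2d, v2.2d"
     * pairs in the assembly below. */
    int64x2_t r0 = vreinterpretq_s64_s8(a_row0);
    int64x2_t r1 = vreinterpretq_s64_s8(a_row1);
    int8x16_t a_lo = vreinterpretq_s8_s64(vtrn1q_s64(r0, r1)); /* both rows, K = 0..7  */
    int8x16_t a_hi = vreinterpretq_s8_s64(vtrn2q_s64(r0, r1)); /* both rows, K = 8..15 */

    /* Each SMMLA multiplies a 2x8 int8 block by an 8x2 int8 block and
     * accumulates the 2x2 int32 product; two of them cover K = 0..15. */
    acc = vmmlaq_s32(acc, a_lo, b_k0_7);
    acc = vmmlaq_s32(acc, a_hi, b_k8_15);
    return acc;
}

In the kernel, six such row pairs and eight column pairs are kept live at once, which is why the 6x16 variant needs the accumulator registers v8-v31 and cycles the weight loads through a small set of temporaries.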
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_mmla_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_mmla_6x16/generic.cpp
index f48623e129..f8a76b5244 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_mmla_6x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_mmla_6x16/generic.cpp
@@ -77,7 +77,6 @@ void a64_hybrid_s8s32_mmla_6x16 (
ka.N = N;
ka.B_ptr = B_ptr;
__asm__ __volatile__(
-
"1:" // Row loop
"cmp %x[M], #0x6\n"
"bge 186f\n"
@@ -178,11 +177,11 @@ void a64_hybrid_s8s32_mmla_6x16 (
"15:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
"ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 16f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
"cbnz x28, 17f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
"add x26, x26, x20\n"
@@ -198,41 +197,41 @@ void a64_hybrid_s8s32_mmla_6x16 (
"ldr q6, [x10, #0x10]\n"
"blt 19f\n"
"18:" // Height 1: Multiply loop: Main loop head
- "trn1 v0.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
- ".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
- ".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
- ".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
- ".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
- ".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
- ".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
- ".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
- ".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
- ".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
- ".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
- ".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
- ".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
+ "trn1 v19.2d, v1.2d, v20.2d\n"
+ ".inst 0x4e87a668 // smmla v8.4s, v19.16b, v7.16b\n"
+ "ldr q18, [x10, #0x20]\n"
+ ".inst 0x4e86a66c // smmla v12.4s, v19.16b, v6.16b\n"
+ "ldr q17, [x10, #0x30]\n"
+ ".inst 0x4e92a669 // smmla v9.4s, v19.16b, v18.16b\n"
+ "ldr q18, [x10, #0x40]\n"
+ ".inst 0x4e91a66d // smmla v13.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x50]\n"
+ ".inst 0x4e92a66a // smmla v10.4s, v19.16b, v18.16b\n"
+ "ldr q18, [x10, #0x60]\n"
+ ".inst 0x4e91a66e // smmla v14.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x70]\n"
+ "trn2 v1.2d, v1.2d, v20.2d\n"
+ ".inst 0x4e92a66b // smmla v11.4s, v19.16b, v18.16b\n"
+ "ldr q18, [x10, #0x80]\n"
+ ".inst 0x4e91a66f // smmla v15.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x90]\n"
+ ".inst 0x4e92a428 // smmla v8.4s, v1.16b, v18.16b\n"
+ "ldr q18, [x10, #0xa0]\n"
+ ".inst 0x4e91a42c // smmla v12.4s, v1.16b, v17.16b\n"
+ "ldr q17, [x10, #0xb0]\n"
+ ".inst 0x4e92a429 // smmla v9.4s, v1.16b, v18.16b\n"
+ "ldr q18, [x10, #0xc0]\n"
+ ".inst 0x4e91a42d // smmla v13.4s, v1.16b, v17.16b\n"
+ "ldr q17, [x10, #0xd0]\n"
+ ".inst 0x4e92a42a // smmla v10.4s, v1.16b, v18.16b\n"
+ "ldr q18, [x10, #0xe0]\n"
+ ".inst 0x4e91a42e // smmla v14.4s, v1.16b, v17.16b\n"
+ "ldr q17, [x10, #0xf0]\n"
"sub x27, x27, #0x10\n"
"add x26, x26, #0x10\n"
"cmp x27, #0x20\n"
- ".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
- ".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e92a42b // smmla v11.4s, v1.16b, v18.16b\n"
+ ".inst 0x4e91a42f // smmla v15.4s, v1.16b, v17.16b\n"
"ldr q1, [x26, #0x0]\n"
"add x10, x10, #0x100\n"
"ldr q7, [x10, #0x0]\n"
@@ -240,40 +239,40 @@ void a64_hybrid_s8s32_mmla_6x16 (
"prfm pldl1keep, [x26, #0x80]\n"
"bge 18b\n"
"19:" // Height 1: Multiply loop: Single iteration only
- "trn1 v0.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
- ".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
- ".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
- ".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
- ".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
- ".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
- ".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
- ".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
- ".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
- ".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
- ".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
- ".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
- ".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
+ "trn1 v20.2d, v1.2d, v21.2d\n"
+ ".inst 0x4e87a688 // smmla v8.4s, v20.16b, v7.16b\n"
+ "ldr q18, [x10, #0x20]\n"
+ ".inst 0x4e86a68c // smmla v12.4s, v20.16b, v6.16b\n"
+ "ldr q17, [x10, #0x30]\n"
+ ".inst 0x4e92a689 // smmla v9.4s, v20.16b, v18.16b\n"
+ "ldr q18, [x10, #0x40]\n"
+ ".inst 0x4e91a68d // smmla v13.4s, v20.16b, v17.16b\n"
+ "ldr q17, [x10, #0x50]\n"
+ ".inst 0x4e92a68a // smmla v10.4s, v20.16b, v18.16b\n"
+ "ldr q19, [x10, #0x60]\n"
+ ".inst 0x4e91a68e // smmla v14.4s, v20.16b, v17.16b\n"
+ "ldr q18, [x10, #0x70]\n"
+ "trn2 v1.2d, v1.2d, v21.2d\n"
+ ".inst 0x4e93a68b // smmla v11.4s, v20.16b, v19.16b\n"
+ "ldr q17, [x10, #0x80]\n"
+ ".inst 0x4e92a68f // smmla v15.4s, v20.16b, v18.16b\n"
+ "ldr q19, [x10, #0x90]\n"
+ ".inst 0x4e91a428 // smmla v8.4s, v1.16b, v17.16b\n"
+ "ldr q18, [x10, #0xa0]\n"
+ ".inst 0x4e93a42c // smmla v12.4s, v1.16b, v19.16b\n"
+ "ldr q17, [x10, #0xb0]\n"
+ ".inst 0x4e92a429 // smmla v9.4s, v1.16b, v18.16b\n"
+ "ldr q18, [x10, #0xc0]\n"
+ ".inst 0x4e91a42d // smmla v13.4s, v1.16b, v17.16b\n"
+ "ldr q17, [x10, #0xd0]\n"
+ ".inst 0x4e92a42a // smmla v10.4s, v1.16b, v18.16b\n"
+ "ldr q18, [x10, #0xe0]\n"
+ ".inst 0x4e91a42e // smmla v14.4s, v1.16b, v17.16b\n"
+ "ldr q17, [x10, #0xf0]\n"
"add x26, x26, #0x10\n"
"sub x27, x27, #0x10\n"
- ".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
- ".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e92a42b // smmla v11.4s, v1.16b, v18.16b\n"
+ ".inst 0x4e91a42f // smmla v15.4s, v1.16b, v17.16b\n"
"prfm pldl1keep, [x26, #0x80]\n"
"add x10, x10, #0x100\n"
"20:" // Height 1: Multiply loop: Main loop skip
@@ -281,26 +280,26 @@ void a64_hybrid_s8s32_mmla_6x16 (
"cmp x27, #0x8\n"
"blt 22f\n"
"21:" // Height 1: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr q6, [x10, #0x0]\n"
- "trn1 v0.2d, v1.2d, v2.2d\n"
- "ldr q7, [x10, #0x10]\n"
- ".inst 0x4e86a408 // smmla v8.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x20]\n"
- ".inst 0x4e87a40c // smmla v12.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x30]\n"
- ".inst 0x4e86a409 // smmla v9.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x40]\n"
- ".inst 0x4e87a40d // smmla v13.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x50]\n"
- ".inst 0x4e86a40a // smmla v10.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x60]\n"
- ".inst 0x4e87a40e // smmla v14.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr d19, [x26], #0x8\n"
+ "ldr q18, [x10, #0x0]\n"
+ "trn1 v19.2d, v19.2d, v17.2d\n"
+ "ldr q17, [x10, #0x10]\n"
+ ".inst 0x4e92a668 // smmla v8.4s, v19.16b, v18.16b\n"
+ "ldr q18, [x10, #0x20]\n"
+ ".inst 0x4e91a66c // smmla v12.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x30]\n"
+ ".inst 0x4e92a669 // smmla v9.4s, v19.16b, v18.16b\n"
+ "ldr q18, [x10, #0x40]\n"
+ ".inst 0x4e91a66d // smmla v13.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x50]\n"
+ ".inst 0x4e92a66a // smmla v10.4s, v19.16b, v18.16b\n"
+ "ldr q18, [x10, #0x60]\n"
+ ".inst 0x4e91a66e // smmla v14.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x70]\n"
"sub x27, x27, #0x8\n"
"cmp x27, #0x8\n"
- ".inst 0x4e86a40b // smmla v11.4s, v0.16b, v6.16b\n"
- ".inst 0x4e87a40f // smmla v15.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e92a66b // smmla v11.4s, v19.16b, v18.16b\n"
+ ".inst 0x4e91a66f // smmla v15.4s, v19.16b, v17.16b\n"
"add x10, x10, #0x80\n"
"bge 21b\n"
"22:" // Height 1: Multiply loop: Skip odd blocks
@@ -325,23 +324,23 @@ void a64_hybrid_s8s32_mmla_6x16 (
"25:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
"ldr b1, [x26, #0x0]\n"
"26:" // Height 1: Multiply loop: Ragged operand read: Done
- "ldr q7, [x10, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
- "trn1 v0.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
- ".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
- ".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
- ".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
- ".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
- ".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
- ".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- ".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
+ "ldr q23, [x10, #0x0]\n"
+ "ldr q18, [x10, #0x10]\n"
+ "trn1 v19.2d, v1.2d, v17.2d\n"
+ ".inst 0x4e97a668 // smmla v8.4s, v19.16b, v23.16b\n"
+ "ldr q17, [x10, #0x20]\n"
+ ".inst 0x4e92a66c // smmla v12.4s, v19.16b, v18.16b\n"
+ "ldr q31, [x10, #0x30]\n"
+ ".inst 0x4e91a669 // smmla v9.4s, v19.16b, v17.16b\n"
+ "ldr q20, [x10, #0x40]\n"
+ ".inst 0x4e9fa66d // smmla v13.4s, v19.16b, v31.16b\n"
+ "ldr q17, [x10, #0x50]\n"
+ ".inst 0x4e94a66a // smmla v10.4s, v19.16b, v20.16b\n"
+ "ldr q18, [x10, #0x60]\n"
+ ".inst 0x4e91a66e // smmla v14.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x70]\n"
+ ".inst 0x4e92a66b // smmla v11.4s, v19.16b, v18.16b\n"
+ ".inst 0x4e91a66f // smmla v15.4s, v19.16b, v17.16b\n"
"add x10, x10, #0x80\n"
"27:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
@@ -525,12 +524,12 @@ void a64_hybrid_s8s32_mmla_6x16 (
"52:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
"ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 53f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
"cbnz x28, 54f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
"add x26, x26, x20\n"
@@ -538,7 +537,7 @@ void a64_hybrid_s8s32_mmla_6x16 (
"b 54f\n"
"53:" // Height 2: setup direct input
"mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
+ "add x25, x26, x21\n"
"54:" // Height 2: input setup done
"cmp x27, #0x10\n"
"blt 57f\n"
@@ -549,85 +548,85 @@ void a64_hybrid_s8s32_mmla_6x16 (
"ldr q6, [x10, #0x10]\n"
"blt 56f\n"
"55:" // Height 2: Multiply loop: Main loop head
- "trn1 v0.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
- ".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
- ".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
- ".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
- ".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
- ".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
+ "trn1 v19.2d, v1.2d, v2.2d\n"
+ ".inst 0x4e87a668 // smmla v8.4s, v19.16b, v7.16b\n"
+ "ldr q18, [x10, #0x20]\n"
+ ".inst 0x4e86a66c // smmla v12.4s, v19.16b, v6.16b\n"
+ "ldr q17, [x10, #0x30]\n"
+ ".inst 0x4e92a669 // smmla v9.4s, v19.16b, v18.16b\n"
+ "ldr q18, [x10, #0x40]\n"
+ ".inst 0x4e91a66d // smmla v13.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x50]\n"
+ ".inst 0x4e92a66a // smmla v10.4s, v19.16b, v18.16b\n"
+ "ldr q18, [x10, #0x60]\n"
+ ".inst 0x4e91a66e // smmla v14.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x70]\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
- ".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
- ".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
- ".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
- ".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
- ".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
- ".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
- ".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
+ ".inst 0x4e92a66b // smmla v11.4s, v19.16b, v18.16b\n"
+ "ldr q18, [x10, #0x80]\n"
+ ".inst 0x4e91a66f // smmla v15.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x90]\n"
+ ".inst 0x4e92a428 // smmla v8.4s, v1.16b, v18.16b\n"
+ "ldr q18, [x10, #0xa0]\n"
+ ".inst 0x4e91a42c // smmla v12.4s, v1.16b, v17.16b\n"
+ "ldr q17, [x10, #0xb0]\n"
+ ".inst 0x4e92a429 // smmla v9.4s, v1.16b, v18.16b\n"
+ "ldr q18, [x10, #0xc0]\n"
+ ".inst 0x4e91a42d // smmla v13.4s, v1.16b, v17.16b\n"
+ "ldr q17, [x10, #0xd0]\n"
+ ".inst 0x4e92a42a // smmla v10.4s, v1.16b, v18.16b\n"
+ "ldr q18, [x10, #0xe0]\n"
+ ".inst 0x4e91a42e // smmla v14.4s, v1.16b, v17.16b\n"
+ "ldr q17, [x10, #0xf0]\n"
"sub x27, x27, #0x10\n"
"add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
"ldr q2, [x25, #0x0]\n"
"cmp x27, #0x20\n"
- ".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
+ ".inst 0x4e92a42b // smmla v11.4s, v1.16b, v18.16b\n"
"add x10, x10, #0x100\n"
"ldr q7, [x10, #0x0]\n"
- ".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e91a42f // smmla v15.4s, v1.16b, v17.16b\n"
"ldr q1, [x26, #0x0]\n"
"ldr q6, [x10, #0x10]\n"
"prfm pldl1keep, [x26, #0x80]\n"
"prfm pldl1keep, [x25, #0x80]\n"
"bge 55b\n"
"56:" // Height 2: Multiply loop: Single iteration only
- "trn1 v0.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
- ".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
- ".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
- ".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
- ".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
- ".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
+ "trn1 v19.2d, v1.2d, v2.2d\n"
+ ".inst 0x4e87a668 // smmla v8.4s, v19.16b, v7.16b\n"
+ "ldr q18, [x10, #0x20]\n"
+ ".inst 0x4e86a66c // smmla v12.4s, v19.16b, v6.16b\n"
+ "ldr q17, [x10, #0x30]\n"
+ ".inst 0x4e92a669 // smmla v9.4s, v19.16b, v18.16b\n"
+ "ldr q18, [x10, #0x40]\n"
+ ".inst 0x4e91a66d // smmla v13.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x50]\n"
+ ".inst 0x4e92a66a // smmla v10.4s, v19.16b, v18.16b\n"
+ "ldr q18, [x10, #0x60]\n"
+ ".inst 0x4e91a66e // smmla v14.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x70]\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
- ".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
- ".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
- ".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
- ".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
- ".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
- ".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
- ".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
+ ".inst 0x4e92a66b // smmla v11.4s, v19.16b, v18.16b\n"
+ "ldr q18, [x10, #0x80]\n"
+ ".inst 0x4e91a66f // smmla v15.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x90]\n"
+ ".inst 0x4e92a428 // smmla v8.4s, v1.16b, v18.16b\n"
+ "ldr q18, [x10, #0xa0]\n"
+ ".inst 0x4e91a42c // smmla v12.4s, v1.16b, v17.16b\n"
+ "ldr q17, [x10, #0xb0]\n"
+ ".inst 0x4e92a429 // smmla v9.4s, v1.16b, v18.16b\n"
+ "ldr q18, [x10, #0xc0]\n"
+ ".inst 0x4e91a42d // smmla v13.4s, v1.16b, v17.16b\n"
+ "ldr q17, [x10, #0xd0]\n"
+ ".inst 0x4e92a42a // smmla v10.4s, v1.16b, v18.16b\n"
+ "ldr q18, [x10, #0xe0]\n"
+ ".inst 0x4e91a42e // smmla v14.4s, v1.16b, v17.16b\n"
+ "ldr q17, [x10, #0xf0]\n"
"add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
- ".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
- ".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e92a42b // smmla v11.4s, v1.16b, v18.16b\n"
+ ".inst 0x4e91a42f // smmla v15.4s, v1.16b, v17.16b\n"
"sub x27, x27, #0x10\n"
"prfm pldl1keep, [x26, #0x80]\n"
"prfm pldl1keep, [x25, #0x80]\n"
@@ -637,27 +636,27 @@ void a64_hybrid_s8s32_mmla_6x16 (
"cmp x27, #0x8\n"
"blt 59f\n"
"58:" // Height 2: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
- "trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr d18, [x26], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "trn1 v19.2d, v18.2d, v17.2d\n"
"sub x27, x27, #0x8\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
- ".inst 0x4e86a408 // smmla v8.4s, v0.16b, v6.16b\n"
- ".inst 0x4e87a40c // smmla v12.4s, v0.16b, v7.16b\n"
- "ldr q6, [x10, #0x20]\n"
- "ldr q7, [x10, #0x30]\n"
- ".inst 0x4e86a409 // smmla v9.4s, v0.16b, v6.16b\n"
- ".inst 0x4e87a40d // smmla v13.4s, v0.16b, v7.16b\n"
- "ldr q6, [x10, #0x40]\n"
- "ldr q7, [x10, #0x50]\n"
- ".inst 0x4e86a40a // smmla v10.4s, v0.16b, v6.16b\n"
- ".inst 0x4e87a40e // smmla v14.4s, v0.16b, v7.16b\n"
- "ldr q6, [x10, #0x60]\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr q17, [x10, #0x0]\n"
+ "ldr q22, [x10, #0x10]\n"
+ ".inst 0x4e91a668 // smmla v8.4s, v19.16b, v17.16b\n"
+ ".inst 0x4e96a66c // smmla v12.4s, v19.16b, v22.16b\n"
+ "ldr q1, [x10, #0x20]\n"
+ "ldr q17, [x10, #0x30]\n"
+ ".inst 0x4e81a669 // smmla v9.4s, v19.16b, v1.16b\n"
+ ".inst 0x4e91a66d // smmla v13.4s, v19.16b, v17.16b\n"
+ "ldr q18, [x10, #0x40]\n"
+ "ldr q17, [x10, #0x50]\n"
+ ".inst 0x4e92a66a // smmla v10.4s, v19.16b, v18.16b\n"
+ ".inst 0x4e91a66e // smmla v14.4s, v19.16b, v17.16b\n"
+ "ldr q18, [x10, #0x60]\n"
+ "ldr q17, [x10, #0x70]\n"
"cmp x27, #0x8\n"
- ".inst 0x4e86a40b // smmla v11.4s, v0.16b, v6.16b\n"
- ".inst 0x4e87a40f // smmla v15.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e92a66b // smmla v11.4s, v19.16b, v18.16b\n"
+ ".inst 0x4e91a66f // smmla v15.4s, v19.16b, v17.16b\n"
"add x10, x10, #0x80\n"
"bge 58b\n"
"59:" // Height 2: Multiply loop: Skip odd blocks
@@ -689,23 +688,23 @@ void a64_hybrid_s8s32_mmla_6x16 (
"ldr b1, [x26, #0x0]\n"
"ldr b2, [x25, #0x0]\n"
"63:" // Height 2: Multiply loop: Ragged operand read: Done
- "ldr q7, [x10, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
- "trn1 v0.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
- ".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
- ".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
- ".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
- ".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
- ".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
- ".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- ".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
+ "ldr q18, [x10, #0x0]\n"
+ "ldr q17, [x10, #0x10]\n"
+ "trn1 v19.2d, v1.2d, v2.2d\n"
+ ".inst 0x4e92a668 // smmla v8.4s, v19.16b, v18.16b\n"
+ "ldr q5, [x10, #0x20]\n"
+ ".inst 0x4e91a66c // smmla v12.4s, v19.16b, v17.16b\n"
+ "ldr q21, [x10, #0x30]\n"
+ ".inst 0x4e85a669 // smmla v9.4s, v19.16b, v5.16b\n"
+ "ldr q18, [x10, #0x40]\n"
+ ".inst 0x4e95a66d // smmla v13.4s, v19.16b, v21.16b\n"
+ "ldr q17, [x10, #0x50]\n"
+ ".inst 0x4e92a66a // smmla v10.4s, v19.16b, v18.16b\n"
+ "ldr q18, [x10, #0x60]\n"
+ ".inst 0x4e91a66e // smmla v14.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x70]\n"
+ ".inst 0x4e92a66b // smmla v11.4s, v19.16b, v18.16b\n"
+ ".inst 0x4e91a66f // smmla v15.4s, v19.16b, v17.16b\n"
"add x10, x10, #0x80\n"
"64:" // Height 2: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
@@ -953,13 +952,13 @@ void a64_hybrid_s8s32_mmla_6x16 (
"89:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
"ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 90f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "ldr x24, [x20, #0x10]\n"
"cbnz x28, 91f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
"add x26, x26, x20\n"
@@ -968,8 +967,8 @@ void a64_hybrid_s8s32_mmla_6x16 (
"b 91f\n"
"90:" // Height 3: setup direct input
"mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
+ "add x25, x26, x21\n"
+ "add x24, x25, x21\n"
"91:" // Height 3: input setup done
"cmp x27, #0x10\n"
"blt 94f\n"
@@ -981,167 +980,167 @@ void a64_hybrid_s8s32_mmla_6x16 (
"ldr q6, [x10, #0x10]\n"
"blt 93f\n"
"92:" // Height 3: Multiply loop: Main loop head
- "trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn1 v28.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "trn1 v2.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
- ".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
- ".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
- ".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e87a788 // smmla v8.4s, v28.16b, v7.16b\n"
+ "trn1 v27.2d, v3.2d, v29.2d\n"
+ ".inst 0x4e87a770 // smmla v16.4s, v27.16b, v7.16b\n"
+ "ldr q26, [x10, #0x20]\n"
+ ".inst 0x4e86a78c // smmla v12.4s, v28.16b, v6.16b\n"
+ ".inst 0x4e86a774 // smmla v20.4s, v27.16b, v6.16b\n"
+ "ldr q25, [x10, #0x30]\n"
+ ".inst 0x4e9aa789 // smmla v9.4s, v28.16b, v26.16b\n"
+ "trn2 v3.2d, v3.2d, v29.2d\n"
+ ".inst 0x4e9aa771 // smmla v17.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x40]\n"
+ ".inst 0x4e99a78d // smmla v13.4s, v28.16b, v25.16b\n"
"sub x27, x27, #0x10\n"
- ".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
- ".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e99a775 // smmla v21.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x50]\n"
+ ".inst 0x4e9aa78a // smmla v10.4s, v28.16b, v26.16b\n"
"add x26, x26, #0x10\n"
- ".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
- ".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e9aa772 // smmla v18.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x60]\n"
+ ".inst 0x4e99a78e // smmla v14.4s, v28.16b, v25.16b\n"
"add x25, x25, #0x10\n"
- ".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
- ".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e99a776 // smmla v22.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x70]\n"
+ ".inst 0x4e9aa78b // smmla v11.4s, v28.16b, v26.16b\n"
"add x24, x24, #0x10\n"
- ".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
- ".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e9aa773 // smmla v19.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x80]\n"
+ ".inst 0x4e99a78f // smmla v15.4s, v28.16b, v25.16b\n"
"cmp x27, #0x20\n"
- ".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
+ ".inst 0x4e99a777 // smmla v23.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x90]\n"
"ldr q2, [x25, #0x0]\n"
- ".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
- ".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
- ".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e9aa428 // smmla v8.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e9aa470 // smmla v16.4s, v3.16b, v26.16b\n"
+ "ldr q26, [x10, #0xa0]\n"
+ ".inst 0x4e99a42c // smmla v12.4s, v1.16b, v25.16b\n"
"prfm pldl1keep, [x26, #0x80]\n"
- ".inst 0x4e86a474 // smmla v20.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
- ".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
+ ".inst 0x4e99a474 // smmla v20.4s, v3.16b, v25.16b\n"
+ "ldr q25, [x10, #0xb0]\n"
+ ".inst 0x4e9aa429 // smmla v9.4s, v1.16b, v26.16b\n"
"prfm pldl1keep, [x25, #0x80]\n"
- ".inst 0x4e87a471 // smmla v17.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
- ".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e9aa471 // smmla v17.4s, v3.16b, v26.16b\n"
+ "ldr q26, [x10, #0xc0]\n"
+ ".inst 0x4e99a42d // smmla v13.4s, v1.16b, v25.16b\n"
"prfm pldl1keep, [x24, #0x80]\n"
- ".inst 0x4e86a475 // smmla v21.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
- ".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
- ".inst 0x4e87a472 // smmla v18.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
- ".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
- ".inst 0x4e86a476 // smmla v22.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
+ ".inst 0x4e99a475 // smmla v21.4s, v3.16b, v25.16b\n"
+ "ldr q25, [x10, #0xd0]\n"
+ ".inst 0x4e9aa42a // smmla v10.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e9aa472 // smmla v18.4s, v3.16b, v26.16b\n"
+ "ldr q26, [x10, #0xe0]\n"
+ ".inst 0x4e99a42e // smmla v14.4s, v1.16b, v25.16b\n"
+ ".inst 0x4e99a476 // smmla v22.4s, v3.16b, v25.16b\n"
+ "ldr q25, [x10, #0xf0]\n"
"add x10, x10, #0x100\n"
- ".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
- ".inst 0x4e87a473 // smmla v19.4s, v3.16b, v7.16b\n"
+ ".inst 0x4e9aa42b // smmla v11.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e9aa473 // smmla v19.4s, v3.16b, v26.16b\n"
"ldr q7, [x10, #0x0]\n"
- ".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e99a42f // smmla v15.4s, v1.16b, v25.16b\n"
"ldr q1, [x26, #0x0]\n"
- ".inst 0x4e86a477 // smmla v23.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e99a477 // smmla v23.4s, v3.16b, v25.16b\n"
"ldr q3, [x24, #0x0]\n"
"ldr q6, [x10, #0x10]\n"
"bge 92b\n"
"93:" // Height 3: Multiply loop: Single iteration only
- "trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn1 v28.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "trn1 v2.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
- ".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
- ".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
- ".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e87a788 // smmla v8.4s, v28.16b, v7.16b\n"
+ "trn1 v27.2d, v3.2d, v29.2d\n"
+ ".inst 0x4e87a770 // smmla v16.4s, v27.16b, v7.16b\n"
+ "ldr q26, [x10, #0x20]\n"
+ ".inst 0x4e86a78c // smmla v12.4s, v28.16b, v6.16b\n"
+ ".inst 0x4e86a774 // smmla v20.4s, v27.16b, v6.16b\n"
+ "ldr q25, [x10, #0x30]\n"
+ ".inst 0x4e9aa789 // smmla v9.4s, v28.16b, v26.16b\n"
+ "trn2 v3.2d, v3.2d, v29.2d\n"
+ ".inst 0x4e9aa771 // smmla v17.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x40]\n"
+ ".inst 0x4e99a78d // smmla v13.4s, v28.16b, v25.16b\n"
"add x26, x26, #0x10\n"
- ".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
- ".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e99a775 // smmla v21.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x50]\n"
+ ".inst 0x4e9aa78a // smmla v10.4s, v28.16b, v26.16b\n"
"add x25, x25, #0x10\n"
- ".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
- ".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e9aa772 // smmla v18.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x60]\n"
+ ".inst 0x4e99a78e // smmla v14.4s, v28.16b, v25.16b\n"
"add x24, x24, #0x10\n"
- ".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
- ".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e99a776 // smmla v22.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x70]\n"
+ ".inst 0x4e9aa78b // smmla v11.4s, v28.16b, v26.16b\n"
"sub x27, x27, #0x10\n"
- ".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
- ".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e9aa773 // smmla v19.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x80]\n"
+ ".inst 0x4e99a78f // smmla v15.4s, v28.16b, v25.16b\n"
"prfm pldl1keep, [x26, #0x80]\n"
- ".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
- ".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
+ ".inst 0x4e99a777 // smmla v23.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x90]\n"
+ ".inst 0x4e9aa428 // smmla v8.4s, v1.16b, v26.16b\n"
"prfm pldl1keep, [x25, #0x80]\n"
- ".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
- ".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e9aa470 // smmla v16.4s, v3.16b, v26.16b\n"
+ "ldr q26, [x10, #0xa0]\n"
+ ".inst 0x4e99a42c // smmla v12.4s, v1.16b, v25.16b\n"
"prfm pldl1keep, [x24, #0x80]\n"
- ".inst 0x4e86a474 // smmla v20.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
- ".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
- ".inst 0x4e87a471 // smmla v17.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
- ".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
- ".inst 0x4e86a475 // smmla v21.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
- ".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
- ".inst 0x4e87a472 // smmla v18.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
- ".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
- ".inst 0x4e86a476 // smmla v22.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
+ ".inst 0x4e99a474 // smmla v20.4s, v3.16b, v25.16b\n"
+ "ldr q25, [x10, #0xb0]\n"
+ ".inst 0x4e9aa429 // smmla v9.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e9aa471 // smmla v17.4s, v3.16b, v26.16b\n"
+ "ldr q26, [x10, #0xc0]\n"
+ ".inst 0x4e99a42d // smmla v13.4s, v1.16b, v25.16b\n"
+ ".inst 0x4e99a475 // smmla v21.4s, v3.16b, v25.16b\n"
+ "ldr q25, [x10, #0xd0]\n"
+ ".inst 0x4e9aa42a // smmla v10.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e9aa472 // smmla v18.4s, v3.16b, v26.16b\n"
+ "ldr q26, [x10, #0xe0]\n"
+ ".inst 0x4e99a42e // smmla v14.4s, v1.16b, v25.16b\n"
+ ".inst 0x4e99a476 // smmla v22.4s, v3.16b, v25.16b\n"
+ "ldr q25, [x10, #0xf0]\n"
"add x10, x10, #0x100\n"
- ".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
- ".inst 0x4e87a473 // smmla v19.4s, v3.16b, v7.16b\n"
- ".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
- ".inst 0x4e86a477 // smmla v23.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e9aa42b // smmla v11.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e9aa473 // smmla v19.4s, v3.16b, v26.16b\n"
+ ".inst 0x4e99a42f // smmla v15.4s, v1.16b, v25.16b\n"
+ ".inst 0x4e99a477 // smmla v23.4s, v3.16b, v25.16b\n"
"94:" // Height 3: Multiply loop: Main loop skip
"cbz x27, 101f\n"
"cmp x27, #0x8\n"
"blt 96f\n"
"95:" // Height 3: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
- "trn1 v0.2d, v1.2d, v2.2d\n"
- "ldr d3, [x24], #0x8\n"
- "ldr q6, [x10, #0x0]\n"
- "trn1 v2.2d, v3.2d, v4.2d\n"
- ".inst 0x4e86a408 // smmla v8.4s, v0.16b, v6.16b\n"
- "ldr q7, [x10, #0x10]\n"
- ".inst 0x4e86a450 // smmla v16.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x20]\n"
- ".inst 0x4e87a40c // smmla v12.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a454 // smmla v20.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x30]\n"
- ".inst 0x4e86a409 // smmla v9.4s, v0.16b, v6.16b\n"
+ "ldr d26, [x26], #0x8\n"
+ "ldr d25, [x25], #0x8\n"
+ "trn1 v28.2d, v26.2d, v25.2d\n"
+ "ldr d25, [x24], #0x8\n"
+ "ldr q26, [x10, #0x0]\n"
+ "trn1 v27.2d, v25.2d, v27.2d\n"
+ ".inst 0x4e9aa788 // smmla v8.4s, v28.16b, v26.16b\n"
+ "ldr q25, [x10, #0x10]\n"
+ ".inst 0x4e9aa770 // smmla v16.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x20]\n"
+ ".inst 0x4e99a78c // smmla v12.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99a774 // smmla v20.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x30]\n"
+ ".inst 0x4e9aa789 // smmla v9.4s, v28.16b, v26.16b\n"
"sub x27, x27, #0x8\n"
- ".inst 0x4e86a451 // smmla v17.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x40]\n"
- ".inst 0x4e87a40d // smmla v13.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e9aa771 // smmla v17.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x40]\n"
+ ".inst 0x4e99a78d // smmla v13.4s, v28.16b, v25.16b\n"
"cmp x27, #0x8\n"
- ".inst 0x4e87a455 // smmla v21.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x50]\n"
- ".inst 0x4e86a40a // smmla v10.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a452 // smmla v18.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x60]\n"
- ".inst 0x4e87a40e // smmla v14.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a456 // smmla v22.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x70]\n"
- ".inst 0x4e86a40b // smmla v11.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e99a775 // smmla v21.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x50]\n"
+ ".inst 0x4e9aa78a // smmla v10.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aa772 // smmla v18.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x60]\n"
+ ".inst 0x4e99a78e // smmla v14.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99a776 // smmla v22.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x70]\n"
+ ".inst 0x4e9aa78b // smmla v11.4s, v28.16b, v26.16b\n"
"add x10, x10, #0x80\n"
- ".inst 0x4e86a453 // smmla v19.4s, v2.16b, v6.16b\n"
- ".inst 0x4e87a40f // smmla v15.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a457 // smmla v23.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e9aa773 // smmla v19.4s, v27.16b, v26.16b\n"
+ ".inst 0x4e99a78f // smmla v15.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99a777 // smmla v23.4s, v27.16b, v25.16b\n"
"bge 95b\n"
"96:" // Height 3: Multiply loop: Skip odd blocks
"cbz x27, 101f\n"
@@ -1179,33 +1178,33 @@ void a64_hybrid_s8s32_mmla_6x16 (
"ldr b2, [x25, #0x0]\n"
"ldr b3, [x24, #0x0]\n"
"100:" // Height 3: Multiply loop: Ragged operand read: Done
- "ldr q7, [x10, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
- "trn1 v0.2d, v1.2d, v2.2d\n"
- "trn1 v2.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
- ".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
- ".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
- ".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
- ".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
- ".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q26, [x10, #0x0]\n"
+ "ldr q29, [x10, #0x10]\n"
+ "trn1 v28.2d, v1.2d, v2.2d\n"
+ "trn1 v27.2d, v3.2d, v25.2d\n"
+ ".inst 0x4e9aa788 // smmla v8.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aa770 // smmla v16.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x20]\n"
+ ".inst 0x4e9da78c // smmla v12.4s, v28.16b, v29.16b\n"
+ ".inst 0x4e9da774 // smmla v20.4s, v27.16b, v29.16b\n"
+ "ldr q25, [x10, #0x30]\n"
+ ".inst 0x4e9aa789 // smmla v9.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aa771 // smmla v17.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x40]\n"
+ ".inst 0x4e99a78d // smmla v13.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99a775 // smmla v21.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x50]\n"
+ ".inst 0x4e9aa78a // smmla v10.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aa772 // smmla v18.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x60]\n"
+ ".inst 0x4e99a78e // smmla v14.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99a776 // smmla v22.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x70]\n"
"add x10, x10, #0x80\n"
- ".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
- ".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e9aa78b // smmla v11.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aa773 // smmla v19.4s, v27.16b, v26.16b\n"
+ ".inst 0x4e99a78f // smmla v15.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99a777 // smmla v23.4s, v27.16b, v25.16b\n"
"101:" // Height 3: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
@@ -1499,14 +1498,14 @@ void a64_hybrid_s8s32_mmla_6x16 (
"126:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
"ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 127f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "ldr x24, [x20, #0x10]\n"
+ "ldr x23, [x20, #0x18]\n"
"cbnz x28, 128f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
"add x26, x26, x20\n"
@@ -1516,9 +1515,9 @@ void a64_hybrid_s8s32_mmla_6x16 (
"b 128f\n"
"127:" // Height 4: setup direct input
"mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
+ "add x25, x26, x21\n"
+ "add x24, x25, x21\n"
+ "add x23, x24, x21\n"
"128:" // Height 4: input setup done
"cmp x27, #0x10\n"
"blt 131f\n"
@@ -1531,173 +1530,173 @@ void a64_hybrid_s8s32_mmla_6x16 (
"ldr q6, [x10, #0x10]\n"
"blt 130f\n"
"129:" // Height 4: Multiply loop: Main loop head
- "trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn1 v28.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e87a788 // smmla v8.4s, v28.16b, v7.16b\n"
"sub x27, x27, #0x10\n"
- "trn1 v2.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
- ".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
- ".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
+ "trn1 v27.2d, v3.2d, v4.2d\n"
+ ".inst 0x4e87a770 // smmla v16.4s, v27.16b, v7.16b\n"
+ "ldr q26, [x10, #0x20]\n"
+ ".inst 0x4e86a78c // smmla v12.4s, v28.16b, v6.16b\n"
+ ".inst 0x4e86a774 // smmla v20.4s, v27.16b, v6.16b\n"
+ "ldr q25, [x10, #0x30]\n"
+ ".inst 0x4e9aa789 // smmla v9.4s, v28.16b, v26.16b\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
- ".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e9aa771 // smmla v17.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x40]\n"
+ ".inst 0x4e99a78d // smmla v13.4s, v28.16b, v25.16b\n"
"add x26, x26, #0x10\n"
- ".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
- ".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e99a775 // smmla v21.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x50]\n"
+ ".inst 0x4e9aa78a // smmla v10.4s, v28.16b, v26.16b\n"
"add x25, x25, #0x10\n"
- ".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
- ".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e9aa772 // smmla v18.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x60]\n"
+ ".inst 0x4e99a78e // smmla v14.4s, v28.16b, v25.16b\n"
"add x24, x24, #0x10\n"
- ".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
- ".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e99a776 // smmla v22.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x70]\n"
+ ".inst 0x4e9aa78b // smmla v11.4s, v28.16b, v26.16b\n"
"add x23, x23, #0x10\n"
"ldr q4, [x23, #0x0]\n"
- ".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
- ".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
+ ".inst 0x4e9aa773 // smmla v19.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x80]\n"
+ ".inst 0x4e99a78f // smmla v15.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99a777 // smmla v23.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x90]\n"
"ldr q2, [x25, #0x0]\n"
- ".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
- ".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
- ".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e9aa428 // smmla v8.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e9aa470 // smmla v16.4s, v3.16b, v26.16b\n"
+ "ldr q26, [x10, #0xa0]\n"
+ ".inst 0x4e99a42c // smmla v12.4s, v1.16b, v25.16b\n"
"cmp x27, #0x20\n"
- ".inst 0x4e86a474 // smmla v20.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
- ".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
+ ".inst 0x4e99a474 // smmla v20.4s, v3.16b, v25.16b\n"
+ "ldr q25, [x10, #0xb0]\n"
+ ".inst 0x4e9aa429 // smmla v9.4s, v1.16b, v26.16b\n"
"prfm pldl1keep, [x26, #0x80]\n"
- ".inst 0x4e87a471 // smmla v17.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
- ".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e9aa471 // smmla v17.4s, v3.16b, v26.16b\n"
+ "ldr q26, [x10, #0xc0]\n"
+ ".inst 0x4e99a42d // smmla v13.4s, v1.16b, v25.16b\n"
"prfm pldl1keep, [x25, #0x80]\n"
- ".inst 0x4e86a475 // smmla v21.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
- ".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
+ ".inst 0x4e99a475 // smmla v21.4s, v3.16b, v25.16b\n"
+ "ldr q25, [x10, #0xd0]\n"
+ ".inst 0x4e9aa42a // smmla v10.4s, v1.16b, v26.16b\n"
"prfm pldl1keep, [x24, #0x80]\n"
- ".inst 0x4e87a472 // smmla v18.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
- ".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e9aa472 // smmla v18.4s, v3.16b, v26.16b\n"
+ "ldr q26, [x10, #0xe0]\n"
+ ".inst 0x4e99a42e // smmla v14.4s, v1.16b, v25.16b\n"
"prfm pldl1keep, [x23, #0x80]\n"
- ".inst 0x4e86a476 // smmla v22.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
+ ".inst 0x4e99a476 // smmla v22.4s, v3.16b, v25.16b\n"
+ "ldr q25, [x10, #0xf0]\n"
"add x10, x10, #0x100\n"
- ".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
- ".inst 0x4e87a473 // smmla v19.4s, v3.16b, v7.16b\n"
+ ".inst 0x4e9aa42b // smmla v11.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e9aa473 // smmla v19.4s, v3.16b, v26.16b\n"
"ldr q7, [x10, #0x0]\n"
- ".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e99a42f // smmla v15.4s, v1.16b, v25.16b\n"
"ldr q1, [x26, #0x0]\n"
- ".inst 0x4e86a477 // smmla v23.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e99a477 // smmla v23.4s, v3.16b, v25.16b\n"
"ldr q3, [x24, #0x0]\n"
"ldr q6, [x10, #0x10]\n"
"bge 129b\n"
"130:" // Height 4: Multiply loop: Single iteration only
- "trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn1 v28.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e87a788 // smmla v8.4s, v28.16b, v7.16b\n"
"add x26, x26, #0x10\n"
- "trn1 v2.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
- ".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
- ".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
+ "trn1 v27.2d, v3.2d, v4.2d\n"
+ ".inst 0x4e87a770 // smmla v16.4s, v27.16b, v7.16b\n"
+ "ldr q26, [x10, #0x20]\n"
+ ".inst 0x4e86a78c // smmla v12.4s, v28.16b, v6.16b\n"
+ ".inst 0x4e86a774 // smmla v20.4s, v27.16b, v6.16b\n"
+ "ldr q25, [x10, #0x30]\n"
+ ".inst 0x4e9aa789 // smmla v9.4s, v28.16b, v26.16b\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
- ".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e9aa771 // smmla v17.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x40]\n"
+ ".inst 0x4e99a78d // smmla v13.4s, v28.16b, v25.16b\n"
"add x25, x25, #0x10\n"
- ".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
- ".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e99a775 // smmla v21.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x50]\n"
+ ".inst 0x4e9aa78a // smmla v10.4s, v28.16b, v26.16b\n"
"add x24, x24, #0x10\n"
- ".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
- ".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e9aa772 // smmla v18.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x60]\n"
+ ".inst 0x4e99a78e // smmla v14.4s, v28.16b, v25.16b\n"
"add x23, x23, #0x10\n"
- ".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
- ".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e99a776 // smmla v22.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x70]\n"
+ ".inst 0x4e9aa78b // smmla v11.4s, v28.16b, v26.16b\n"
"sub x27, x27, #0x10\n"
- ".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x80]\n"
- ".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e9aa773 // smmla v19.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x80]\n"
+ ".inst 0x4e99a78f // smmla v15.4s, v28.16b, v25.16b\n"
"prfm pldl1keep, [x26, #0x80]\n"
- ".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
- ".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
+ ".inst 0x4e99a777 // smmla v23.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x90]\n"
+ ".inst 0x4e9aa428 // smmla v8.4s, v1.16b, v26.16b\n"
"prfm pldl1keep, [x25, #0x80]\n"
- ".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
- ".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e9aa470 // smmla v16.4s, v3.16b, v26.16b\n"
+ "ldr q26, [x10, #0xa0]\n"
+ ".inst 0x4e99a42c // smmla v12.4s, v1.16b, v25.16b\n"
"prfm pldl1keep, [x24, #0x80]\n"
- ".inst 0x4e86a474 // smmla v20.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
- ".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
+ ".inst 0x4e99a474 // smmla v20.4s, v3.16b, v25.16b\n"
+ "ldr q25, [x10, #0xb0]\n"
+ ".inst 0x4e9aa429 // smmla v9.4s, v1.16b, v26.16b\n"
"prfm pldl1keep, [x23, #0x80]\n"
- ".inst 0x4e87a471 // smmla v17.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
- ".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
- ".inst 0x4e86a475 // smmla v21.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
- ".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
- ".inst 0x4e87a472 // smmla v18.4s, v3.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
- ".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
- ".inst 0x4e86a476 // smmla v22.4s, v3.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
+ ".inst 0x4e9aa471 // smmla v17.4s, v3.16b, v26.16b\n"
+ "ldr q26, [x10, #0xc0]\n"
+ ".inst 0x4e99a42d // smmla v13.4s, v1.16b, v25.16b\n"
+ ".inst 0x4e99a475 // smmla v21.4s, v3.16b, v25.16b\n"
+ "ldr q25, [x10, #0xd0]\n"
+ ".inst 0x4e9aa42a // smmla v10.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e9aa472 // smmla v18.4s, v3.16b, v26.16b\n"
+ "ldr q26, [x10, #0xe0]\n"
+ ".inst 0x4e99a42e // smmla v14.4s, v1.16b, v25.16b\n"
+ ".inst 0x4e99a476 // smmla v22.4s, v3.16b, v25.16b\n"
+ "ldr q25, [x10, #0xf0]\n"
"add x10, x10, #0x100\n"
- ".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
- ".inst 0x4e87a473 // smmla v19.4s, v3.16b, v7.16b\n"
- ".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
- ".inst 0x4e86a477 // smmla v23.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e9aa42b // smmla v11.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e9aa473 // smmla v19.4s, v3.16b, v26.16b\n"
+ ".inst 0x4e99a42f // smmla v15.4s, v1.16b, v25.16b\n"
+ ".inst 0x4e99a477 // smmla v23.4s, v3.16b, v25.16b\n"
"131:" // Height 4: Multiply loop: Main loop skip
"cbz x27, 138f\n"
"cmp x27, #0x8\n"
"blt 133f\n"
"132:" // Height 4: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
- "trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr d26, [x26], #0x8\n"
+ "ldr d25, [x25], #0x8\n"
+ "trn1 v28.2d, v26.2d, v25.2d\n"
"sub x27, x27, #0x8\n"
- "ldr d3, [x24], #0x8\n"
- "ldr d4, [x23], #0x8\n"
- "trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr d26, [x24], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
+ "trn1 v27.2d, v26.2d, v25.2d\n"
"cmp x27, #0x8\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
- ".inst 0x4e86a408 // smmla v8.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a450 // smmla v16.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x20]\n"
- ".inst 0x4e87a40c // smmla v12.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a454 // smmla v20.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x30]\n"
- ".inst 0x4e86a409 // smmla v9.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a451 // smmla v17.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x40]\n"
- ".inst 0x4e87a40d // smmla v13.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a455 // smmla v21.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x50]\n"
- ".inst 0x4e86a40a // smmla v10.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a452 // smmla v18.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x60]\n"
- ".inst 0x4e87a40e // smmla v14.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a456 // smmla v22.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x70]\n"
+ "ldr q26, [x10, #0x0]\n"
+ "ldr q25, [x10, #0x10]\n"
+ ".inst 0x4e9aa788 // smmla v8.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aa770 // smmla v16.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x20]\n"
+ ".inst 0x4e99a78c // smmla v12.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99a774 // smmla v20.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x30]\n"
+ ".inst 0x4e9aa789 // smmla v9.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aa771 // smmla v17.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x40]\n"
+ ".inst 0x4e99a78d // smmla v13.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99a775 // smmla v21.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x50]\n"
+ ".inst 0x4e9aa78a // smmla v10.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aa772 // smmla v18.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x60]\n"
+ ".inst 0x4e99a78e // smmla v14.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99a776 // smmla v22.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x70]\n"
"add x10, x10, #0x80\n"
- ".inst 0x4e86a40b // smmla v11.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a453 // smmla v19.4s, v2.16b, v6.16b\n"
- ".inst 0x4e87a40f // smmla v15.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a457 // smmla v23.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e9aa78b // smmla v11.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aa773 // smmla v19.4s, v27.16b, v26.16b\n"
+ ".inst 0x4e99a78f // smmla v15.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99a777 // smmla v23.4s, v27.16b, v25.16b\n"
"bge 132b\n"
"133:" // Height 4: Multiply loop: Skip odd blocks
"cbz x27, 138f\n"
@@ -1742,33 +1741,33 @@ void a64_hybrid_s8s32_mmla_6x16 (
"ldr b3, [x24, #0x0]\n"
"ldr b4, [x23, #0x0]\n"
"137:" // Height 4: Multiply loop: Ragged operand read: Done
- "ldr q7, [x10, #0x0]\n"
- "ldr q6, [x10, #0x10]\n"
- "trn1 v0.2d, v1.2d, v2.2d\n"
- "trn1 v2.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
- ".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
- ".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
- ".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
- ".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
- ".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
+ "ldr q26, [x10, #0x0]\n"
+ "ldr q25, [x10, #0x10]\n"
+ "trn1 v28.2d, v1.2d, v2.2d\n"
+ "trn1 v27.2d, v3.2d, v4.2d\n"
+ ".inst 0x4e9aa788 // smmla v8.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aa770 // smmla v16.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x20]\n"
+ ".inst 0x4e99a78c // smmla v12.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99a774 // smmla v20.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x30]\n"
+ ".inst 0x4e9aa789 // smmla v9.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aa771 // smmla v17.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x40]\n"
+ ".inst 0x4e99a78d // smmla v13.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99a775 // smmla v21.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x50]\n"
+ ".inst 0x4e9aa78a // smmla v10.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aa772 // smmla v18.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x60]\n"
+ ".inst 0x4e99a78e // smmla v14.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99a776 // smmla v22.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x70]\n"
"add x10, x10, #0x80\n"
- ".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
- ".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e9aa78b // smmla v11.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aa773 // smmla v19.4s, v27.16b, v26.16b\n"
+ ".inst 0x4e99a78f // smmla v15.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99a777 // smmla v23.4s, v27.16b, v25.16b\n"
"138:" // Height 4: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
@@ -2125,15 +2124,15 @@ void a64_hybrid_s8s32_mmla_6x16 (
"163:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
"ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 164f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "ldr x24, [x20, #0x10]\n"
+ "ldr x23, [x20, #0x18]\n"
+ "ldr x22, [x20, #0x20]\n"
"cbnz x28, 165f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
"add x26, x26, x20\n"
@@ -2144,10 +2143,10 @@ void a64_hybrid_s8s32_mmla_6x16 (
"b 165f\n"
"164:" // Height 5: setup direct input
"mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "add x25, x26, x21\n"
+ "add x24, x25, x21\n"
+ "add x23, x24, x21\n"
+ "add x22, x23, x21\n"
"165:" // Height 5: input setup done
"cmp x27, #0x10\n"
"blt 168f\n"
@@ -2160,174 +2159,174 @@ void a64_hybrid_s8s32_mmla_6x16 (
"ldr q7, [x10, #0x0]\n"
"blt 167f\n"
"166:" // Height 5: Multiply loop: Main loop head
- "trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn1 v6.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e87a4c8 // smmla v8.4s, v6.16b, v7.16b\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
"sub x27, x27, #0x10\n"
- "trn1 v4.2d, v5.2d, v6.2d\n"
- "trn2 v5.2d, v5.2d, v6.2d\n"
- "ldr q6, [x10, #0x10]\n"
+ "trn1 v4.2d, v5.2d, v0.2d\n"
+ "trn2 v5.2d, v5.2d, v0.2d\n"
+ "ldr q0, [x10, #0x10]\n"
".inst 0x4e87a498 // smmla v24.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x20]\n"
- ".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e80a4cc // smmla v12.4s, v6.16b, v0.16b\n"
+ ".inst 0x4e80a454 // smmla v20.4s, v2.16b, v0.16b\n"
"add x26, x26, #0x10\n"
- ".inst 0x4e86a49c // smmla v28.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
- ".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e80a49c // smmla v28.4s, v4.16b, v0.16b\n"
+ "ldr q0, [x10, #0x30]\n"
+ ".inst 0x4e87a4c9 // smmla v9.4s, v6.16b, v7.16b\n"
"add x25, x25, #0x10\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
".inst 0x4e87a499 // smmla v25.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x40]\n"
"add x24, x24, #0x10\n"
- ".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e80a4cd // smmla v13.4s, v6.16b, v0.16b\n"
+ ".inst 0x4e80a455 // smmla v21.4s, v2.16b, v0.16b\n"
"add x23, x23, #0x10\n"
"add x22, x22, #0x10\n"
- ".inst 0x4e86a49d // smmla v29.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
- ".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e80a49d // smmla v29.4s, v4.16b, v0.16b\n"
+ "ldr q0, [x10, #0x50]\n"
+ ".inst 0x4e87a4ca // smmla v10.4s, v6.16b, v7.16b\n"
"cmp x27, #0x20\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49a // smmla v26.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x60]\n"
"prfm pldl1keep, [x26, #0x80]\n"
- ".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e80a4ce // smmla v14.4s, v6.16b, v0.16b\n"
+ ".inst 0x4e80a456 // smmla v22.4s, v2.16b, v0.16b\n"
"prfm pldl1keep, [x25, #0x80]\n"
"prfm pldl1keep, [x24, #0x80]\n"
- ".inst 0x4e86a49e // smmla v30.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
- ".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e80a49e // smmla v30.4s, v4.16b, v0.16b\n"
+ "ldr q0, [x10, #0x70]\n"
+ ".inst 0x4e87a4cb // smmla v11.4s, v6.16b, v7.16b\n"
"prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49b // smmla v27.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x80]\n"
"prfm pldl1keep, [x22, #0x80]\n"
- ".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e80a4cf // smmla v15.4s, v6.16b, v0.16b\n"
+ ".inst 0x4e80a457 // smmla v23.4s, v2.16b, v0.16b\n"
"ldr q2, [x25, #0x0]\n"
- ".inst 0x4e86a49f // smmla v31.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
+ ".inst 0x4e80a49f // smmla v31.4s, v4.16b, v0.16b\n"
+ "ldr q0, [x10, #0x90]\n"
"ldr q4, [x23, #0x0]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4b8 // smmla v24.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
- ".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
- ".inst 0x4e86a474 // smmla v20.4s, v3.16b, v6.16b\n"
- ".inst 0x4e86a4bc // smmla v28.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
- ".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
- ".inst 0x4e87a471 // smmla v17.4s, v3.16b, v7.16b\n"
- ".inst 0x4e87a4b9 // smmla v25.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
- ".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
- ".inst 0x4e86a475 // smmla v21.4s, v3.16b, v6.16b\n"
- ".inst 0x4e86a4bd // smmla v29.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
- ".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
- ".inst 0x4e87a472 // smmla v18.4s, v3.16b, v7.16b\n"
- ".inst 0x4e87a4ba // smmla v26.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
- ".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
- ".inst 0x4e86a476 // smmla v22.4s, v3.16b, v6.16b\n"
- ".inst 0x4e86a4be // smmla v30.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
+ "ldr q6, [x10, #0xa0]\n"
+ ".inst 0x4e80a42c // smmla v12.4s, v1.16b, v0.16b\n"
+ ".inst 0x4e80a474 // smmla v20.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80a4bc // smmla v28.4s, v5.16b, v0.16b\n"
+ "ldr q0, [x10, #0xb0]\n"
+ ".inst 0x4e86a429 // smmla v9.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e86a471 // smmla v17.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e86a4b9 // smmla v25.4s, v5.16b, v6.16b\n"
+ "ldr q6, [x10, #0xc0]\n"
+ ".inst 0x4e80a42d // smmla v13.4s, v1.16b, v0.16b\n"
+ ".inst 0x4e80a475 // smmla v21.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80a4bd // smmla v29.4s, v5.16b, v0.16b\n"
+ "ldr q0, [x10, #0xd0]\n"
+ ".inst 0x4e86a42a // smmla v10.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e86a472 // smmla v18.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e86a4ba // smmla v26.4s, v5.16b, v6.16b\n"
+ "ldr q6, [x10, #0xe0]\n"
+ ".inst 0x4e80a42e // smmla v14.4s, v1.16b, v0.16b\n"
+ ".inst 0x4e80a476 // smmla v22.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80a4be // smmla v30.4s, v5.16b, v0.16b\n"
+ "ldr q0, [x10, #0xf0]\n"
"add x10, x10, #0x100\n"
- ".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
- ".inst 0x4e87a473 // smmla v19.4s, v3.16b, v7.16b\n"
- ".inst 0x4e87a4bb // smmla v27.4s, v5.16b, v7.16b\n"
+ ".inst 0x4e86a42b // smmla v11.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e86a473 // smmla v19.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e86a4bb // smmla v27.4s, v5.16b, v6.16b\n"
"ldr q7, [x10, #0x0]\n"
- ".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e80a42f // smmla v15.4s, v1.16b, v0.16b\n"
"ldr q1, [x26, #0x0]\n"
- ".inst 0x4e86a477 // smmla v23.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e80a477 // smmla v23.4s, v3.16b, v0.16b\n"
"ldr q3, [x24, #0x0]\n"
- ".inst 0x4e86a4bf // smmla v31.4s, v5.16b, v6.16b\n"
+ ".inst 0x4e80a4bf // smmla v31.4s, v5.16b, v0.16b\n"
"ldr q5, [x22, #0x0]\n"
"bge 166b\n"
"167:" // Height 5: Multiply loop: Single iteration only
- "trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn1 v6.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e87a4c8 // smmla v8.4s, v6.16b, v7.16b\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
"add x26, x26, #0x10\n"
- "trn1 v4.2d, v5.2d, v6.2d\n"
- "trn2 v5.2d, v5.2d, v6.2d\n"
- "ldr q6, [x10, #0x10]\n"
+ "trn1 v4.2d, v5.2d, v0.2d\n"
+ "trn2 v5.2d, v5.2d, v0.2d\n"
+ "ldr q0, [x10, #0x10]\n"
".inst 0x4e87a498 // smmla v24.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x20]\n"
- ".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e80a4cc // smmla v12.4s, v6.16b, v0.16b\n"
+ ".inst 0x4e80a454 // smmla v20.4s, v2.16b, v0.16b\n"
"add x25, x25, #0x10\n"
- ".inst 0x4e86a49c // smmla v28.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
- ".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e80a49c // smmla v28.4s, v4.16b, v0.16b\n"
+ "ldr q0, [x10, #0x30]\n"
+ ".inst 0x4e87a4c9 // smmla v9.4s, v6.16b, v7.16b\n"
"add x24, x24, #0x10\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
".inst 0x4e87a499 // smmla v25.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x40]\n"
"add x23, x23, #0x10\n"
- ".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e80a4cd // smmla v13.4s, v6.16b, v0.16b\n"
+ ".inst 0x4e80a455 // smmla v21.4s, v2.16b, v0.16b\n"
"add x22, x22, #0x10\n"
"sub x27, x27, #0x10\n"
- ".inst 0x4e86a49d // smmla v29.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
- ".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e80a49d // smmla v29.4s, v4.16b, v0.16b\n"
+ "ldr q0, [x10, #0x50]\n"
+ ".inst 0x4e87a4ca // smmla v10.4s, v6.16b, v7.16b\n"
"prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49a // smmla v26.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x60]\n"
"prfm pldl1keep, [x25, #0x80]\n"
- ".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e80a4ce // smmla v14.4s, v6.16b, v0.16b\n"
+ ".inst 0x4e80a456 // smmla v22.4s, v2.16b, v0.16b\n"
"prfm pldl1keep, [x24, #0x80]\n"
"prfm pldl1keep, [x23, #0x80]\n"
- ".inst 0x4e86a49e // smmla v30.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x70]\n"
- ".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e80a49e // smmla v30.4s, v4.16b, v0.16b\n"
+ "ldr q0, [x10, #0x70]\n"
+ ".inst 0x4e87a4cb // smmla v11.4s, v6.16b, v7.16b\n"
"prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49b // smmla v27.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x80]\n"
- ".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
- ".inst 0x4e86a49f // smmla v31.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
+ ".inst 0x4e80a4cf // smmla v15.4s, v6.16b, v0.16b\n"
+ ".inst 0x4e80a457 // smmla v23.4s, v2.16b, v0.16b\n"
+ ".inst 0x4e80a49f // smmla v31.4s, v4.16b, v0.16b\n"
+ "ldr q2, [x10, #0x90]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4b8 // smmla v24.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
- ".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
- ".inst 0x4e86a474 // smmla v20.4s, v3.16b, v6.16b\n"
- ".inst 0x4e86a4bc // smmla v28.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
- ".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
- ".inst 0x4e87a471 // smmla v17.4s, v3.16b, v7.16b\n"
- ".inst 0x4e87a4b9 // smmla v25.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
- ".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
- ".inst 0x4e86a475 // smmla v21.4s, v3.16b, v6.16b\n"
- ".inst 0x4e86a4bd // smmla v29.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
- ".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
- ".inst 0x4e87a472 // smmla v18.4s, v3.16b, v7.16b\n"
- ".inst 0x4e87a4ba // smmla v26.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
- ".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
- ".inst 0x4e86a476 // smmla v22.4s, v3.16b, v6.16b\n"
- ".inst 0x4e86a4be // smmla v30.4s, v5.16b, v6.16b\n"
+ "ldr q0, [x10, #0xa0]\n"
+ ".inst 0x4e82a42c // smmla v12.4s, v1.16b, v2.16b\n"
+ ".inst 0x4e82a474 // smmla v20.4s, v3.16b, v2.16b\n"
+ ".inst 0x4e82a4bc // smmla v28.4s, v5.16b, v2.16b\n"
+ "ldr q2, [x10, #0xb0]\n"
+ ".inst 0x4e80a429 // smmla v9.4s, v1.16b, v0.16b\n"
+ ".inst 0x4e80a471 // smmla v17.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80a4b9 // smmla v25.4s, v5.16b, v0.16b\n"
+ "ldr q0, [x10, #0xc0]\n"
+ ".inst 0x4e82a42d // smmla v13.4s, v1.16b, v2.16b\n"
+ ".inst 0x4e82a475 // smmla v21.4s, v3.16b, v2.16b\n"
+ ".inst 0x4e82a4bd // smmla v29.4s, v5.16b, v2.16b\n"
+ "ldr q2, [x10, #0xd0]\n"
+ ".inst 0x4e80a42a // smmla v10.4s, v1.16b, v0.16b\n"
+ ".inst 0x4e80a472 // smmla v18.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80a4ba // smmla v26.4s, v5.16b, v0.16b\n"
+ "ldr q0, [x10, #0xe0]\n"
+ ".inst 0x4e82a42e // smmla v14.4s, v1.16b, v2.16b\n"
+ ".inst 0x4e82a476 // smmla v22.4s, v3.16b, v2.16b\n"
+ ".inst 0x4e82a4be // smmla v30.4s, v5.16b, v2.16b\n"
"ldr q6, [x10, #0xf0]\n"
"add x10, x10, #0x100\n"
- ".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
- ".inst 0x4e87a473 // smmla v19.4s, v3.16b, v7.16b\n"
- ".inst 0x4e87a4bb // smmla v27.4s, v5.16b, v7.16b\n"
+ ".inst 0x4e80a42b // smmla v11.4s, v1.16b, v0.16b\n"
+ ".inst 0x4e80a473 // smmla v19.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80a4bb // smmla v27.4s, v5.16b, v0.16b\n"
".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
".inst 0x4e86a477 // smmla v23.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4bf // smmla v31.4s, v5.16b, v6.16b\n"
@@ -2337,48 +2336,48 @@ void a64_hybrid_s8s32_mmla_6x16 (
"blt 170f\n"
"169:" // Height 5: Multiply loop: Odd block loop
"ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
- "trn1 v0.2d, v1.2d, v2.2d\n"
- "ldr d3, [x24], #0x8\n"
- "ldr d4, [x23], #0x8\n"
- "trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr d0, [x25], #0x8\n"
+ "trn1 v4.2d, v1.2d, v0.2d\n"
+ "ldr d1, [x24], #0x8\n"
+ "ldr d0, [x23], #0x8\n"
+ "trn1 v3.2d, v1.2d, v0.2d\n"
"sub x27, x27, #0x8\n"
- "ldr d5, [x22], #0x8\n"
- "ldr q6, [x10, #0x0]\n"
- "trn1 v4.2d, v5.2d, v7.2d\n"
- ".inst 0x4e86a408 // smmla v8.4s, v0.16b, v6.16b\n"
- "ldr q7, [x10, #0x10]\n"
- ".inst 0x4e86a450 // smmla v16.4s, v2.16b, v6.16b\n"
- ".inst 0x4e86a498 // smmla v24.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x20]\n"
- ".inst 0x4e87a40c // smmla v12.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a454 // smmla v20.4s, v2.16b, v7.16b\n"
+ "ldr d0, [x22], #0x8\n"
+ "ldr q1, [x10, #0x0]\n"
+ "trn1 v2.2d, v0.2d, v2.2d\n"
+ ".inst 0x4e81a488 // smmla v8.4s, v4.16b, v1.16b\n"
+ "ldr q0, [x10, #0x10]\n"
+ ".inst 0x4e81a470 // smmla v16.4s, v3.16b, v1.16b\n"
+ ".inst 0x4e81a458 // smmla v24.4s, v2.16b, v1.16b\n"
+ "ldr q1, [x10, #0x20]\n"
+ ".inst 0x4e80a48c // smmla v12.4s, v4.16b, v0.16b\n"
+ ".inst 0x4e80a474 // smmla v20.4s, v3.16b, v0.16b\n"
"cmp x27, #0x8\n"
- ".inst 0x4e87a49c // smmla v28.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x30]\n"
- ".inst 0x4e86a409 // smmla v9.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a451 // smmla v17.4s, v2.16b, v6.16b\n"
- ".inst 0x4e86a499 // smmla v25.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x40]\n"
- ".inst 0x4e87a40d // smmla v13.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a455 // smmla v21.4s, v2.16b, v7.16b\n"
- ".inst 0x4e87a49d // smmla v29.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x50]\n"
- ".inst 0x4e86a40a // smmla v10.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a452 // smmla v18.4s, v2.16b, v6.16b\n"
- ".inst 0x4e86a49a // smmla v26.4s, v4.16b, v6.16b\n"
+ ".inst 0x4e80a45c // smmla v28.4s, v2.16b, v0.16b\n"
+ "ldr q0, [x10, #0x30]\n"
+ ".inst 0x4e81a489 // smmla v9.4s, v4.16b, v1.16b\n"
+ ".inst 0x4e81a471 // smmla v17.4s, v3.16b, v1.16b\n"
+ ".inst 0x4e81a459 // smmla v25.4s, v2.16b, v1.16b\n"
+ "ldr q1, [x10, #0x40]\n"
+ ".inst 0x4e80a48d // smmla v13.4s, v4.16b, v0.16b\n"
+ ".inst 0x4e80a475 // smmla v21.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80a45d // smmla v29.4s, v2.16b, v0.16b\n"
+ "ldr q0, [x10, #0x50]\n"
+ ".inst 0x4e81a48a // smmla v10.4s, v4.16b, v1.16b\n"
+ ".inst 0x4e81a472 // smmla v18.4s, v3.16b, v1.16b\n"
+ ".inst 0x4e81a45a // smmla v26.4s, v2.16b, v1.16b\n"
"ldr q6, [x10, #0x60]\n"
- ".inst 0x4e87a40e // smmla v14.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a456 // smmla v22.4s, v2.16b, v7.16b\n"
- ".inst 0x4e87a49e // smmla v30.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x70]\n"
- ".inst 0x4e86a40b // smmla v11.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e80a48e // smmla v14.4s, v4.16b, v0.16b\n"
+ ".inst 0x4e80a476 // smmla v22.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80a45e // smmla v30.4s, v2.16b, v0.16b\n"
+ "ldr q0, [x10, #0x70]\n"
+ ".inst 0x4e86a48b // smmla v11.4s, v4.16b, v6.16b\n"
"add x10, x10, #0x80\n"
- ".inst 0x4e86a453 // smmla v19.4s, v2.16b, v6.16b\n"
- ".inst 0x4e86a49b // smmla v27.4s, v4.16b, v6.16b\n"
- ".inst 0x4e87a40f // smmla v15.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a457 // smmla v23.4s, v2.16b, v7.16b\n"
- ".inst 0x4e87a49f // smmla v31.4s, v4.16b, v7.16b\n"
+ ".inst 0x4e86a473 // smmla v19.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e86a45b // smmla v27.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e80a48f // smmla v15.4s, v4.16b, v0.16b\n"
+ ".inst 0x4e80a477 // smmla v23.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80a45f // smmla v31.4s, v2.16b, v0.16b\n"
"bge 169b\n"
"170:" // Height 5: Multiply loop: Skip odd blocks
"cbz x27, 175f\n"
@@ -2430,42 +2429,42 @@ void a64_hybrid_s8s32_mmla_6x16 (
"ldr b4, [x23, #0x0]\n"
"ldr b5, [x22, #0x0]\n"
"174:" // Height 5: Multiply loop: Ragged operand read: Done
- "ldr q7, [x10, #0x0]\n"
- "trn1 v0.2d, v1.2d, v2.2d\n"
- "trn1 v2.2d, v3.2d, v4.2d\n"
- "trn1 v4.2d, v5.2d, v6.2d\n"
- "ldr q6, [x10, #0x10]\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- ".inst 0x4e87a498 // smmla v24.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
- ".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- ".inst 0x4e86a49c // smmla v28.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
- ".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
- ".inst 0x4e87a499 // smmla v25.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
- ".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- ".inst 0x4e86a49d // smmla v29.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
- ".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
- ".inst 0x4e87a49a // smmla v26.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
- ".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- ".inst 0x4e86a49e // smmla v30.4s, v4.16b, v6.16b\n"
+ "ldr q6, [x10, #0x0]\n"
+ "trn1 v7.2d, v1.2d, v2.2d\n"
+ "trn1 v3.2d, v3.2d, v4.2d\n"
+ "trn1 v2.2d, v5.2d, v0.2d\n"
+ "ldr q1, [x10, #0x10]\n"
+ ".inst 0x4e86a4e8 // smmla v8.4s, v7.16b, v6.16b\n"
+ ".inst 0x4e86a470 // smmla v16.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e86a458 // smmla v24.4s, v2.16b, v6.16b\n"
+ "ldr q0, [x10, #0x20]\n"
+ ".inst 0x4e81a4ec // smmla v12.4s, v7.16b, v1.16b\n"
+ ".inst 0x4e81a474 // smmla v20.4s, v3.16b, v1.16b\n"
+ ".inst 0x4e81a45c // smmla v28.4s, v2.16b, v1.16b\n"
+ "ldr q1, [x10, #0x30]\n"
+ ".inst 0x4e80a4e9 // smmla v9.4s, v7.16b, v0.16b\n"
+ ".inst 0x4e80a471 // smmla v17.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80a459 // smmla v25.4s, v2.16b, v0.16b\n"
+ "ldr q0, [x10, #0x40]\n"
+ ".inst 0x4e81a4ed // smmla v13.4s, v7.16b, v1.16b\n"
+ ".inst 0x4e81a475 // smmla v21.4s, v3.16b, v1.16b\n"
+ ".inst 0x4e81a45d // smmla v29.4s, v2.16b, v1.16b\n"
+ "ldr q1, [x10, #0x50]\n"
+ ".inst 0x4e80a4ea // smmla v10.4s, v7.16b, v0.16b\n"
+ ".inst 0x4e80a472 // smmla v18.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80a45a // smmla v26.4s, v2.16b, v0.16b\n"
+ "ldr q0, [x10, #0x60]\n"
+ ".inst 0x4e81a4ee // smmla v14.4s, v7.16b, v1.16b\n"
+ ".inst 0x4e81a476 // smmla v22.4s, v3.16b, v1.16b\n"
+ ".inst 0x4e81a45e // smmla v30.4s, v2.16b, v1.16b\n"
"ldr q6, [x10, #0x70]\n"
"add x10, x10, #0x80\n"
- ".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
- ".inst 0x4e87a49b // smmla v27.4s, v4.16b, v7.16b\n"
- ".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
- ".inst 0x4e86a49f // smmla v31.4s, v4.16b, v6.16b\n"
+ ".inst 0x4e80a4eb // smmla v11.4s, v7.16b, v0.16b\n"
+ ".inst 0x4e80a473 // smmla v19.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80a45b // smmla v27.4s, v2.16b, v0.16b\n"
+ ".inst 0x4e86a4ef // smmla v15.4s, v7.16b, v6.16b\n"
+ ".inst 0x4e86a477 // smmla v23.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e86a45f // smmla v31.4s, v2.16b, v6.16b\n"
"175:" // Height 5: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
@@ -2872,16 +2871,16 @@ void a64_hybrid_s8s32_mmla_6x16 (
"200:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
"ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 201f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "ldr x24, [x20, #0x10]\n"
+ "ldr x23, [x20, #0x18]\n"
+ "ldr x22, [x20, #0x20]\n"
+ "ldr x21, [x20, #0x28]\n"
"cbnz x28, 202f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
"add x26, x26, x20\n"
@@ -2893,11 +2892,11 @@ void a64_hybrid_s8s32_mmla_6x16 (
"b 202f\n"
"201:" // Height 6: setup direct input
"mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "add x25, x26, x21\n"
+ "add x24, x25, x21\n"
+ "add x23, x24, x21\n"
+ "add x22, x23, x21\n"
+ "add x21, x22, x21\n"
"202:" // Height 6: input setup done
"cmp x27, #0x10\n"
"blt 205f\n"
@@ -2964,42 +2963,42 @@ void a64_hybrid_s8s32_mmla_6x16 (
"ldr q2, [x25, #0x0]\n"
"prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4e86a49f // smmla v31.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
+ "ldr q0, [x10, #0x90]\n"
"ldr q4, [x23, #0x0]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4b8 // smmla v24.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
- ".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
- ".inst 0x4e86a474 // smmla v20.4s, v3.16b, v6.16b\n"
- ".inst 0x4e86a4bc // smmla v28.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
- ".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
- ".inst 0x4e87a471 // smmla v17.4s, v3.16b, v7.16b\n"
- ".inst 0x4e87a4b9 // smmla v25.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
- ".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
- ".inst 0x4e86a475 // smmla v21.4s, v3.16b, v6.16b\n"
- ".inst 0x4e86a4bd // smmla v29.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
- ".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
- ".inst 0x4e87a472 // smmla v18.4s, v3.16b, v7.16b\n"
- ".inst 0x4e87a4ba // smmla v26.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
- ".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
- ".inst 0x4e86a476 // smmla v22.4s, v3.16b, v6.16b\n"
- ".inst 0x4e86a4be // smmla v30.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xf0]\n"
+ "ldr q6, [x10, #0xa0]\n"
+ ".inst 0x4e80a42c // smmla v12.4s, v1.16b, v0.16b\n"
+ ".inst 0x4e80a474 // smmla v20.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80a4bc // smmla v28.4s, v5.16b, v0.16b\n"
+ "ldr q0, [x10, #0xb0]\n"
+ ".inst 0x4e86a429 // smmla v9.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e86a471 // smmla v17.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e86a4b9 // smmla v25.4s, v5.16b, v6.16b\n"
+ "ldr q6, [x10, #0xc0]\n"
+ ".inst 0x4e80a42d // smmla v13.4s, v1.16b, v0.16b\n"
+ ".inst 0x4e80a475 // smmla v21.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80a4bd // smmla v29.4s, v5.16b, v0.16b\n"
+ "ldr q0, [x10, #0xd0]\n"
+ ".inst 0x4e86a42a // smmla v10.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e86a472 // smmla v18.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e86a4ba // smmla v26.4s, v5.16b, v6.16b\n"
+ "ldr q6, [x10, #0xe0]\n"
+ ".inst 0x4e80a42e // smmla v14.4s, v1.16b, v0.16b\n"
+ ".inst 0x4e80a476 // smmla v22.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80a4be // smmla v30.4s, v5.16b, v0.16b\n"
+ "ldr q0, [x10, #0xf0]\n"
"add x10, x10, #0x100\n"
- ".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
- ".inst 0x4e87a473 // smmla v19.4s, v3.16b, v7.16b\n"
- ".inst 0x4e87a4bb // smmla v27.4s, v5.16b, v7.16b\n"
+ ".inst 0x4e86a42b // smmla v11.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e86a473 // smmla v19.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e86a4bb // smmla v27.4s, v5.16b, v6.16b\n"
"ldr q7, [x10, #0x0]\n"
- ".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e80a42f // smmla v15.4s, v1.16b, v0.16b\n"
"ldr q1, [x26, #0x0]\n"
- ".inst 0x4e86a477 // smmla v23.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e80a477 // smmla v23.4s, v3.16b, v0.16b\n"
"ldr q3, [x24, #0x0]\n"
- ".inst 0x4e86a4bf // smmla v31.4s, v5.16b, v6.16b\n"
+ ".inst 0x4e80a4bf // smmla v31.4s, v5.16b, v0.16b\n"
"ldr q5, [x22, #0x0]\n"
"ldr q6, [x21, #0x0]\n"
"bge 203b\n"
@@ -3055,35 +3054,35 @@ void a64_hybrid_s8s32_mmla_6x16 (
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49f // smmla v31.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x90]\n"
+ "ldr q2, [x10, #0x90]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4b8 // smmla v24.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xa0]\n"
- ".inst 0x4e86a42c // smmla v12.4s, v1.16b, v6.16b\n"
- ".inst 0x4e86a474 // smmla v20.4s, v3.16b, v6.16b\n"
- ".inst 0x4e86a4bc // smmla v28.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xb0]\n"
- ".inst 0x4e87a429 // smmla v9.4s, v1.16b, v7.16b\n"
- ".inst 0x4e87a471 // smmla v17.4s, v3.16b, v7.16b\n"
- ".inst 0x4e87a4b9 // smmla v25.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xc0]\n"
- ".inst 0x4e86a42d // smmla v13.4s, v1.16b, v6.16b\n"
- ".inst 0x4e86a475 // smmla v21.4s, v3.16b, v6.16b\n"
- ".inst 0x4e86a4bd // smmla v29.4s, v5.16b, v6.16b\n"
- "ldr q6, [x10, #0xd0]\n"
- ".inst 0x4e87a42a // smmla v10.4s, v1.16b, v7.16b\n"
- ".inst 0x4e87a472 // smmla v18.4s, v3.16b, v7.16b\n"
- ".inst 0x4e87a4ba // smmla v26.4s, v5.16b, v7.16b\n"
- "ldr q7, [x10, #0xe0]\n"
- ".inst 0x4e86a42e // smmla v14.4s, v1.16b, v6.16b\n"
- ".inst 0x4e86a476 // smmla v22.4s, v3.16b, v6.16b\n"
- ".inst 0x4e86a4be // smmla v30.4s, v5.16b, v6.16b\n"
+ "ldr q0, [x10, #0xa0]\n"
+ ".inst 0x4e82a42c // smmla v12.4s, v1.16b, v2.16b\n"
+ ".inst 0x4e82a474 // smmla v20.4s, v3.16b, v2.16b\n"
+ ".inst 0x4e82a4bc // smmla v28.4s, v5.16b, v2.16b\n"
+ "ldr q2, [x10, #0xb0]\n"
+ ".inst 0x4e80a429 // smmla v9.4s, v1.16b, v0.16b\n"
+ ".inst 0x4e80a471 // smmla v17.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80a4b9 // smmla v25.4s, v5.16b, v0.16b\n"
+ "ldr q0, [x10, #0xc0]\n"
+ ".inst 0x4e82a42d // smmla v13.4s, v1.16b, v2.16b\n"
+ ".inst 0x4e82a475 // smmla v21.4s, v3.16b, v2.16b\n"
+ ".inst 0x4e82a4bd // smmla v29.4s, v5.16b, v2.16b\n"
+ "ldr q2, [x10, #0xd0]\n"
+ ".inst 0x4e80a42a // smmla v10.4s, v1.16b, v0.16b\n"
+ ".inst 0x4e80a472 // smmla v18.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80a4ba // smmla v26.4s, v5.16b, v0.16b\n"
+ "ldr q0, [x10, #0xe0]\n"
+ ".inst 0x4e82a42e // smmla v14.4s, v1.16b, v2.16b\n"
+ ".inst 0x4e82a476 // smmla v22.4s, v3.16b, v2.16b\n"
+ ".inst 0x4e82a4be // smmla v30.4s, v5.16b, v2.16b\n"
"ldr q6, [x10, #0xf0]\n"
"add x10, x10, #0x100\n"
- ".inst 0x4e87a42b // smmla v11.4s, v1.16b, v7.16b\n"
- ".inst 0x4e87a473 // smmla v19.4s, v3.16b, v7.16b\n"
- ".inst 0x4e87a4bb // smmla v27.4s, v5.16b, v7.16b\n"
+ ".inst 0x4e80a42b // smmla v11.4s, v1.16b, v0.16b\n"
+ ".inst 0x4e80a473 // smmla v19.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80a4bb // smmla v27.4s, v5.16b, v0.16b\n"
".inst 0x4e86a42f // smmla v15.4s, v1.16b, v6.16b\n"
".inst 0x4e86a477 // smmla v23.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4bf // smmla v31.4s, v5.16b, v6.16b\n"
@@ -3093,49 +3092,49 @@ void a64_hybrid_s8s32_mmla_6x16 (
"blt 207f\n"
"206:" // Height 6: Multiply loop: Odd block loop
"ldr d1, [x26], #0x8\n"
- "ldr d2, [x25], #0x8\n"
- "trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr d0, [x25], #0x8\n"
+ "trn1 v4.2d, v1.2d, v0.2d\n"
"sub x27, x27, #0x8\n"
- "ldr d3, [x24], #0x8\n"
- "ldr d4, [x23], #0x8\n"
- "trn1 v2.2d, v3.2d, v4.2d\n"
+ "ldr d1, [x24], #0x8\n"
+ "ldr d0, [x23], #0x8\n"
+ "trn1 v3.2d, v1.2d, v0.2d\n"
"cmp x27, #0x8\n"
- "ldr d5, [x22], #0x8\n"
- "ldr d7, [x21], #0x8\n"
- "trn1 v4.2d, v5.2d, v7.2d\n"
- "ldr q6, [x10, #0x0]\n"
- "ldr q7, [x10, #0x10]\n"
- ".inst 0x4e86a408 // smmla v8.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a450 // smmla v16.4s, v2.16b, v6.16b\n"
- ".inst 0x4e86a498 // smmla v24.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x20]\n"
- ".inst 0x4e87a40c // smmla v12.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a454 // smmla v20.4s, v2.16b, v7.16b\n"
- ".inst 0x4e87a49c // smmla v28.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x30]\n"
- ".inst 0x4e86a409 // smmla v9.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a451 // smmla v17.4s, v2.16b, v6.16b\n"
- ".inst 0x4e86a499 // smmla v25.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x40]\n"
- ".inst 0x4e87a40d // smmla v13.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a455 // smmla v21.4s, v2.16b, v7.16b\n"
- ".inst 0x4e87a49d // smmla v29.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x50]\n"
- ".inst 0x4e86a40a // smmla v10.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a452 // smmla v18.4s, v2.16b, v6.16b\n"
- ".inst 0x4e86a49a // smmla v26.4s, v4.16b, v6.16b\n"
+ "ldr d1, [x22], #0x8\n"
+ "ldr d0, [x21], #0x8\n"
+ "trn1 v2.2d, v1.2d, v0.2d\n"
+ "ldr q1, [x10, #0x0]\n"
+ "ldr q0, [x10, #0x10]\n"
+ ".inst 0x4e81a488 // smmla v8.4s, v4.16b, v1.16b\n"
+ ".inst 0x4e81a470 // smmla v16.4s, v3.16b, v1.16b\n"
+ ".inst 0x4e81a458 // smmla v24.4s, v2.16b, v1.16b\n"
+ "ldr q1, [x10, #0x20]\n"
+ ".inst 0x4e80a48c // smmla v12.4s, v4.16b, v0.16b\n"
+ ".inst 0x4e80a474 // smmla v20.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80a45c // smmla v28.4s, v2.16b, v0.16b\n"
+ "ldr q0, [x10, #0x30]\n"
+ ".inst 0x4e81a489 // smmla v9.4s, v4.16b, v1.16b\n"
+ ".inst 0x4e81a471 // smmla v17.4s, v3.16b, v1.16b\n"
+ ".inst 0x4e81a459 // smmla v25.4s, v2.16b, v1.16b\n"
+ "ldr q1, [x10, #0x40]\n"
+ ".inst 0x4e80a48d // smmla v13.4s, v4.16b, v0.16b\n"
+ ".inst 0x4e80a475 // smmla v21.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80a45d // smmla v29.4s, v2.16b, v0.16b\n"
+ "ldr q0, [x10, #0x50]\n"
+ ".inst 0x4e81a48a // smmla v10.4s, v4.16b, v1.16b\n"
+ ".inst 0x4e81a472 // smmla v18.4s, v3.16b, v1.16b\n"
+ ".inst 0x4e81a45a // smmla v26.4s, v2.16b, v1.16b\n"
"ldr q6, [x10, #0x60]\n"
- ".inst 0x4e87a40e // smmla v14.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a456 // smmla v22.4s, v2.16b, v7.16b\n"
- ".inst 0x4e87a49e // smmla v30.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x70]\n"
+ ".inst 0x4e80a48e // smmla v14.4s, v4.16b, v0.16b\n"
+ ".inst 0x4e80a476 // smmla v22.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80a45e // smmla v30.4s, v2.16b, v0.16b\n"
+ "ldr q0, [x10, #0x70]\n"
"add x10, x10, #0x80\n"
- ".inst 0x4e86a40b // smmla v11.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a453 // smmla v19.4s, v2.16b, v6.16b\n"
- ".inst 0x4e86a49b // smmla v27.4s, v4.16b, v6.16b\n"
- ".inst 0x4e87a40f // smmla v15.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a457 // smmla v23.4s, v2.16b, v7.16b\n"
- ".inst 0x4e87a49f // smmla v31.4s, v4.16b, v7.16b\n"
+ ".inst 0x4e86a48b // smmla v11.4s, v4.16b, v6.16b\n"
+ ".inst 0x4e86a473 // smmla v19.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e86a45b // smmla v27.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e80a48f // smmla v15.4s, v4.16b, v0.16b\n"
+ ".inst 0x4e80a477 // smmla v23.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80a45f // smmla v31.4s, v2.16b, v0.16b\n"
"bge 206b\n"
"207:" // Height 6: Multiply loop: Skip odd blocks
"cbz x27, 212f\n"
@@ -3194,42 +3193,42 @@ void a64_hybrid_s8s32_mmla_6x16 (
"ldr b5, [x22, #0x0]\n"
"ldr b6, [x21, #0x0]\n"
"211:" // Height 6: Multiply loop: Ragged operand read: Done
- "ldr q7, [x10, #0x0]\n"
- "trn1 v0.2d, v1.2d, v2.2d\n"
- "trn1 v2.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
- "trn1 v4.2d, v5.2d, v6.2d\n"
- "ldr q6, [x10, #0x10]\n"
- ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- ".inst 0x4e87a498 // smmla v24.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x20]\n"
- ".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- ".inst 0x4e86a49c // smmla v28.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x30]\n"
- ".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
- ".inst 0x4e87a499 // smmla v25.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x40]\n"
- ".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- ".inst 0x4e86a49d // smmla v29.4s, v4.16b, v6.16b\n"
- "ldr q6, [x10, #0x50]\n"
- ".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- ".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
- ".inst 0x4e87a49a // smmla v26.4s, v4.16b, v7.16b\n"
- "ldr q7, [x10, #0x60]\n"
- ".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- ".inst 0x4e86a49e // smmla v30.4s, v4.16b, v6.16b\n"
+ "ldr q0, [x10, #0x0]\n"
+ "trn1 v7.2d, v1.2d, v2.2d\n"
+ "trn1 v3.2d, v3.2d, v4.2d\n"
+ ".inst 0x4e80a4e8 // smmla v8.4s, v7.16b, v0.16b\n"
+ "trn1 v2.2d, v5.2d, v6.2d\n"
+ "ldr q1, [x10, #0x10]\n"
+ ".inst 0x4e80a470 // smmla v16.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80a458 // smmla v24.4s, v2.16b, v0.16b\n"
+ "ldr q0, [x10, #0x20]\n"
+ ".inst 0x4e81a4ec // smmla v12.4s, v7.16b, v1.16b\n"
+ ".inst 0x4e81a474 // smmla v20.4s, v3.16b, v1.16b\n"
+ ".inst 0x4e81a45c // smmla v28.4s, v2.16b, v1.16b\n"
+ "ldr q1, [x10, #0x30]\n"
+ ".inst 0x4e80a4e9 // smmla v9.4s, v7.16b, v0.16b\n"
+ ".inst 0x4e80a471 // smmla v17.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80a459 // smmla v25.4s, v2.16b, v0.16b\n"
+ "ldr q0, [x10, #0x40]\n"
+ ".inst 0x4e81a4ed // smmla v13.4s, v7.16b, v1.16b\n"
+ ".inst 0x4e81a475 // smmla v21.4s, v3.16b, v1.16b\n"
+ ".inst 0x4e81a45d // smmla v29.4s, v2.16b, v1.16b\n"
+ "ldr q1, [x10, #0x50]\n"
+ ".inst 0x4e80a4ea // smmla v10.4s, v7.16b, v0.16b\n"
+ ".inst 0x4e80a472 // smmla v18.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80a45a // smmla v26.4s, v2.16b, v0.16b\n"
+ "ldr q0, [x10, #0x60]\n"
+ ".inst 0x4e81a4ee // smmla v14.4s, v7.16b, v1.16b\n"
+ ".inst 0x4e81a476 // smmla v22.4s, v3.16b, v1.16b\n"
+ ".inst 0x4e81a45e // smmla v30.4s, v2.16b, v1.16b\n"
"ldr q6, [x10, #0x70]\n"
- ".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e80a4eb // smmla v11.4s, v7.16b, v0.16b\n"
"add x10, x10, #0x80\n"
- ".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
- ".inst 0x4e87a49b // smmla v27.4s, v4.16b, v7.16b\n"
- ".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
- ".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
- ".inst 0x4e86a49f // smmla v31.4s, v4.16b, v6.16b\n"
+ ".inst 0x4e80a473 // smmla v19.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80a45b // smmla v27.4s, v2.16b, v0.16b\n"
+ ".inst 0x4e86a4ef // smmla v15.4s, v7.16b, v6.16b\n"
+ ".inst 0x4e86a477 // smmla v23.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e86a45f // smmla v31.4s, v2.16b, v6.16b\n"
"212:" // Height 6: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
@@ -3440,7 +3439,6 @@ void a64_hybrid_s8s32_mmla_6x16 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"224:" // Exit
-
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"