Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24/generic.cpp')
-rw-r--r-- | src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24/generic.cpp | 1179 |
1 file changed, 588 insertions, 591 deletions
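The core of this change is a small refactor: `output_ptr` and `bias` are dropped from the `KernelArgs` struct (and their `ldr xN, [%x[args_ptr], %[offsetof_...]]` loads from it) and are instead bound directly as inline-asm operands, loaded with `mov xN, %x[...]`. Below is a minimal sketch of that pattern, not the kernel itself; the struct layout and function name here are hypothetical, and only the operand-passing idiom is taken from the diff.

```cpp
#include <cstddef>

struct KernelArgs {
    size_t N = {};           // parameters still passed via the struct
    const void *B_ptr = {};
    // void *output_ptr and const float *bias used to live here; the diff
    // removes them and binds them as asm operands instead.
};

void run_kernel(KernelArgs *args, void *output_ptr, const float *bias) {
    __asm__ __volatile__(
        // Before the change: "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]"
        // After: the compiler places the pointer in a register and we copy it.
        "mov x13, %x[output_ptr]\n"
        "mov x15, %x[bias]\n"
        : /* no outputs; x13/x15 are written and listed as clobbers */
        : [args_ptr] "r"(args), [output_ptr] "r"(output_ptr), [bias] "r"(bias)
        : "x13", "x15", "memory");
}
```

Passing the pointers as operands lets the compiler keep them in registers across the `asm` block instead of spilling them through memory, which is also why the Height 4 path below can write the advanced output pointer back with `madd %x[output_ptr], ...` rather than a `str` into the struct.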
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24/generic.cpp
index 1f7804453c..88547ef3b3 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24/generic.cpp
@@ -50,19 +50,18 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 size_t output_offset = {};
 size_t input_initial_col = {};
 size_t input_offset = {};
- void *output_ptr = {};
- const float *bias = {};
 } ka;
 unsigned long flags=0;
+ void *output_ptr;
 void *input_ptr;
 if (output_arg.is_indirect) {
- ka.output_ptr=(void *)(output_arg.indirect.ptr);
+ output_ptr=(void *)(output_arg.indirect.ptr);
 ka.output_offset=output_arg.indirect.offset;
 flags |= 0x4;
 } else {
- ka.output_ptr=(void *)(output_arg.direct.base);
+ output_ptr=(void *)(output_arg.direct.base);
 ka.output_offset=output_arg.direct.stride;
 }
@@ -83,7 +82,6 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 ka.string_lengths = string_lengths;
 ka.N = N;
 ka.B_ptr = B_ptr;
- ka.bias = bias;
 ka.B_stride = B_stride;
 switch(act.type) {
 default:
@@ -105,14 +103,13 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "bgt 89f\n"
 "beq 45f\n"
 "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
+ "mov x15, %x[bias]\n"
 "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
 "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "mov x13, %x[output_ptr]\n"
 "2:" // Height 1: Column loop
 "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
 "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "cmp x14, #0x14\n"
 "add x11, x12, x20, LSL #1\n"
 "add x10, x11, x20, LSL #1\n"
 "add x9, x10, x20, LSL #1\n"
@@ -120,6 +117,7 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "add x27, x28, x20, LSL #1\n"
 "add x20, x27, x20, LSL #1\n"
 "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x14, #0x14\n"
 "bgt 3f\n"
 "cmp x14, #0x10\n"
 "mov x27, x12\n"
@@ -138,19 +136,19 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "cbz x15, 4f\n"
 "ldr q8, [x15, #0x0]\n"
 "ldr q9, [x15, #0x10]\n"
- "ldr q10, [x15, #0x20]\n"
- "ldr q11, [x15, #0x30]\n"
- "ldr q12, [x15, #0x40]\n"
- "ldr q13, [x15, #0x50]\n"
- "add x15, x15, #0x60\n"
 "zip2 v14.2d, v8.2d, v8.2d\n"
 "zip1 v8.2d, v8.2d, v8.2d\n"
+ "ldr q10, [x15, #0x20]\n"
+ "ldr q11, [x15, #0x30]\n"
 "zip2 v15.2d, v9.2d, v9.2d\n"
 "zip1 v9.2d, v9.2d, v9.2d\n"
+ "ldr q12, [x15, #0x40]\n"
+ "ldr q13, [x15, #0x50]\n"
 "zip2 v16.2d, v10.2d, v10.2d\n"
 "zip1 v10.2d, v10.2d, v10.2d\n"
 "zip2 v17.2d, v11.2d, v11.2d\n"
 "zip1 v11.2d, v11.2d, v11.2d\n"
+ "add x15, x15, #0x60\n"
 "zip2 v18.2d, v12.2d, v12.2d\n"
 "zip1 v12.2d, v12.2d, v12.2d\n"
 "zip2 v19.2d, v13.2d, v13.2d\n"
@@ -284,8 +282,8 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "mov x26, #0x0\n"
 "21:" // Height 1: String loop
 "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
 "ldr w25, [x20, x26, LSL #0x2]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
 "tbz %x[flags], #3, 22f\n"
 "ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
 "add x20, x20, x21, LSL #3\n"
@@ -308,32 +306,28 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "blt 25f\n"
 "24:" // Height 1: Multiply loop: Main loop head
 ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- "sub x25, x25, #0x4\n"
- "add x12, x12, #0x20\n"
- "cmp x25, #0x8\n"
- "add x11, x11, #0x20\n"
 ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
 "ldr q24, [x10, #0x0]\n"
 ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
 "ldr q23, [x10, #0x10]\n"
- "add x10, x10, #0x20\n"
 ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
 "ldr q22, [x9, #0x0]\n"
 ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
 "ldr q21, [x9, #0x10]\n"
- "add x9, x9, #0x20\n"
 ".inst 0x6e58ec0a // bfmmla v10.4s, v0.8h, v24.8h\n"
 "ldr q24, [x28, #0x0]\n"
 ".inst 0x6e57ec10 // bfmmla v16.4s, v0.8h, v23.8h\n"
 "ldr q23, [x28, #0x10]\n"
- "add x28, x28, #0x20\n"
 ".inst 0x6e56ec0b // bfmmla v11.4s, v0.8h, v22.8h\n"
 "ldr q22, [x27, #0x0]\n"
 ".inst 0x6e55ec11 // bfmmla v17.4s, v0.8h, v21.8h\n"
 "ldr q21, [x27, #0x10]\n"
- "add x27, x27, #0x20\n"
+ "sub x25, x25, #0x4\n"
+ "cmp x25, #0x8\n"
 ".inst 0x6e58ec0c // bfmmla v12.4s, v0.8h, v24.8h\n"
+ "add x12, x12, #0x20\n"
 "ldr q4, [x12, #0x0]\n"
+ "add x11, x11, #0x20\n"
 ".inst 0x6e57ec12 // bfmmla v18.4s, v0.8h, v23.8h\n"
 "ldr q5, [x12, #0x10]\n"
 ".inst 0x6e56ec0d // bfmmla v13.4s, v0.8h, v22.8h\n"
@@ -341,36 +335,40 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 ".inst 0x6e55ec13 // bfmmla v19.4s, v0.8h, v21.8h\n"
 "ld1 { v0.4s }, [x24], #0x10\n"
 "ldr q7, [x11, #0x10]\n"
+ "add x10, x10, #0x20\n"
+ "add x9, x9, #0x20\n"
+ "add x28, x28, #0x20\n"
+ "add x27, x27, #0x20\n"
 "bge 24b\n"
 "25:" // Height 1: Multiply loop: Single iteration only
 ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- "sub x25, x25, #0x4\n"
- "add x12, x12, #0x20\n"
- "add x11, x11, #0x20\n"
 ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "ldr q23, [x10, #0x0]\n"
+ "ldr q22, [x10, #0x0]\n"
 ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
- "ldr q22, [x10, #0x10]\n"
- "add x10, x10, #0x20\n"
+ "ldr q25, [x10, #0x10]\n"
 ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
 "ldr q21, [x9, #0x0]\n"
 ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
 "ldr q24, [x9, #0x10]\n"
- "add x9, x9, #0x20\n"
- ".inst 0x6e57ec0a // bfmmla v10.4s, v0.8h, v23.8h\n"
+ ".inst 0x6e56ec0a // bfmmla v10.4s, v0.8h, v22.8h\n"
 "ldr q23, [x28, #0x0]\n"
- ".inst 0x6e56ec10 // bfmmla v16.4s, v0.8h, v22.8h\n"
+ ".inst 0x6e59ec10 // bfmmla v16.4s, v0.8h, v25.8h\n"
 "ldr q22, [x28, #0x10]\n"
- "add x28, x28, #0x20\n"
 ".inst 0x6e55ec0b // bfmmla v11.4s, v0.8h, v21.8h\n"
 "ldr q21, [x27, #0x0]\n"
 ".inst 0x6e58ec11 // bfmmla v17.4s, v0.8h, v24.8h\n"
 "ldr q3, [x27, #0x10]\n"
- "add x27, x27, #0x20\n"
+ "sub x25, x25, #0x4\n"
 ".inst 0x6e57ec0c // bfmmla v12.4s, v0.8h, v23.8h\n"
 ".inst 0x6e56ec12 // bfmmla v18.4s, v0.8h, v22.8h\n"
+ "add x12, x12, #0x20\n"
+ "add x11, x11, #0x20\n"
 ".inst 0x6e55ec0d // bfmmla v13.4s, v0.8h, v21.8h\n"
 ".inst 0x6e43ec13 // bfmmla v19.4s, v0.8h, v3.8h\n"
+ "add x10, x10, #0x20\n"
+ "add x9, x9, #0x20\n"
+ "add x28, x28, #0x20\n"
+ "add x27, x27, #0x20\n"
 "26:" // Height 1: Multiply loop: Main loop skip
 "cbz x25, 29f\n"
 "tbz x25, #1, 27f\n"
 "ldr d0, [x24], #0x8\n"
 "tbz x25, #0, 28f\n"
 "ld1 { v0.s }[2], [x24]\n"
 "b 28f\n"
@@ -382,37 +380,37 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "27:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
 "ldr s0, [x24, #0x0]\n"
 "28:" // Height 1: Multiply loop: Ragged operand read: Done
- "ldr q23, [x12, #0x0]\n"
- "ldr q29, [x12, #0x10]\n"
+ "ldr q21, [x12, #0x0]\n"
+ "ldr q30, [x12, #0x10]\n"
 ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- "add x12, x12, #0x20\n"
- "ldr q22, [x11, #0x0]\n"
- "ldr q21, [x11, #0x10]\n"
- "add x11, x11, #0x20\n"
- ".inst 0x6e57ec08 // bfmmla v8.4s, v0.8h, v23.8h\n"
- "ldr q24, [x10, #0x0]\n"
- ".inst 0x6e5dec0e // bfmmla v14.4s, v0.8h, v29.8h\n"
+ ".inst 0x6e55ec08 // bfmmla v8.4s, v0.8h, v21.8h\n"
+ "ldr q21, [x11, #0x0]\n"
+ "ldr q22, [x11, #0x10]\n"
+ ".inst 0x6e5eec0e // bfmmla v14.4s, v0.8h, v30.8h\n"
+ ".inst 0x6e55ec09 // bfmmla v9.4s, v0.8h, v21.8h\n"
+ "ldr q21, [x10, #0x0]\n"
 "ldr q23, [x10, #0x10]\n"
- "add x10, x10, #0x20\n"
- ".inst 0x6e56ec09 // bfmmla v9.4s, v0.8h, v22.8h\n"
- "ldr q22, [x9, #0x0]\n"
- ".inst 0x6e55ec0f // bfmmla v15.4s, v0.8h, v21.8h\n"
- "ldr q21, [x9, #0x10]\n"
- "add x9, x9, #0x20\n"
- ".inst 0x6e58ec0a // bfmmla v10.4s, v0.8h, v24.8h\n"
- "ldr q24, [x28, #0x0]\n"
+ ".inst 0x6e56ec0f // bfmmla v15.4s, v0.8h, v22.8h\n"
+ ".inst 0x6e55ec0a // bfmmla v10.4s, v0.8h, v21.8h\n"
+ "ldr q21, [x9, #0x0]\n"
+ "ldr q22, [x9, #0x10]\n"
 ".inst 0x6e57ec10 // bfmmla v16.4s, v0.8h, v23.8h\n"
+ ".inst 0x6e55ec0b // bfmmla v11.4s, v0.8h, v21.8h\n"
+ "ldr q21, [x28, #0x0]\n"
 "ldr q23, [x28, #0x10]\n"
- "add x28, x28, #0x20\n"
- ".inst 0x6e56ec0b // bfmmla v11.4s, v0.8h, v22.8h\n"
+ ".inst 0x6e56ec11 // bfmmla v17.4s, v0.8h, v22.8h\n"
+ ".inst 0x6e55ec0c // bfmmla v12.4s, v0.8h, v21.8h\n"
 "ldr q22, [x27, #0x0]\n"
- ".inst 0x6e55ec11 // bfmmla v17.4s, v0.8h, v21.8h\n"
 "ldr q21, [x27, #0x10]\n"
- "add x27, x27, #0x20\n"
- ".inst 0x6e58ec0c // bfmmla v12.4s, v0.8h, v24.8h\n"
 ".inst 0x6e57ec12 // bfmmla v18.4s, v0.8h, v23.8h\n"
 ".inst 0x6e56ec0d // bfmmla v13.4s, v0.8h, v22.8h\n"
 ".inst 0x6e55ec13 // bfmmla v19.4s, v0.8h, v21.8h\n"
+ "add x12, x12, #0x20\n"
+ "add x11, x11, #0x20\n"
+ "add x10, x10, #0x20\n"
+ "add x9, x9, #0x20\n"
+ "add x28, x28, #0x20\n"
+ "add x27, x27, #0x20\n"
 "29:" // Height 1: Multiply loop: No odd multiplies
 "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
 "add x26, x26, #0x1\n"
@@ -425,9 +423,9 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "uzp1 v12.2d, v12.2d, v18.2d\n"
 "uzp1 v13.2d, v13.2d, v19.2d\n"
 "tbz %x[flags], #1, 30f\n"
- "add x21, %x[args_ptr], %[offset_max]\n"
+ "add x20, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v22.4s }, [x20]\n"
 "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v22.4s }, [x21]\n"
 "ld1r { v21.4s }, [x20]\n"
 "fmin v8.4s, v8.4s, v22.4s\n"
 "fmin v9.4s, v9.4s, v22.4s\n"
@@ -531,14 +529,13 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "b 178f\n"
 "45:" // Height 2
 "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
+ "mov x15, %x[bias]\n"
 "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
 "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "mov x13, %x[output_ptr]\n"
 "46:" // Height 2: Column loop
 "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
 "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "cmp x14, #0x14\n"
 "add x11, x12, x20, LSL #1\n"
 "add x10, x11, x20, LSL #1\n"
 "add x9, x10, x20, LSL #1\n"
@@ -546,6 +543,7 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "add x27, x28, x20, LSL #1\n"
 "add x20, x27, x20, LSL #1\n"
 "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x14, #0x14\n"
 "bgt 47f\n"
 "cmp x14, #0x10\n"
 "mov x27, x12\n"
@@ -564,19 +562,19 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "cbz x15, 48f\n"
 "ldr q8, [x15, #0x0]\n"
 "ldr q9, [x15, #0x10]\n"
- "ldr q10, [x15, #0x20]\n"
- "ldr q11, [x15, #0x30]\n"
- "ldr q12, [x15, #0x40]\n"
- "ldr q13, [x15, #0x50]\n"
- "add x15, x15, #0x60\n"
 "zip2 v14.2d, v8.2d, v8.2d\n"
 "zip1 v8.2d, v8.2d, v8.2d\n"
+ "ldr q10, [x15, #0x20]\n"
+ "ldr q11, [x15, #0x30]\n"
 "zip2 v15.2d, v9.2d, v9.2d\n"
 "zip1 v9.2d, v9.2d, v9.2d\n"
+ "ldr q12, [x15, #0x40]\n"
+ "ldr q13, [x15, #0x50]\n"
 "zip2 v16.2d, v10.2d, v10.2d\n"
 "zip1 v10.2d, v10.2d, v10.2d\n"
 "zip2 v17.2d, v11.2d, v11.2d\n"
 "zip1 v11.2d, v11.2d, v11.2d\n"
+ "add x15, x15, #0x60\n"
 "zip2 v18.2d, v12.2d, v12.2d\n"
 "zip1 v12.2d, v12.2d, v12.2d\n"
 "zip2 v19.2d, v13.2d, v13.2d\n"
@@ -586,117 +584,117 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "tbz %x[flags], #0, 63f\n"
 "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
 "cmp x14, #0x18\n"
- "add x24, x13, x20, LSL #2\n"
+ "add x23, x13, x20, LSL #2\n"
 "bge 61f\n"
 "tbz x14, #4, 52f\n"
 "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
 "ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v15.4s }, [x24], #0x10\n"
+ "ld1 { v15.4s }, [x23], #0x10\n"
 "ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
 "ld1 { v12.4s }, [x13], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
 "tbz x14, #2, 50f\n"
 "ld1 { v13.4s }, [x13], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
 "tbz x14, #1, 49f\n"
 "ldr d20, [x13], #0x8\n"
- "ldr d19, [x24], #0x8\n"
+ "ldr d19, [x23], #0x8\n"
 "mov x20, #0x58\n"
 "tbz x14, #0, 60f\n"
 "ld1 { v20.s }[2], [x13]\n"
- "ld1 { v19.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
 "b 60f\n"
 "49:" // Height 2: Partial accumulate: partial_1_20
 "mov x20, #0x50\n"
 "tbz x14, #0, 60f\n"
 "ldr s20, [x13, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
 "b 60f\n"
 "50:" // Height 2: Partial accumulate: partial_2_16
 "tbz x14, #1, 51f\n"
 "ldr d13, [x13], #0x8\n"
- "ldr d18, [x24], #0x8\n"
+ "ldr d18, [x23], #0x8\n"
 "mov x20, #0x48\n"
 "tbz x14, #0, 60f\n"
 "ld1 { v13.s }[2], [x13]\n"
- "ld1 { v18.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
 "b 60f\n"
 "51:" // Height 2: Partial accumulate: partial_1_16
 "mov x20, #0x40\n"
 "tbz x14, #0, 60f\n"
 "ldr s13, [x13, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
 "b 60f\n"
 "52:" // Height 2: Partial accumulate: partial_8_0
 "tbz x14, #3, 56f\n"
 "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
 "ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v15.4s }, [x24], #0x10\n"
+ "ld1 { v15.4s }, [x23], #0x10\n"
 "tbz x14, #2, 54f\n"
 "ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
 "tbz x14, #1, 53f\n"
 "ldr d12, [x13], #0x8\n"
- "ldr d17, [x24], #0x8\n"
+ "ldr d17, [x23], #0x8\n"
 "mov x20, #0x38\n"
 "tbz x14, #0, 60f\n"
 "ld1 { v12.s }[2], [x13]\n"
- "ld1 { v17.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
 "b 60f\n"
 "53:" // Height 2: Partial accumulate: partial_1_12
 "mov x20, #0x30\n"
 "tbz x14, #0, 60f\n"
 "ldr s12, [x13, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
 "b 60f\n"
 "54:" // Height 2: Partial accumulate: partial_2_8
 "tbz x14, #1, 55f\n"
 "ldr d11, [x13], #0x8\n"
- "ldr d16, [x24], #0x8\n"
+ "ldr d16, [x23], #0x8\n"
 "mov x20, #0x28\n"
 "tbz x14, #0, 60f\n"
 "ld1 { v11.s }[2], [x13]\n"
- "ld1 { v16.s }[2], [x24]\n"
+ "ld1 { v16.s }[2], [x23]\n"
 "b 60f\n"
 "55:" // Height 2: Partial accumulate: partial_1_8
 "mov x20, #0x20\n"
 "tbz x14, #0, 60f\n"
 "ldr s11, [x13, #0x0]\n"
- "ldr s16, [x24, #0x0]\n"
+ "ldr s16, [x23, #0x0]\n"
 "b 60f\n"
 "56:" // Height 2: Partial accumulate: partial_4_0
 "tbz x14, #2, 58f\n"
 "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
 "tbz x14, #1, 57f\n"
 "ldr d10, [x13], #0x8\n"
- "ldr d15, [x24], #0x8\n"
+ "ldr d15, [x23], #0x8\n"
 "mov x20, #0x18\n"
 "tbz x14, #0, 60f\n"
 "ld1 { v10.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v15.s }[2], [x23]\n"
 "b 60f\n"
 "57:" // Height 2: Partial accumulate: partial_1_4
 "mov x20, #0x10\n"
 "tbz x14, #0, 60f\n"
 "ldr s10, [x13, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
 "b 60f\n"
 "58:" // Height 2: Partial accumulate: partial_2_0
 "tbz x14, #1, 59f\n"
 "ldr d9, [x13], #0x8\n"
- "ldr d14, [x24], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
 "mov x20, #0x8\n"
 "tbz x14, #0, 60f\n"
 "ld1 { v9.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v14.s }[2], [x23]\n"
 "b 60f\n"
 "59:" // Height 2: Partial accumulate: partial_1_0
 "ldr s9, [x13, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
 "mov x20, #0x0\n"
 "60:" // Height 2: Partial accumulate: Done
 "sub x13, x13, x20\n"
@@ -708,12 +706,12 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "ldr q12, [x13, #0x30]\n"
 "ldr q13, [x13, #0x40]\n"
 "ldr q20, [x13, #0x50]\n"
- "ldr q14, [x24, #0x0]\n"
- "ldr q15, [x24, #0x10]\n"
- "ldr q16, [x24, #0x20]\n"
- "ldr q17, [x24, #0x30]\n"
- "ldr q18, [x24, #0x40]\n"
- "ldr q19, [x24, #0x50]\n"
+ "ldr q14, [x23, #0x0]\n"
+ "ldr q15, [x23, #0x10]\n"
+ "ldr q16, [x23, #0x20]\n"
+ "ldr q17, [x23, #0x30]\n"
+ "ldr q18, [x23, #0x40]\n"
+ "ldr q19, [x23, #0x50]\n"
 "62:" // Height 2: MMLA fixup
 "zip1 v8.2d, v9.2d, v14.2d\n"
 "zip2 v14.2d, v9.2d, v14.2d\n"
@@ -745,8 +743,8 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "mov x26, #0x0\n"
 "65:" // Height 2: String loop
 "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
 "ldr w25, [x20, x26, LSL #0x2]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
 "tbz %x[flags], #3, 66f\n"
 "ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
 "add x20, x20, x21, LSL #3\n"
@@ -773,72 +771,72 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "blt 69f\n"
 "68:" // Height 2: Multiply loop: Main loop head
 ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- "sub x25, x25, #0x4\n"
- "add x12, x12, #0x20\n"
- "cmp x25, #0x8\n"
- "add x11, x11, #0x20\n"
 ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
 "ld1 { v1.4s }, [x23], #0x10\n"
 ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "ldr q29, [x10, #0x0]\n"
+ "ldr q30, [x10, #0x0]\n"
 ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
 "ldr q23, [x10, #0x10]\n"
- "add x10, x10, #0x20\n"
 ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
 "ldr q22, [x9, #0x0]\n"
 ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
 "ldr q21, [x9, #0x10]\n"
- "add x9, x9, #0x20\n"
- ".inst 0x6e5dec0a // bfmmla v10.4s, v0.8h, v29.8h\n"
- "ldr q30, [x28, #0x0]\n"
+ ".inst 0x6e5eec0a // bfmmla v10.4s, v0.8h, v30.8h\n"
+ "ldr q2, [x28, #0x0]\n"
 ".inst 0x6e57ec10 // bfmmla v16.4s, v0.8h, v23.8h\n"
 "ldr q23, [x28, #0x10]\n"
- "add x28, x28, #0x20\n"
 ".inst 0x6e56ec0b // bfmmla v11.4s, v0.8h, v22.8h\n"
 "ldr q22, [x27, #0x0]\n"
 ".inst 0x6e55ec11 // bfmmla v17.4s, v0.8h, v21.8h\n"
 "ldr q21, [x27, #0x10]\n"
- "add x27, x27, #0x20\n"
- ".inst 0x6e5eec0c // bfmmla v12.4s, v0.8h, v30.8h\n"
+ "sub x25, x25, #0x4\n"
+ "cmp x25, #0x8\n"
+ "add x12, x12, #0x20\n"
+ ".inst 0x6e42ec0c // bfmmla v12.4s, v0.8h, v2.8h\n"
 "ldr q4, [x12, #0x0]\n"
+ "add x11, x11, #0x20\n"
 ".inst 0x6e57ec12 // bfmmla v18.4s, v0.8h, v23.8h\n"
 "ldr q5, [x12, #0x10]\n"
 ".inst 0x6e56ec0d // bfmmla v13.4s, v0.8h, v22.8h\n"
 "ldr q6, [x11, #0x0]\n"
 ".inst 0x6e55ec13 // bfmmla v19.4s, v0.8h, v21.8h\n"
 "ld1 { v0.4s }, [x24], #0x10\n"
+ "add x10, x10, #0x20\n"
 "ldr q7, [x11, #0x10]\n"
+ "add x9, x9, #0x20\n"
+ "add x28, x28, #0x20\n"
+ "add x27, x27, #0x20\n"
 "bge 68b\n"
 "69:" // Height 2: Multiply loop: Single iteration only
 ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- "sub x25, x25, #0x4\n"
- "add x12, x12, #0x20\n"
- "add x11, x11, #0x20\n"
 ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
 ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
 "ldr q24, [x10, #0x0]\n"
 ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
 "ldr q23, [x10, #0x10]\n"
- "add x10, x10, #0x20\n"
 ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
 "ldr q22, [x9, #0x0]\n"
 ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
 "ldr q21, [x9, #0x10]\n"
- "add x9, x9, #0x20\n"
 ".inst 0x6e58ec0a // bfmmla v10.4s, v0.8h, v24.8h\n"
 "ldr q24, [x28, #0x0]\n"
 ".inst 0x6e57ec10 // bfmmla v16.4s, v0.8h, v23.8h\n"
 "ldr q23, [x28, #0x10]\n"
- "add x28, x28, #0x20\n"
 ".inst 0x6e56ec0b // bfmmla v11.4s, v0.8h, v22.8h\n"
 "ldr q22, [x27, #0x0]\n"
 ".inst 0x6e55ec11 // bfmmla v17.4s, v0.8h, v21.8h\n"
 "ldr q21, [x27, #0x10]\n"
- "add x27, x27, #0x20\n"
+ "sub x25, x25, #0x4\n"
 ".inst 0x6e58ec0c // bfmmla v12.4s, v0.8h, v24.8h\n"
 ".inst 0x6e57ec12 // bfmmla v18.4s, v0.8h, v23.8h\n"
+ "add x12, x12, #0x20\n"
+ "add x11, x11, #0x20\n"
 ".inst 0x6e56ec0d // bfmmla v13.4s, v0.8h, v22.8h\n"
 ".inst 0x6e55ec13 // bfmmla v19.4s, v0.8h, v21.8h\n"
+ "add x10, x10, #0x20\n"
+ "add x9, x9, #0x20\n"
+ "add x28, x28, #0x20\n"
+ "add x27, x27, #0x20\n"
 "70:" // Height 2: Multiply loop: Main loop skip
 "cbz x25, 73f\n"
@@ -856,35 +854,35 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "ldr q24, [x12, #0x0]\n"
 "ldr q23, [x12, #0x10]\n"
 ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- "add x12, x12, #0x20\n"
+ ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
 "ldr q22, [x11, #0x0]\n"
 "ldr q21, [x11, #0x10]\n"
- "add x11, x11, #0x20\n"
- ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
 ".inst 0x6e58ec08 // bfmmla v8.4s, v0.8h, v24.8h\n"
- "ldr q24, [x10, #0x0]\n"
 ".inst 0x6e57ec0e // bfmmla v14.4s, v0.8h, v23.8h\n"
+ "ldr q24, [x10, #0x0]\n"
 "ldr q23, [x10, #0x10]\n"
- "add x10, x10, #0x20\n"
 ".inst 0x6e56ec09 // bfmmla v9.4s, v0.8h, v22.8h\n"
- "ldr q22, [x9, #0x0]\n"
 ".inst 0x6e55ec0f // bfmmla v15.4s, v0.8h, v21.8h\n"
+ "ldr q22, [x9, #0x0]\n"
 "ldr q21, [x9, #0x10]\n"
- "add x9, x9, #0x20\n"
 ".inst 0x6e58ec0a // bfmmla v10.4s, v0.8h, v24.8h\n"
- "ldr q24, [x28, #0x0]\n"
 ".inst 0x6e57ec10 // bfmmla v16.4s, v0.8h, v23.8h\n"
+ "ldr q24, [x28, #0x0]\n"
 "ldr q23, [x28, #0x10]\n"
- "add x28, x28, #0x20\n"
 ".inst 0x6e56ec0b // bfmmla v11.4s, v0.8h, v22.8h\n"
- "ldr q22, [x27, #0x0]\n"
 ".inst 0x6e55ec11 // bfmmla v17.4s, v0.8h, v21.8h\n"
+ "ldr q22, [x27, #0x0]\n"
 "ldr q21, [x27, #0x10]\n"
- "add x27, x27, #0x20\n"
 ".inst 0x6e58ec0c // bfmmla v12.4s, v0.8h, v24.8h\n"
 ".inst 0x6e57ec12 // bfmmla v18.4s, v0.8h, v23.8h\n"
 ".inst 0x6e56ec0d // bfmmla v13.4s, v0.8h, v22.8h\n"
 ".inst 0x6e55ec13 // bfmmla v19.4s, v0.8h, v21.8h\n"
+ "add x12, x12, #0x20\n"
+ "add x11, x11, #0x20\n"
+ "add x10, x10, #0x20\n"
+ "add x9, x9, #0x20\n"
+ "add x28, x28, #0x20\n"
+ "add x27, x27, #0x20\n"
 "73:" // Height 2: Multiply loop: No odd multiplies
 "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
 "add x26, x26, #0x1\n"
@@ -893,21 +891,21 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
 "uzp1 v4.2d, v8.2d, v14.2d\n"
 "uzp2 v8.2d, v8.2d, v14.2d\n"
+ "add x23, x13, x20, LSL #2\n"
 "uzp1 v14.2d, v9.2d, v15.2d\n"
 "uzp2 v9.2d, v9.2d, v15.2d\n"
 "uzp1 v15.2d, v10.2d, v16.2d\n"
 "uzp2 v10.2d, v10.2d, v16.2d\n"
 "uzp1 v16.2d, v11.2d, v17.2d\n"
 "uzp2 v11.2d, v11.2d, v17.2d\n"
- "add x24, x13, x20, LSL #2\n"
 "uzp1 v17.2d, v12.2d, v18.2d\n"
 "uzp2 v12.2d, v12.2d, v18.2d\n"
 "uzp1 v18.2d, v13.2d, v19.2d\n"
 "uzp2 v13.2d, v13.2d, v19.2d\n"
 "tbz %x[flags], #1, 74f\n"
- "add x21, %x[args_ptr], %[offset_max]\n"
+ "add x20, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v22.4s }, [x20]\n"
 "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v22.4s }, [x21]\n"
 "ld1r { v21.4s }, [x20]\n"
 "fmin v4.4s, v4.4s, v22.4s\n"
 "fmin v14.4s, v14.4s, v22.4s\n"
@@ -941,99 +939,99 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "st1 { v14.4s }, [x13], #0x10\n"
 "st1 { v15.4s }, [x13], #0x10\n"
 "st1 { v16.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x24], #0x10\n"
- "st1 { v9.4s }, [x24], #0x10\n"
- "st1 { v10.4s }, [x24], #0x10\n"
- "st1 { v11.4s }, [x24], #0x10\n"
+ "st1 { v8.4s }, [x23], #0x10\n"
+ "st1 { v9.4s }, [x23], #0x10\n"
+ "st1 { v10.4s }, [x23], #0x10\n"
+ "st1 { v11.4s }, [x23], #0x10\n"
 "tbz x14, #2, 76f\n"
 "st1 { v17.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
 "tbz x14, #1, 75f\n"
 "str d18, [x13], #0x8\n"
- "str d13, [x24], #0x8\n"
+ "str d13, [x23], #0x8\n"
 "tbz x14, #0, 86f\n"
 "st1 { v18.s }[2], [x13]\n"
- "st1 { v13.s }[2], [x24]\n"
+ "st1 { v13.s }[2], [x23]\n"
 "b 86f\n"
 "75:" // Height 2: Partial direct writeback: partial_1_20
 "tbz x14, #0, 86f\n"
 "str s18, [x13, #0x0]\n"
- "str s13, [x24, #0x0]\n"
+ "str s13, [x23, #0x0]\n"
 "b 86f\n"
 "76:" // Height 2: Partial direct writeback: partial_2_16
 "tbz x14, #1, 77f\n"
 "str d17, [x13], #0x8\n"
- "str d12, [x24], #0x8\n"
+ "str d12, [x23], #0x8\n"
 "tbz x14, #0, 86f\n"
 "st1 { v17.s }[2], [x13]\n"
- "st1 { v12.s }[2], [x24]\n"
+ "st1 { v12.s }[2], [x23]\n"
 "b 86f\n"
 "77:" // Height 2: Partial direct writeback: partial_1_16
 "tbz x14, #0, 86f\n"
 "str s17, [x13, #0x0]\n"
- "str s12, [x24, #0x0]\n"
+ "str s12, [x23, #0x0]\n"
 "b 86f\n"
 "78:" // Height 2: Partial direct writeback: partial_8_0
 "tbz x14, #3, 82f\n"
 "st1 { v4.4s }, [x13], #0x10\n"
 "st1 { v14.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x24], #0x10\n"
- "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v8.4s }, [x23], #0x10\n"
+ "st1 { v9.4s }, [x23], #0x10\n"
 "tbz x14, #2, 80f\n"
 "st1 { v15.4s }, [x13], #0x10\n"
- "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v10.4s }, [x23], #0x10\n"
 "tbz x14, #1, 79f\n"
 "str d16, [x13], #0x8\n"
- "str d11, [x24], #0x8\n"
+ "str d11, [x23], #0x8\n"
 "tbz x14, #0, 86f\n"
 "st1 { v16.s }[2], [x13]\n"
- "st1 { v11.s }[2], [x24]\n"
+ "st1 { v11.s }[2], [x23]\n"
 "b 86f\n"
 "79:" // Height 2: Partial direct writeback: partial_1_12
 "tbz x14, #0, 86f\n"
 "str s16, [x13, #0x0]\n"
- "str s11, [x24, #0x0]\n"
+ "str s11, [x23, #0x0]\n"
 "b 86f\n"
 "80:" // Height 2: Partial direct writeback: partial_2_8
 "tbz x14, #1, 81f\n"
 "str d15, [x13], #0x8\n"
- "str d10, [x24], #0x8\n"
+ "str d10, [x23], #0x8\n"
 "tbz x14, #0, 86f\n"
 "st1 { v15.s }[2], [x13]\n"
- "st1 { v10.s }[2], [x24]\n"
+ "st1 { v10.s }[2], [x23]\n"
 "b 86f\n"
 "81:" // Height 2: Partial direct writeback: partial_1_8
 "tbz x14, #0, 86f\n"
 "str s15, [x13, #0x0]\n"
- "str s10, [x24, #0x0]\n"
+ "str s10, [x23, #0x0]\n"
 "b 86f\n"
 "82:" // Height 2: Partial direct writeback: partial_4_0
 "tbz x14, #2, 84f\n"
 "st1 { v4.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v8.4s }, [x23], #0x10\n"
 "tbz x14, #1, 83f\n"
 "str d14, [x13], #0x8\n"
- "str d9, [x24], #0x8\n"
+ "str d9, [x23], #0x8\n"
 "tbz x14, #0, 86f\n"
 "st1 { v14.s }[2], [x13]\n"
- "st1 { v9.s }[2], [x24]\n"
+ "st1 { v9.s }[2], [x23]\n"
 "b 86f\n"
 "83:" // Height 2: Partial direct writeback: partial_1_4
 "tbz x14, #0, 86f\n"
 "str s14, [x13, #0x0]\n"
- "str s9, [x24, #0x0]\n"
+ "str s9, [x23, #0x0]\n"
 "b 86f\n"
 "84:" // Height 2: Partial direct writeback: partial_2_0
 "tbz x14, #1, 85f\n"
 "str d4, [x13], #0x8\n"
- "str d8, [x24], #0x8\n"
+ "str d8, [x23], #0x8\n"
 "tbz x14, #0, 86f\n"
 "st1 { v4.s }[2], [x13]\n"
- "st1 { v8.s }[2], [x24]\n"
+ "st1 { v8.s }[2], [x23]\n"
 "b 86f\n"
 "85:" // Height 2: Partial direct writeback: partial_1_0
 "str s4, [x13, #0x0]\n"
- "str s8, [x24, #0x0]\n"
+ "str s8, [x23, #0x0]\n"
 "86:" // Height 2: Partial direct writeback: Done
 "b 88f\n"
 "87:" // Height 2: Full writeback
@@ -1044,26 +1042,25 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "str q17, [x13, #0x40]\n"
 "str q18, [x13, #0x50]\n"
 "add x13, x13, #0x60\n"
- "str q8, [x24, #0x0]\n"
- "str q9, [x24, #0x10]\n"
- "str q10, [x24, #0x20]\n"
- "str q11, [x24, #0x30]\n"
- "str q12, [x24, #0x40]\n"
- "str q13, [x24, #0x50]\n"
+ "str q8, [x23, #0x0]\n"
+ "str q9, [x23, #0x10]\n"
+ "str q10, [x23, #0x20]\n"
+ "str q11, [x23, #0x30]\n"
+ "str q12, [x23, #0x40]\n"
+ "str q13, [x23, #0x50]\n"
 "88:" // Height 2: Writeback done
 "subs x14, x14, #0x18\n"
 "bgt 46b\n"
 "b 178f\n"
 "89:" // Height 3
 "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
+ "mov x15, %x[bias]\n"
 "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
 "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "mov x13, %x[output_ptr]\n"
 "90:" // Height 3: Column loop
 "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
 "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "cmp x14, #0x14\n"
 "add x11, x12, x20, LSL #1\n"
 "add x10, x11, x20, LSL #1\n"
 "add x9, x10, x20, LSL #1\n"
@@ -1071,6 +1068,7 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "add x27, x28, x20, LSL #1\n"
 "add x20, x27, x20, LSL #1\n"
 "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x14, #0x14\n"
 "bgt 91f\n"
 "cmp x14, #0x10\n"
 "mov x27, x12\n"
@@ -1089,19 +1087,19 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "cbz x15, 92f\n"
 "ldr q8, [x15, #0x0]\n"
 "ldr q9, [x15, #0x10]\n"
- "ldr q10, [x15, #0x20]\n"
- "ldr q11, [x15, #0x30]\n"
- "ldr q12, [x15, #0x40]\n"
- "ldr q13, [x15, #0x50]\n"
- "add x15, x15, #0x60\n"
 "zip2 v14.2d, v8.2d, v8.2d\n"
 "zip1 v8.2d, v8.2d, v8.2d\n"
+ "ldr q10, [x15, #0x20]\n"
+ "ldr q11, [x15, #0x30]\n"
 "zip2 v15.2d, v9.2d, v9.2d\n"
 "zip1 v9.2d, v9.2d, v9.2d\n"
+ "ldr q12, [x15, #0x40]\n"
+ "ldr q13, [x15, #0x50]\n"
 "zip2 v16.2d, v10.2d, v10.2d\n"
 "zip1 v10.2d, v10.2d, v10.2d\n"
 "zip2 v17.2d, v11.2d, v11.2d\n"
 "zip1 v11.2d, v11.2d, v11.2d\n"
+ "add x15, x15, #0x60\n"
 "zip2 v18.2d, v12.2d, v12.2d\n"
 "zip1 v12.2d, v12.2d, v12.2d\n"
 "zip2 v19.2d, v13.2d, v13.2d\n"
@@ -1122,147 +1120,147 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "92:" // Height 3: no bias
 "tbz %x[flags], #0, 107f\n"
 "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x23, x13, x20, LSL #2\n"
 "cmp x14, #0x18\n"
- "add x24, x13, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
 "bge 105f\n"
 "tbz x14, #4, 96f\n"
 "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
 "ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v15.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
+ "ld1 { v15.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
 "ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v23.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v23.4s }, [x22], #0x10\n"
 "ld1 { v12.4s }, [x13], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v24.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v24.4s }, [x22], #0x10\n"
 "tbz x14, #2, 94f\n"
 "ld1 { v13.4s }, [x13], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v25.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v25.4s }, [x22], #0x10\n"
 "tbz x14, #1, 93f\n"
 "ldr d20, [x13], #0x8\n"
- "ldr d19, [x24], #0x8\n"
+ "ldr d19, [x23], #0x8\n"
 "mov x20, #0x58\n"
- "ldr d4, [x23], #0x8\n"
+ "ldr d4, [x22], #0x8\n"
 "tbz x14, #0, 104f\n"
 "ld1 { v20.s }[2], [x13]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v4.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v4.s }[2], [x22]\n"
 "b 104f\n"
 "93:" // Height 3: Partial accumulate: partial_1_20
 "mov x20, #0x50\n"
 "tbz x14, #0, 104f\n"
 "ldr s20, [x13, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s4, [x23, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s4, [x22, #0x0]\n"
 "b 104f\n"
 "94:" // Height 3: Partial accumulate: partial_2_16
 "tbz x14, #1, 95f\n"
 "ldr d13, [x13], #0x8\n"
- "ldr d18, [x24], #0x8\n"
+ "ldr d18, [x23], #0x8\n"
 "mov x20, #0x48\n"
- "ldr d25, [x23], #0x8\n"
+ "ldr d25, [x22], #0x8\n"
 "tbz x14, #0, 104f\n"
 "ld1 { v13.s }[2], [x13]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v25.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v25.s }[2], [x22]\n"
 "b 104f\n"
 "95:" // Height 3: Partial accumulate: partial_1_16
 "mov x20, #0x40\n"
 "tbz x14, #0, 104f\n"
 "ldr s13, [x13, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s25, [x23, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s25, [x22, #0x0]\n"
 "b 104f\n"
 "96:" // Height 3: Partial accumulate: partial_8_0
 "tbz x14, #3, 100f\n"
 "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
 "ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v15.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
+ "ld1 { v15.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
 "tbz x14, #2, 98f\n"
 "ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v23.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v23.4s }, [x22], #0x10\n"
 "tbz x14, #1, 97f\n"
 "ldr d12, [x13], #0x8\n"
- "ldr d17, [x24], #0x8\n"
+ "ldr d17, [x23], #0x8\n"
 "mov x20, #0x38\n"
- "ldr d24, [x23], #0x8\n"
+ "ldr d24, [x22], #0x8\n"
 "tbz x14, #0, 104f\n"
 "ld1 { v12.s }[2], [x13]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v24.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v24.s }[2], [x22]\n"
 "b 104f\n"
 "97:" // Height 3: Partial accumulate: partial_1_12
 "mov x20, #0x30\n"
 "tbz x14, #0, 104f\n"
 "ldr s12, [x13, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s24, [x23, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s24, [x22, #0x0]\n"
 "b 104f\n"
 "98:" // Height 3: Partial accumulate: partial_2_8
 "tbz x14, #1, 99f\n"
 "ldr d11, [x13], #0x8\n"
- "ldr d16, [x24], #0x8\n"
+ "ldr d16, [x23], #0x8\n"
 "mov x20, #0x28\n"
- "ldr d23, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
 "tbz x14, #0, 104f\n"
 "ld1 { v11.s }[2], [x13]\n"
- "ld1 { v16.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
+ "ld1 { v16.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
 "b 104f\n"
 "99:" // Height 3: Partial accumulate: partial_1_8
 "mov x20, #0x20\n"
 "tbz x14, #0, 104f\n"
 "ldr s11, [x13, #0x0]\n"
- "ldr s16, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
+ "ldr s16, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
 "b 104f\n"
 "100:" // Height 3: Partial accumulate: partial_4_0
 "tbz x14, #2, 102f\n"
 "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
 "tbz x14, #1, 101f\n"
 "ldr d10, [x13], #0x8\n"
- "ldr d15, [x24], #0x8\n"
+ "ldr d15, [x23], #0x8\n"
 "mov x20, #0x18\n"
- "ldr d22, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
 "tbz x14, #0, 104f\n"
 "ld1 { v10.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
+ "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
 "b 104f\n"
 "101:" // Height 3: Partial accumulate: partial_1_4
 "mov x20, #0x10\n"
 "tbz x14, #0, 104f\n"
 "ldr s10, [x13, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
 "b 104f\n"
 "102:" // Height 3: Partial accumulate: partial_2_0
 "tbz x14, #1, 103f\n"
 "ldr d9, [x13], #0x8\n"
- "ldr d14, [x24], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
 "mov x20, #0x8\n"
- "ldr d21, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
 "tbz x14, #0, 104f\n"
 "ld1 { v9.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
+ "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
 "b 104f\n"
 "103:" // Height 3: Partial accumulate: partial_1_0
 "ldr s9, [x13, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
 "mov x20, #0x0\n"
- "ldr s21, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
 "104:" // Height 3: Partial accumulate: Done
 "sub x13, x13, x20\n"
 "b 106f\n"
@@ -1273,18 +1271,18 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "ldr q12, [x13, #0x30]\n"
 "ldr q13, [x13, #0x40]\n"
 "ldr q20, [x13, #0x50]\n"
- "ldr q14, [x24, #0x0]\n"
- "ldr q15, [x24, #0x10]\n"
- "ldr q16, [x24, #0x20]\n"
- "ldr q17, [x24, #0x30]\n"
- "ldr q18, [x24, #0x40]\n"
- "ldr q19, [x24, #0x50]\n"
- "ldr q21, [x23, #0x0]\n"
- "ldr q22, [x23, #0x10]\n"
- "ldr q23, [x23, #0x20]\n"
- "ldr q24, [x23, #0x30]\n"
- "ldr q25, [x23, #0x40]\n"
- "ldr q4, [x23, #0x50]\n"
+ "ldr q14, [x23, #0x0]\n"
+ "ldr q15, [x23, #0x10]\n"
+ "ldr q16, [x23, #0x20]\n"
+ "ldr q17, [x23, #0x30]\n"
+ "ldr q18, [x23, #0x40]\n"
+ "ldr q19, [x23, #0x50]\n"
+ "ldr q21, [x22, #0x0]\n"
+ "ldr q22, [x22, #0x10]\n"
+ "ldr q23, [x22, #0x20]\n"
+ "ldr q24, [x22, #0x30]\n"
+ "ldr q25, [x22, #0x40]\n"
+ "ldr q4, [x22, #0x50]\n"
 "106:" // Height 3: MMLA fixup
 "zip1 v8.2d, v9.2d, v14.2d\n"
 "zip2 v14.2d, v9.2d, v14.2d\n"
@@ -1340,8 +1338,8 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "mov x26, #0x0\n"
 "109:" // Height 3: String loop
 "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
 "ldr w25, [x20, x26, LSL #0x2]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
 "tbz %x[flags], #3, 110f\n"
 "ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
 "add x20, x20, x21, LSL #3\n"
@@ -1372,42 +1370,42 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "blt 113f\n"
 "112:" // Height 3: Multiply loop: Main loop head
 ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
- "sub x25, x25, #0x4\n"
- "add x12, x12, #0x20\n"
- "cmp x25, #0x8\n"
- "add x11, x11, #0x20\n"
 ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
 "ld1 { v1.4s }, [x23], #0x10\n"
- ".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
- ".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
- ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- ".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
 ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
+ ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ ".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
 "ldr q4, [x10, #0x0]\n"
 ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
 "ldr q5, [x10, #0x10]\n"
 ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
 "sub x25, x25, #0x4\n"
+ ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
 "ldr q6, [x9, #0x0]\n"
 ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
 "cmp x25, #0x8\n"
+ ".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
 "ldr q3, [x9, #0x10]\n"
- "add x10, x10, #0x20\n"
- "add x9, x9, #0x20\n"
 ".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
+ "add x12, x12, #0x20\n"
 ".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
 "ldr q4, [x28, #0x0]\n"
 ".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
+ "add x11, x11, #0x20\n"
 ".inst 0x6e45ec5c // bfmmla v28.4s, v2.8h, v5.8h\n"
 "ldr q5, [x28, #0x10]\n"
 ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
+ "add x10, x10, #0x20\n"
 ".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
 "ldr q6, [x27, #0x0]\n"
- "add x28, x28, #0x20\n"
 ".inst 0x6e43ec11 // bfmmla v17.4s, v0.8h, v3.8h\n"
+ "add x9, x9, #0x20\n"
 ".inst 0x6e43ec5d // bfmmla v29.4s, v2.8h, v3.8h\n"
 "ldr q3, [x27, #0x10]\n"
- "add x27, x27, #0x20\n"
+ "add x28, x28, #0x20\n"
 ".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
+ "add x27, x27, #0x20\n"
 ".inst 0x6e44ec58 // bfmmla v24.4s, v2.8h, v4.8h\n"
 "ldr q4, [x12, #0x0]\n"
 ".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
@@ -1424,35 +1422,35 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "bge 112b\n"
 "113:" // Height 3: Multiply loop: Single iteration only
 ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
- "sub x25, x25, #0x4\n"
- "add x12, x12, #0x20\n"
- "add x11, x11, #0x20\n"
 ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
- ".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
- ".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
- ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- ".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
 ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
 "sub x25, x25, #0x4\n"
+ ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ ".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
 "ldr q3, [x10, #0x0]\n"
 ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
 "ldr q4, [x10, #0x10]\n"
 ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
 "add x12, x12, #0x20\n"
+ ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
 "ldr q6, [x9, #0x0]\n"
 ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
 "add x11, x11, #0x20\n"
+ ".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
 "ldr q1, [x9, #0x10]\n"
- "add x10, x10, #0x20\n"
- "add x9, x9, #0x20\n"
 ".inst 0x6e43ec0a // bfmmla v10.4s, v0.8h, v3.8h\n"
+ "add x10, x10, #0x20\n"
 ".inst 0x6e43ec56 // bfmmla v22.4s, v2.8h, v3.8h\n"
 "ldr q5, [x28, #0x0]\n"
 ".inst 0x6e44ec10 // bfmmla v16.4s, v0.8h, v4.8h\n"
+ "add x9, x9, #0x20\n"
 ".inst 0x6e44ec5c // bfmmla v28.4s, v2.8h, v4.8h\n"
 "ldr q4, [x28, #0x10]\n"
 ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
+ "add x28, x28, #0x20\n"
 ".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
 "ldr q3, [x27, #0x0]\n"
- "add x28, x28, #0x20\n"
 ".inst 0x6e41ec11 // bfmmla v17.4s, v0.8h, v1.8h\n"
 ".inst 0x6e41ec5d // bfmmla v29.4s, v2.8h, v1.8h\n"
 "ldr q1, [x27, #0x10]\n"
@@ -1485,41 +1483,41 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "ldr q5, [x12, #0x0]\n"
 "ldr q4, [x12, #0x10]\n"
 ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
- "ldr q3, [x11, #0x0]\n"
- "ldr q6, [x11, #0x10]\n"
- "add x12, x12, #0x20\n"
- "add x11, x11, #0x20\n"
 ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
- ".inst 0x6e45ec54 // bfmmla v20.4s, v2.8h, v5.8h\n"
- ".inst 0x6e44ec5a // bfmmla v26.4s, v2.8h, v4.8h\n"
- ".inst 0x6e43ec55 // bfmmla v21.4s, v2.8h, v3.8h\n"
- ".inst 0x6e46ec5b // bfmmla v27.4s, v2.8h, v6.8h\n"
+ "ldr q3, [x11, #0x0]\n"
+ "ldr q1, [x11, #0x10]\n"
+ ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
 ".inst 0x6e45ec08 // bfmmla v8.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e45ec54 // bfmmla v20.4s, v2.8h, v5.8h\n"
 "ldr q5, [x10, #0x0]\n"
 ".inst 0x6e44ec0e // bfmmla v14.4s, v0.8h, v4.8h\n"
+ "add x12, x12, #0x20\n"
+ ".inst 0x6e44ec5a // bfmmla v26.4s, v2.8h, v4.8h\n"
 "ldr q4, [x10, #0x10]\n"
 ".inst 0x6e43ec09 // bfmmla v9.4s, v0.8h, v3.8h\n"
+ "add x11, x11, #0x20\n"
+ ".inst 0x6e43ec55 // bfmmla v21.4s, v2.8h, v3.8h\n"
 "ldr q3, [x9, #0x0]\n"
- ".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
- "ldr q1, [x9, #0x10]\n"
+ ".inst 0x6e41ec0f // bfmmla v15.4s, v0.8h, v1.8h\n"
 "add x10, x10, #0x20\n"
- "add x9, x9, #0x20\n"
+ ".inst 0x6e41ec5b // bfmmla v27.4s, v2.8h, v1.8h\n"
+ "ldr q1, [x9, #0x10]\n"
 ".inst 0x6e45ec0a // bfmmla v10.4s, v0.8h, v5.8h\n"
+ "add x9, x9, #0x20\n"
 ".inst 0x6e45ec56 // bfmmla v22.4s, v2.8h, v5.8h\n"
 "ldr q5, [x28, #0x0]\n"
 ".inst 0x6e44ec10 // bfmmla v16.4s, v0.8h, v4.8h\n"
 ".inst 0x6e44ec5c // bfmmla v28.4s, v2.8h, v4.8h\n"
 "ldr q4, [x28, #0x10]\n"
 ".inst 0x6e43ec0b // bfmmla v11.4s, v0.8h, v3.8h\n"
+ "add x28, x28, #0x20\n"
 ".inst 0x6e43ec57 // bfmmla v23.4s, v2.8h, v3.8h\n"
 "ldr q3, [x27, #0x0]\n"
- "add x28, x28, #0x20\n"
 ".inst 0x6e41ec11 // bfmmla v17.4s, v0.8h, v1.8h\n"
 ".inst 0x6e41ec5d // bfmmla v29.4s, v2.8h, v1.8h\n"
 "ldr q1, [x27, #0x10]\n"
- "add x27, x27, #0x20\n"
 ".inst 0x6e45ec0c // bfmmla v12.4s, v0.8h, v5.8h\n"
+ "add x27, x27, #0x20\n"
 ".inst 0x6e45ec58 // bfmmla v24.4s, v2.8h, v5.8h\n"
 ".inst 0x6e44ec12 // bfmmla v18.4s, v0.8h, v4.8h\n"
 ".inst 0x6e44ec5e // bfmmla v30.4s, v2.8h, v4.8h\n"
@@ -1533,16 +1531,16 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "cmp x26, x20\n"
 "bne 109b\n"
 "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x23, x13, x20, LSL #2\n"
 "uzp1 v4.2d, v8.2d, v14.2d\n"
 "uzp2 v8.2d, v8.2d, v14.2d\n"
 "uzp1 v14.2d, v9.2d, v15.2d\n"
 "uzp2 v9.2d, v9.2d, v15.2d\n"
+ "add x22, x23, x20, LSL #2\n"
 "uzp1 v15.2d, v10.2d, v16.2d\n"
 "uzp2 v10.2d, v10.2d, v16.2d\n"
- "add x24, x13, x20, LSL #2\n"
 "uzp1 v16.2d, v11.2d, v17.2d\n"
 "uzp2 v11.2d, v11.2d, v17.2d\n"
- "add x23, x24, x20, LSL #2\n"
 "uzp1 v17.2d, v12.2d, v18.2d\n"
 "uzp2 v12.2d, v12.2d, v18.2d\n"
 "uzp1 v18.2d, v13.2d, v19.2d\n"
 "uzp2 v13.2d, v13.2d, v19.2d\n"
 "uzp1 v20.2d, v20.2d, v26.2d\n"
 "uzp1 v21.2d, v21.2d, v27.2d\n"
 "uzp1 v22.2d, v22.2d, v28.2d\n"
 "uzp1 v23.2d, v23.2d, v29.2d\n"
 "uzp1 v24.2d, v24.2d, v30.2d\n"
 "uzp1 v25.2d, v25.2d, v31.2d\n"
 "tbz %x[flags], #1, 118f\n"
- "add x21, %x[args_ptr], %[offset_max]\n"
+ "add x20, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x20]\n"
 "add x20, %x[args_ptr], %[offset_min]\n"
- "ld1r { v1.4s }, [x21]\n"
 "ld1r { v0.4s }, [x20]\n"
 "fmin v4.4s, v4.4s, v1.4s\n"
 "fmin v14.4s, v14.4s, v1.4s\n"
@@ -1602,126 +1600,126 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "st1 { v14.4s }, [x13], #0x10\n"
 "st1 { v15.4s }, [x13], #0x10\n"
 "st1 { v16.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x24], #0x10\n"
- "st1 { v9.4s }, [x24], #0x10\n"
- "st1 { v10.4s }, [x24], #0x10\n"
- "st1 { v11.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
- "st1 { v22.4s }, [x23], #0x10\n"
- "st1 { v23.4s }, [x23], #0x10\n"
+ "st1 { v8.4s }, [x23], #0x10\n"
+ "st1 { v9.4s }, [x23], #0x10\n"
+ "st1 { v10.4s }, [x23], #0x10\n"
+ "st1 { v11.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v21.4s }, [x22], #0x10\n"
+ "st1 { v22.4s }, [x22], #0x10\n"
+ "st1 { v23.4s }, [x22], #0x10\n"
 "tbz x14, #2, 120f\n"
 "st1 { v17.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x24], #0x10\n"
- "st1 { v24.4s }, [x23], #0x10\n"
+ "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v24.4s }, [x22], #0x10\n"
 "tbz x14, #1, 119f\n"
 "str d18, [x13], #0x8\n"
- "str d13, [x24], #0x8\n"
- "str d25, [x23], #0x8\n"
+ "str d13, [x23], #0x8\n"
+ "str d25, [x22], #0x8\n"
 "tbz x14, #0, 130f\n"
 "st1 { v18.s }[2], [x13]\n"
- "st1 { v13.s }[2], [x24]\n"
- "st1 { v25.s }[2], [x23]\n"
+ "st1 { v13.s }[2], [x23]\n"
+ "st1 { v25.s }[2], [x22]\n"
 "b 130f\n"
 "119:" // Height 3: Partial direct writeback: partial_1_20
 "tbz x14, #0, 130f\n"
 "str s18, [x13, #0x0]\n"
- "str s13, [x24, #0x0]\n"
- "str s25, [x23, #0x0]\n"
+ "str s13, [x23, #0x0]\n"
+ "str s25, [x22, #0x0]\n"
 "b 130f\n"
 "120:" // Height 3: Partial direct writeback: partial_2_16
 "tbz x14, #1, 121f\n"
 "str d17, [x13], #0x8\n"
- "str d12, [x24], #0x8\n"
- "str d24, [x23], #0x8\n"
+ "str d12, [x23], #0x8\n"
+ "str d24, [x22], #0x8\n"
 "tbz x14, #0, 130f\n"
 "st1 { v17.s }[2], [x13]\n"
- "st1 { v12.s }[2], [x24]\n"
- "st1 { v24.s }[2], [x23]\n"
+ "st1 { v12.s }[2], [x23]\n"
+ "st1 { v24.s }[2], [x22]\n"
 "b 130f\n"
 "121:" // Height 3: Partial direct writeback: partial_1_16
 "tbz x14, #0, 130f\n"
 "str s17, [x13, #0x0]\n"
- "str s12, [x24, #0x0]\n"
- "str s24, [x23, #0x0]\n"
+ "str s12, [x23, #0x0]\n"
+ "str s24, [x22, #0x0]\n"
 "b 130f\n"
 "122:" // Height 3: Partial direct writeback: partial_8_0
 "tbz x14, #3, 126f\n"
 "st1 { v4.4s }, [x13], #0x10\n"
 "st1 { v14.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x24], #0x10\n"
- "st1 { v9.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v8.4s }, [x23], #0x10\n"
+ "st1 { v9.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v21.4s }, [x22], #0x10\n"
 "tbz x14, #2, 124f\n"
 "st1 { v15.4s }, [x13], #0x10\n"
- "st1 { v10.4s }, [x24], #0x10\n"
- "st1 { v22.4s }, [x23], #0x10\n"
+ "st1 { v10.4s }, [x23], #0x10\n"
+ "st1 { v22.4s }, [x22], #0x10\n"
 "tbz x14, #1, 123f\n"
 "str d16, [x13], #0x8\n"
- "str d11, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
+ "str d11, [x23], #0x8\n"
+ "str d23, [x22], #0x8\n"
 "tbz x14, #0, 130f\n"
 "st1 { v16.s }[2], [x13]\n"
- "st1 { v11.s }[2], [x24]\n"
- "st1 { v23.s }[2], [x23]\n"
+ "st1 { v11.s }[2], [x23]\n"
+ "st1 { v23.s }[2], [x22]\n"
 "b 130f\n"
 "123:" // Height 3: Partial direct writeback: partial_1_12
 "tbz x14, #0, 130f\n"
 "str s16, [x13, #0x0]\n"
- "str s11, [x24, #0x0]\n"
- "str s23, [x23, #0x0]\n"
+ "str s11, [x23, #0x0]\n"
+ "str s23, [x22, #0x0]\n"
 "b 130f\n"
 "124:" // Height 3: Partial direct writeback: partial_2_8
 "tbz x14, #1, 125f\n"
 "str d15, [x13], #0x8\n"
- "str d10, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
+ "str d10, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
 "tbz x14, #0, 130f\n"
 "st1 { v15.s }[2], [x13]\n"
- "st1 { v10.s }[2], [x24]\n"
- "st1 { v22.s }[2], [x23]\n"
+ "st1 { v10.s }[2], [x23]\n"
+ "st1 { v22.s }[2], [x22]\n"
 "b 130f\n"
 "125:" // Height 3: Partial direct writeback: partial_1_8
 "tbz x14, #0, 130f\n"
 "str s15, [x13, #0x0]\n"
- "str s10, [x24, #0x0]\n"
- "str s22, [x23, #0x0]\n"
+ "str s10, [x23, #0x0]\n"
+ "str s22, [x22, #0x0]\n"
 "b 130f\n"
 "126:" // Height 3: Partial direct writeback: partial_4_0
 "tbz x14, #2, 128f\n"
 "st1 { v4.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v8.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
 "tbz x14, #1, 127f\n"
 "str d14, [x13], #0x8\n"
- "str d9, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
+ "str d9, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
 "tbz x14, #0, 130f\n"
 "st1 { v14.s }[2], [x13]\n"
- "st1 { v9.s }[2], [x24]\n"
- "st1 { v21.s }[2], [x23]\n"
+ "st1 { v9.s }[2], [x23]\n"
+ "st1 { v21.s }[2], [x22]\n"
 "b 130f\n"
 "127:" // Height 3: Partial direct writeback: partial_1_4
 "tbz x14, #0, 130f\n"
 "str s14, [x13, #0x0]\n"
- "str s9, [x24, #0x0]\n"
- "str s21, [x23, #0x0]\n"
+ "str s9, [x23, #0x0]\n"
+ "str s21, [x22, #0x0]\n"
 "b 130f\n"
 "128:" // Height 3: Partial direct writeback: partial_2_0
 "tbz x14, #1, 129f\n"
 "str d4, [x13], #0x8\n"
- "str d8, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
+ "str d8, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
 "tbz x14, #0, 130f\n"
 "st1 { v4.s }[2], [x13]\n"
- "st1 { v8.s }[2], [x24]\n"
- "st1 { v20.s }[2], [x23]\n"
+ "st1 { v8.s }[2], [x23]\n"
+ "st1 { v20.s }[2], [x22]\n"
 "b 130f\n"
 "129:" // Height 3: Partial direct writeback: partial_1_0
 "str s4, [x13, #0x0]\n"
- "str s8, [x24, #0x0]\n"
- "str s20, [x23, #0x0]\n"
+ "str s8, [x23, #0x0]\n"
+ "str s20, [x22, #0x0]\n"
 "130:" // Height 3: Partial direct writeback: Done
 "b 132f\n"
 "131:" // Height 3: Full writeback
@@ -1732,36 +1730,34 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "str q17, [x13, #0x40]\n"
 "str q18, [x13, #0x50]\n"
 "add x13, x13, #0x60\n"
- "str q8, [x24, #0x0]\n"
- "str q9, [x24, #0x10]\n"
- "str q10, [x24, #0x20]\n"
- "str q11, [x24, #0x30]\n"
- "str q12, [x24, #0x40]\n"
- "str q13, [x24, #0x50]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
- "str q24, [x23, #0x40]\n"
- "str q25, [x23, #0x50]\n"
+ "str q8, [x23, #0x0]\n"
+ "str q9, [x23, #0x10]\n"
+ "str q10, [x23, #0x20]\n"
+ "str q11, [x23, #0x30]\n"
+ "str q12, [x23, #0x40]\n"
+ "str q13, [x23, #0x50]\n"
+ "str q20, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q22, [x22, #0x20]\n"
+ "str q23, [x22, #0x30]\n"
+ "str q24, [x22, #0x40]\n"
+ "str q25, [x22, #0x50]\n"
 "132:" // Height 3: Writeback done
 "subs x14, x14, #0x18\n"
 "bgt 90b\n"
 "b 178f\n"
 "133:" // Height 4
 "ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
- "mov x21, #0x10\n"
 "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
 "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x21, #0x10\n"
 "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
- "madd x21, x20, x21, x13\n"
- "str x21, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "mov x15, %x[bias]\n"
+ "mov x13, %x[output_ptr]\n"
+ "madd %x[output_ptr], x20, x21, %x[output_ptr]\n"
 "134:" // Height 4: Column loop
 "ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
 "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "cmp x14, #0x14\n"
 "add x11, x12, x20, LSL #1\n"
 "add x10, x11, x20, LSL #1\n"
 "add x9, x10, x20, LSL #1\n"
@@ -1769,6 +1765,7 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "add x27, x28, x20, LSL #1\n"
 "add x20, x27, x20, LSL #1\n"
 "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x14, #0x14\n"
 "bgt 135f\n"
 "cmp x14, #0x10\n"
 "mov x27, x12\n"
@@ -1787,19 +1784,19 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "cbz x15, 136f\n"
 "ldr q8, [x15, #0x0]\n"
 "ldr q9, [x15, #0x10]\n"
- "ldr q10, [x15, #0x20]\n"
- "ldr q11, [x15, #0x30]\n"
- "ldr q12, [x15, #0x40]\n"
- "ldr q13, [x15, #0x50]\n"
- "add x15, x15, #0x60\n"
 "zip2 v14.2d, v8.2d, v8.2d\n"
 "zip1 v8.2d, v8.2d, v8.2d\n"
+ "ldr q10, [x15, #0x20]\n"
+ "ldr q11, [x15, #0x30]\n"
 "zip2 v15.2d, v9.2d, v9.2d\n"
 "zip1 v9.2d, v9.2d, v9.2d\n"
+ "ldr q12, [x15, #0x40]\n"
+ "ldr q13, [x15, #0x50]\n"
 "zip2 v16.2d, v10.2d, v10.2d\n"
 "zip1 v10.2d, v10.2d, v10.2d\n"
 "zip2 v17.2d, v11.2d, v11.2d\n"
 "zip1 v11.2d, v11.2d, v11.2d\n"
+ "add x15, x15, #0x60\n"
 "zip2 v18.2d, v12.2d, v12.2d\n"
 "zip1 v12.2d, v12.2d, v12.2d\n"
 "zip2 v19.2d, v13.2d, v13.2d\n"
@@ -1820,175 +1817,175 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "136:" // Height 4: no bias
 "tbz %x[flags], #0, 151f\n"
 "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "cmp x14, #0x18\n"
- "add x24, x13, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "add x23, x13, x20, LSL #2\n"
 "add x22, x23, x20, LSL #2\n"
+ "cmp x14, #0x18\n"
+ "add x21, x22, x20, LSL #2\n"
 "bge 149f\n"
 "tbz x14, #4, 140f\n"
 "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v26.4s }, [x21], #0x10\n"
 "ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v15.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "ld1 { v27.4s }, [x22], #0x10\n"
+ "ld1 { v15.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v27.4s }, [x21], #0x10\n"
 "ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v23.4s }, [x23], #0x10\n"
- "ld1 { v28.4s }, [x22], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v23.4s }, [x22], #0x10\n"
+ "ld1 { v28.4s }, [x21], #0x10\n"
 "ld1 { v12.4s }, [x13], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v24.4s }, [x23], #0x10\n"
- "ld1 { v29.4s }, [x22], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v24.4s }, [x22], #0x10\n"
+ "ld1 { v29.4s }, [x21], #0x10\n"
 "tbz x14, #2, 138f\n"
 "ld1 { v13.4s }, [x13], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v25.4s }, [x23], #0x10\n"
- "ld1 { v30.4s }, [x22], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v25.4s }, [x22], #0x10\n"
+ "ld1 { v30.4s }, [x21], #0x10\n"
 "tbz x14, #1, 137f\n"
 "ldr d20, [x13], #0x8\n"
- "ldr d19, [x24], #0x8\n"
+ "ldr d19, [x23], #0x8\n"
 "mov x20, #0x58\n"
- "ldr d4, [x23], #0x8\n"
- "ldr d31, [x22], #0x8\n"
+ "ldr d4, [x22], #0x8\n"
+ "ldr d31, [x21], #0x8\n"
 "tbz x14, #0, 148f\n"
 "ld1 { v20.s }[2], [x13]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v4.s }[2], [x23]\n"
- "ld1 { v31.s }[2], [x22]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v4.s }[2], [x22]\n"
+ "ld1 { v31.s }[2], [x21]\n"
 "b 148f\n"
 "137:" // Height 4: Partial accumulate: partial_1_20
 "mov x20, #0x50\n"
 "tbz x14, #0, 148f\n"
 "ldr s20, [x13, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s4, [x23, #0x0]\n"
- "ldr s31, [x22, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s4, [x22, #0x0]\n"
+ "ldr s31, [x21, #0x0]\n"
 "b 148f\n"
 "138:" // Height 4: Partial accumulate: partial_2_16
 "tbz x14, #1, 139f\n"
 "ldr d13, [x13], #0x8\n"
- "ldr d18, [x24], #0x8\n"
+ "ldr d18, [x23], #0x8\n"
 "mov x20, #0x48\n"
- "ldr d25, [x23], #0x8\n"
- "ldr d30, [x22], #0x8\n"
+ "ldr d25, [x22], #0x8\n"
+ "ldr d30, [x21], #0x8\n"
 "tbz x14, #0, 148f\n"
 "ld1 { v13.s }[2], [x13]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v25.s }[2], [x23]\n"
- "ld1 { v30.s }[2], [x22]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v25.s }[2], [x22]\n"
+ "ld1 { v30.s }[2], [x21]\n"
 "b 148f\n"
 "139:" // Height 4: Partial accumulate: partial_1_16
 "mov x20, #0x40\n"
 "tbz x14, #0, 148f\n"
 "ldr s13, [x13, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s25, [x23, #0x0]\n"
- "ldr s30, [x22, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s25, [x22, #0x0]\n"
+ "ldr s30, [x21, #0x0]\n"
 "b 148f\n"
 "140:" // Height 4: Partial accumulate: partial_8_0
 "tbz x14, #3, 144f\n"
 "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v26.4s }, [x21], #0x10\n"
 "ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v15.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "ld1 { v27.4s }, [x22], #0x10\n"
+ "ld1 { v15.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v27.4s }, [x21], #0x10\n"
 "tbz x14, #2, 142f\n"
 "ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v23.4s }, [x23], #0x10\n"
- "ld1 { v28.4s }, [x22], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v23.4s }, [x22], #0x10\n"
+ "ld1 { v28.4s }, [x21], #0x10\n"
 "tbz x14, #1, 141f\n"
 "ldr d12, [x13], #0x8\n"
- "ldr d17, [x24], #0x8\n"
+ "ldr d17, [x23], #0x8\n"
 "mov x20, #0x38\n"
- "ldr d24, [x23], #0x8\n"
- "ldr d29, [x22], #0x8\n"
+ "ldr d24, [x22], #0x8\n"
+ "ldr d29, [x21], #0x8\n"
 "tbz x14, #0, 148f\n"
 "ld1 { v12.s }[2], [x13]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v24.s }[2], [x23]\n"
- "ld1 { v29.s }[2], [x22]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v24.s }[2], [x22]\n"
+ "ld1 { v29.s }[2], [x21]\n"
 "b 148f\n"
 "141:" // Height 4: Partial accumulate: partial_1_12
 "mov x20, #0x30\n"
 "tbz x14, #0, 148f\n"
 "ldr s12, [x13, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s24, [x23, #0x0]\n"
- "ldr s29, [x22, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s24, [x22, #0x0]\n"
+ "ldr s29, [x21, #0x0]\n"
 "b 148f\n"
 "142:" // Height 4: Partial accumulate: partial_2_8
 "tbz x14, #1, 143f\n"
 "ldr d11, [x13], #0x8\n"
- "ldr d16, [x24], #0x8\n"
+ "ldr d16, [x23], #0x8\n"
 "mov x20, #0x28\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d28, [x22], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d28, [x21], #0x8\n"
 "tbz x14, #0, 148f\n"
 "ld1 { v11.s }[2], [x13]\n"
- "ld1 { v16.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v28.s }[2], [x22]\n"
+ "ld1 { v16.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
+ "ld1 { v28.s }[2], [x21]\n"
 "b 148f\n"
 "143:" // Height 4: Partial accumulate: partial_1_8
 "mov x20, #0x20\n"
 "tbz x14, #0, 148f\n"
 "ldr s11, [x13, #0x0]\n"
- "ldr s16, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s28, [x22, #0x0]\n"
+ "ldr s16, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
+ "ldr s28, [x21, #0x0]\n"
 "b 148f\n"
 "144:" // Height 4: Partial accumulate: partial_4_0
 "tbz x14, #2, 146f\n"
 "ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
+ "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v26.4s }, [x21], #0x10\n"
 "tbz x14, #1, 145f\n"
 "ldr d10, [x13], #0x8\n"
- "ldr d15, [x24], #0x8\n"
+ "ldr d15, [x23], #0x8\n"
 "mov x20, #0x18\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "ldr d27, [x21], #0x8\n"
 "tbz x14, #0, 148f\n"
 "ld1 { v10.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
- "ld1 { v27.s }[2], [x22]\n"
+ "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
+ "ld1 { v27.s }[2], [x21]\n"
 "b 148f\n"
 "145:" // Height 4: Partial accumulate: partial_1_4
 "mov x20, #0x10\n"
 "tbz x14, #0, 148f\n"
 "ldr s10, [x13, #0x0]\n"
- "ldr s15, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
- "ldr s27, [x22, #0x0]\n"
+ "ldr s15, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
+ "ldr s27, [x21, #0x0]\n"
 "b 148f\n"
 "146:" // Height 4: Partial accumulate: partial_2_0
 "tbz x14, #1, 147f\n"
 "ldr d9, [x13], #0x8\n"
- "ldr d14, [x24], #0x8\n"
 "mov x20, #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
 "tbz x14, #0, 148f\n"
 "ld1 { v9.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
- "ld1 { v26.s }[2], [x22]\n"
+ "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
+ "ld1 { v26.s }[2], [x21]\n"
 "b 148f\n"
 "147:" // Height 4: Partial accumulate: partial_1_0
 "ldr s9, [x13, #0x0]\n"
- "ldr s14, [x24, #0x0]\n"
+ "ldr s14, [x23, #0x0]\n"
 "mov x20, #0x0\n"
- "ldr s21, [x23, #0x0]\n"
- "ldr s26, [x22, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
+ "ldr s26, [x21, #0x0]\n"
 "148:" // Height 4: Partial accumulate: Done
 "sub x13, x13, x20\n"
 "b 150f\n"
@@ -1999,24 +1996,24 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "ldr q12, [x13, #0x30]\n"
 "ldr q13, [x13, #0x40]\n"
 "ldr q20, [x13, #0x50]\n"
- "ldr q14, [x24, #0x0]\n"
- "ldr q15, [x24, #0x10]\n"
- "ldr q16, [x24, #0x20]\n"
- "ldr q17, [x24, #0x30]\n"
- "ldr q18, [x24, #0x40]\n"
- "ldr q19, [x24, #0x50]\n"
- "ldr q21, [x23, #0x0]\n"
- "ldr q22, [x23, #0x10]\n"
- "ldr q23, [x23, #0x20]\n"
- "ldr q24, [x23, #0x30]\n"
- "ldr q25, [x23, #0x40]\n"
- "ldr q4, [x23, #0x50]\n"
- "ldr q26, [x22, #0x0]\n"
- "ldr q27, [x22, #0x10]\n"
- "ldr q28, [x22, #0x20]\n"
- "ldr q29, [x22, #0x30]\n"
- "ldr q30, [x22, #0x40]\n"
- "ldr q31, [x22, #0x50]\n"
+ "ldr q14, [x23, #0x0]\n"
+ "ldr q15, [x23, #0x10]\n"
+ "ldr q16, [x23, #0x20]\n"
+ "ldr q17, [x23, #0x30]\n"
+ "ldr q18, [x23, #0x40]\n"
+ "ldr q19, [x23, #0x50]\n"
+ "ldr q21, [x22, #0x0]\n"
+ "ldr q22, [x22, #0x10]\n"
+ "ldr q23, [x22, #0x20]\n"
+ "ldr q24, [x22, #0x30]\n"
+ "ldr q25, [x22, #0x40]\n"
+ "ldr q4, [x22, #0x50]\n"
+ "ldr q26, [x21, #0x0]\n"
+ "ldr q27, [x21, #0x10]\n"
+ "ldr q28, [x21, #0x20]\n"
+ "ldr q29, [x21, #0x30]\n"
+ "ldr q30, [x21, #0x40]\n"
+ "ldr q31, [x21, #0x50]\n"
 "150:" // Height 4: MMLA fixup
 "zip1 v8.2d, v9.2d, v14.2d\n"
 "zip2 v14.2d, v9.2d, v14.2d\n"
@@ -2072,8 +2069,8 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 "mov x26, #0x0\n"
 "153:" // Height 4: String loop
 "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
 "ldr w25, [x20, x26, LSL #0x2]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
 "tbz %x[flags], #3, 154f\n"
 "ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
 "add x20, x20, x21, LSL #3\n"
@@ -2110,26 +2107,26 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
 ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
 "sub x25, x25, #0x4\n"
- "add x12, x12, #0x20\n"
 "cmp x25, #0x8\n"
- "add x11, x11, #0x20\n"
 ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
 "ld1 { v1.4s }, [x23], #0x10\n"
 ".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
 "ld1 { v3.4s }, [x21], #0x10\n"
 ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
- ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
 ".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
 "ldr q4, [x10, #0x0]\n"
+ "add x12, x12, #0x20\n"
+ ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
 ".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
 "ldr q5, [x10, #0x10]\n"
+ "add x11, x11, #0x20\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
 ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
 "ldr q6, [x9, #0x0]\n"
+ "add x10, x10, #0x20\n"
+ ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
 ".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
 "ldr q7, [x9, #0x10]\n"
- "add x10, x10, #0x20\n"
 "add x9, x9, #0x20\n"
 ".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
 ".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
@@ -2137,10 +2134,10 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 ".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
 ".inst 0x6e45ec5c // bfmmla v28.4s, v2.8h, v5.8h\n"
 "ldr q5, [x28, #0x10]\n"
+ "add x28, x28, #0x20\n"
 ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
 ".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
 "ldr q6, [x27, #0x0]\n"
- "add x28, x28, #0x20\n"
 ".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
 ".inst 0x6e47ec5d // bfmmla v29.4s, v2.8h, v7.8h\n"
 "ldr q7, [x27, #0x10]\n"
@@ -2165,31 +2162,31 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
 "sub x25, x25, #0x4\n"
 "add x12, x12, #0x20\n"
- "add x11, x11, #0x20\n"
 ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
 ".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
 ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
- ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
+ "add x11, x11, #0x20\n"
 ".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
 "ldr q3, [x10, #0x0]\n"
+ ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
 ".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
 "ldr q4, [x10, #0x10]\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ "add x10, x10, #0x20\n"
 ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
 "ldr q6, [x9, #0x0]\n"
+ ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
 ".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
 "ldr q1, [x9, #0x10]\n"
- "add x10, x10, #0x20\n"
- "add x9, x9, #0x20\n"
 ".inst 0x6e43ec0a // bfmmla v10.4s, v0.8h, v3.8h\n"
+ "add x9, x9, #0x20\n"
 ".inst 0x6e43ec56 // bfmmla v22.4s, v2.8h, v3.8h\n"
 "ldr q5, [x28, #0x0]\n"
 ".inst 0x6e44ec10 // bfmmla v16.4s, v0.8h, v4.8h\n"
 ".inst 0x6e44ec5c // bfmmla v28.4s, v2.8h, v4.8h\n"
 "ldr q4, [x28, #0x10]\n"
- "add x28, x28, #0x20\n"
 ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
+ "add x28, x28, #0x20\n"
 ".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
 "ldr q3, [x27, #0x0]\n"
 ".inst 0x6e41ec11 // bfmmla v17.4s, v0.8h, v1.8h\n"
@@ -2230,39 +2227,39 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
 ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
 "ldr q7, [x11, #0x0]\n"
 "ldr q6, [x11, #0x10]\n"
- "add x12, x12, #0x20\n"
- "add x11, x11, #0x20\n"
 ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
 ".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
 ".inst 0x6e45ec08 // bfmmla v8.4s, v0.8h, v5.8h\n"
 ".inst 0x6e45ec54 // bfmmla v20.4s, v2.8h, v5.8h\n"
 "ldr q5, [x10, #0x0]\n"
+ "add x12, x12, #0x20\n"
 ".inst 0x6e44ec0e // bfmmla v14.4s, v0.8h, v4.8h\n"
 ".inst 0x6e44ec5a // bfmmla v26.4s, v2.8h, v4.8h\n"
 "ldr q4, [x10, #0x10]\n"
- "add x10, x10, #0x20\n"
+ "add x11, x11, #0x20\n"
 ".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
 ".inst 0x6e47ec55 // bfmmla v21.4s, v2.8h, v7.8h\n"
 "ldr q3, [x9, #0x0]\n"
+ "add x10, x10, #0x20\n"
 ".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
 ".inst 0x6e46ec5b // bfmmla v27.4s, v2.8h, v6.8h\n"
 "ldr q1, [x9, #0x10]\n"
- ".inst 0x6e45ec0a // bfmmla v10.4s, v0.8h, v5.8h\n"
 "add x9, x9, #0x20\n"
+ ".inst 0x6e45ec0a // bfmmla v10.4s, v0.8h, v5.8h\n"
 ".inst 0x6e45ec56 // bfmmla v22.4s, v2.8h, v5.8h\n"
 "ldr q5, [x28, #0x0]\n"
 ".inst 0x6e44ec10 // bfmmla v16.4s, v0.8h, v4.8h\n"
 ".inst 0x6e44ec5c // bfmmla v28.4s, v2.8h, v4.8h\n"
 "ldr q4, [x28, #0x10]\n"
- ".inst 0x6e43ec0b // bfmmla v11.4s, v0.8h, v3.8h\n"
 "add x28, x28, #0x20\n"
+ ".inst 0x6e43ec0b // bfmmla v11.4s, v0.8h, v3.8h\n"
 ".inst 0x6e43ec57 // bfmmla v23.4s, v2.8h, v3.8h\n"
 "ldr q3, [x27, #0x0]\n"
".inst 0x6e41ec11 // bfmmla v17.4s, v0.8h, v1.8h\n" ".inst 0x6e41ec5d // bfmmla v29.4s, v2.8h, v1.8h\n" "ldr q1, [x27, #0x10]\n" - ".inst 0x6e45ec0c // bfmmla v12.4s, v0.8h, v5.8h\n" "add x27, x27, #0x20\n" + ".inst 0x6e45ec0c // bfmmla v12.4s, v0.8h, v5.8h\n" ".inst 0x6e45ec58 // bfmmla v24.4s, v2.8h, v5.8h\n" ".inst 0x6e44ec12 // bfmmla v18.4s, v0.8h, v4.8h\n" ".inst 0x6e44ec5e // bfmmla v30.4s, v2.8h, v4.8h\n" @@ -2276,17 +2273,17 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 ( "cmp x26, x20\n" "bne 153b\n" "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n" + "add x23, x13, x20, LSL #2\n" + "add x22, x23, x20, LSL #2\n" "uzp1 v4.2d, v8.2d, v14.2d\n" "uzp2 v8.2d, v8.2d, v14.2d\n" "uzp1 v14.2d, v9.2d, v15.2d\n" + "add x21, x22, x20, LSL #2\n" "uzp2 v9.2d, v9.2d, v15.2d\n" "uzp1 v15.2d, v10.2d, v16.2d\n" "uzp2 v10.2d, v10.2d, v16.2d\n" - "add x24, x13, x20, LSL #2\n" - "add x23, x24, x20, LSL #2\n" "uzp1 v16.2d, v11.2d, v17.2d\n" "uzp2 v11.2d, v11.2d, v17.2d\n" - "add x22, x23, x20, LSL #2\n" "uzp1 v17.2d, v12.2d, v18.2d\n" "uzp2 v12.2d, v12.2d, v18.2d\n" "uzp1 v18.2d, v13.2d, v19.2d\n" @@ -2304,9 +2301,9 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 ( "uzp1 v30.2d, v25.2d, v31.2d\n" "uzp2 v25.2d, v25.2d, v31.2d\n" "tbz %x[flags], #1, 162f\n" - "add x21, %x[args_ptr], %[offset_max]\n" + "add x20, %x[args_ptr], %[offset_max]\n" + "ld1r { v1.4s }, [x20]\n" "add x20, %x[args_ptr], %[offset_min]\n" - "ld1r { v1.4s }, [x21]\n" "ld1r { v0.4s }, [x20]\n" "fmin v4.4s, v4.4s, v1.4s\n" "fmin v14.4s, v14.4s, v1.4s\n" @@ -2364,153 +2361,153 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 ( "st1 { v14.4s }, [x13], #0x10\n" "st1 { v15.4s }, [x13], #0x10\n" "st1 { v16.4s }, [x13], #0x10\n" - "st1 { v8.4s }, [x24], #0x10\n" - "st1 { v9.4s }, [x24], #0x10\n" - "st1 { v10.4s }, [x24], #0x10\n" - "st1 { v11.4s }, [x24], #0x10\n" - "st1 { v19.4s }, [x23], #0x10\n" - "st1 { v26.4s }, [x23], #0x10\n" - "st1 { v27.4s }, [x23], #0x10\n" - "st1 { v28.4s }, [x23], #0x10\n" - "st1 { v20.4s }, [x22], #0x10\n" - "st1 { v21.4s }, [x22], #0x10\n" - "st1 { v22.4s }, [x22], #0x10\n" - "st1 { v23.4s }, [x22], #0x10\n" + "st1 { v8.4s }, [x23], #0x10\n" + "st1 { v9.4s }, [x23], #0x10\n" + "st1 { v10.4s }, [x23], #0x10\n" + "st1 { v11.4s }, [x23], #0x10\n" + "st1 { v19.4s }, [x22], #0x10\n" + "st1 { v26.4s }, [x22], #0x10\n" + "st1 { v27.4s }, [x22], #0x10\n" + "st1 { v28.4s }, [x22], #0x10\n" + "st1 { v20.4s }, [x21], #0x10\n" + "st1 { v21.4s }, [x21], #0x10\n" + "st1 { v22.4s }, [x21], #0x10\n" + "st1 { v23.4s }, [x21], #0x10\n" "tbz x14, #2, 164f\n" "st1 { v17.4s }, [x13], #0x10\n" - "st1 { v12.4s }, [x24], #0x10\n" - "st1 { v29.4s }, [x23], #0x10\n" - "st1 { v24.4s }, [x22], #0x10\n" + "st1 { v12.4s }, [x23], #0x10\n" + "st1 { v29.4s }, [x22], #0x10\n" + "st1 { v24.4s }, [x21], #0x10\n" "tbz x14, #1, 163f\n" "str d18, [x13], #0x8\n" - "str d13, [x24], #0x8\n" - "str d30, [x23], #0x8\n" - "str d25, [x22], #0x8\n" + "str d13, [x23], #0x8\n" + "str d30, [x22], #0x8\n" + "str d25, [x21], #0x8\n" "tbz x14, #0, 174f\n" "st1 { v18.s }[2], [x13]\n" - "st1 { v13.s }[2], [x24]\n" - "st1 { v30.s }[2], [x23]\n" - "st1 { v25.s }[2], [x22]\n" + "st1 { v13.s }[2], [x23]\n" + "st1 { v30.s }[2], [x22]\n" + "st1 { v25.s }[2], [x21]\n" "b 174f\n" "163:" // Height 4: Partial direct writeback: partial_1_20 "tbz x14, #0, 174f\n" "str s18, [x13, #0x0]\n" - "str s13, [x24, #0x0]\n" - "str s30, [x23, #0x0]\n" - "str s25, [x22, #0x0]\n" + "str s13, [x23, #0x0]\n" + "str s30, [x22, #0x0]\n" + "str s25, [x21, #0x0]\n" "b 174f\n" "164:" // Height 4: 
Partial direct writeback: partial_2_16 "tbz x14, #1, 165f\n" "str d17, [x13], #0x8\n" - "str d12, [x24], #0x8\n" - "str d29, [x23], #0x8\n" - "str d24, [x22], #0x8\n" + "str d12, [x23], #0x8\n" + "str d29, [x22], #0x8\n" + "str d24, [x21], #0x8\n" "tbz x14, #0, 174f\n" "st1 { v17.s }[2], [x13]\n" - "st1 { v12.s }[2], [x24]\n" - "st1 { v29.s }[2], [x23]\n" - "st1 { v24.s }[2], [x22]\n" + "st1 { v12.s }[2], [x23]\n" + "st1 { v29.s }[2], [x22]\n" + "st1 { v24.s }[2], [x21]\n" "b 174f\n" "165:" // Height 4: Partial direct writeback: partial_1_16 "tbz x14, #0, 174f\n" "str s17, [x13, #0x0]\n" - "str s12, [x24, #0x0]\n" - "str s29, [x23, #0x0]\n" - "str s24, [x22, #0x0]\n" + "str s12, [x23, #0x0]\n" + "str s29, [x22, #0x0]\n" + "str s24, [x21, #0x0]\n" "b 174f\n" "166:" // Height 4: Partial direct writeback: partial_8_0 "tbz x14, #3, 170f\n" "st1 { v4.4s }, [x13], #0x10\n" "st1 { v14.4s }, [x13], #0x10\n" - "st1 { v8.4s }, [x24], #0x10\n" - "st1 { v9.4s }, [x24], #0x10\n" - "st1 { v19.4s }, [x23], #0x10\n" - "st1 { v26.4s }, [x23], #0x10\n" - "st1 { v20.4s }, [x22], #0x10\n" - "st1 { v21.4s }, [x22], #0x10\n" + "st1 { v8.4s }, [x23], #0x10\n" + "st1 { v9.4s }, [x23], #0x10\n" + "st1 { v19.4s }, [x22], #0x10\n" + "st1 { v26.4s }, [x22], #0x10\n" + "st1 { v20.4s }, [x21], #0x10\n" + "st1 { v21.4s }, [x21], #0x10\n" "tbz x14, #2, 168f\n" "st1 { v15.4s }, [x13], #0x10\n" - "st1 { v10.4s }, [x24], #0x10\n" - "st1 { v27.4s }, [x23], #0x10\n" - "st1 { v22.4s }, [x22], #0x10\n" + "st1 { v10.4s }, [x23], #0x10\n" + "st1 { v27.4s }, [x22], #0x10\n" + "st1 { v22.4s }, [x21], #0x10\n" "tbz x14, #1, 167f\n" "str d16, [x13], #0x8\n" - "str d11, [x24], #0x8\n" - "str d28, [x23], #0x8\n" - "str d23, [x22], #0x8\n" + "str d11, [x23], #0x8\n" + "str d28, [x22], #0x8\n" + "str d23, [x21], #0x8\n" "tbz x14, #0, 174f\n" "st1 { v16.s }[2], [x13]\n" - "st1 { v11.s }[2], [x24]\n" - "st1 { v28.s }[2], [x23]\n" - "st1 { v23.s }[2], [x22]\n" + "st1 { v11.s }[2], [x23]\n" + "st1 { v28.s }[2], [x22]\n" + "st1 { v23.s }[2], [x21]\n" "b 174f\n" "167:" // Height 4: Partial direct writeback: partial_1_12 "tbz x14, #0, 174f\n" "str s16, [x13, #0x0]\n" - "str s11, [x24, #0x0]\n" - "str s28, [x23, #0x0]\n" - "str s23, [x22, #0x0]\n" + "str s11, [x23, #0x0]\n" + "str s28, [x22, #0x0]\n" + "str s23, [x21, #0x0]\n" "b 174f\n" "168:" // Height 4: Partial direct writeback: partial_2_8 "tbz x14, #1, 169f\n" "str d15, [x13], #0x8\n" - "str d10, [x24], #0x8\n" - "str d27, [x23], #0x8\n" - "str d22, [x22], #0x8\n" + "str d10, [x23], #0x8\n" + "str d27, [x22], #0x8\n" + "str d22, [x21], #0x8\n" "tbz x14, #0, 174f\n" "st1 { v15.s }[2], [x13]\n" - "st1 { v10.s }[2], [x24]\n" - "st1 { v27.s }[2], [x23]\n" - "st1 { v22.s }[2], [x22]\n" + "st1 { v10.s }[2], [x23]\n" + "st1 { v27.s }[2], [x22]\n" + "st1 { v22.s }[2], [x21]\n" "b 174f\n" "169:" // Height 4: Partial direct writeback: partial_1_8 "tbz x14, #0, 174f\n" "str s15, [x13, #0x0]\n" - "str s10, [x24, #0x0]\n" - "str s27, [x23, #0x0]\n" - "str s22, [x22, #0x0]\n" + "str s10, [x23, #0x0]\n" + "str s27, [x22, #0x0]\n" + "str s22, [x21, #0x0]\n" "b 174f\n" "170:" // Height 4: Partial direct writeback: partial_4_0 "tbz x14, #2, 172f\n" "st1 { v4.4s }, [x13], #0x10\n" - "st1 { v8.4s }, [x24], #0x10\n" - "st1 { v19.4s }, [x23], #0x10\n" - "st1 { v20.4s }, [x22], #0x10\n" + "st1 { v8.4s }, [x23], #0x10\n" + "st1 { v19.4s }, [x22], #0x10\n" + "st1 { v20.4s }, [x21], #0x10\n" "tbz x14, #1, 171f\n" "str d14, [x13], #0x8\n" - "str d9, [x24], #0x8\n" - "str d26, [x23], #0x8\n" - "str d21, [x22], 
#0x8\n" + "str d9, [x23], #0x8\n" + "str d26, [x22], #0x8\n" + "str d21, [x21], #0x8\n" "tbz x14, #0, 174f\n" "st1 { v14.s }[2], [x13]\n" - "st1 { v9.s }[2], [x24]\n" - "st1 { v26.s }[2], [x23]\n" - "st1 { v21.s }[2], [x22]\n" + "st1 { v9.s }[2], [x23]\n" + "st1 { v26.s }[2], [x22]\n" + "st1 { v21.s }[2], [x21]\n" "b 174f\n" "171:" // Height 4: Partial direct writeback: partial_1_4 "tbz x14, #0, 174f\n" "str s14, [x13, #0x0]\n" - "str s9, [x24, #0x0]\n" - "str s26, [x23, #0x0]\n" - "str s21, [x22, #0x0]\n" + "str s9, [x23, #0x0]\n" + "str s26, [x22, #0x0]\n" + "str s21, [x21, #0x0]\n" "b 174f\n" "172:" // Height 4: Partial direct writeback: partial_2_0 "tbz x14, #1, 173f\n" "str d4, [x13], #0x8\n" - "str d8, [x24], #0x8\n" - "str d19, [x23], #0x8\n" - "str d20, [x22], #0x8\n" + "str d8, [x23], #0x8\n" + "str d19, [x22], #0x8\n" + "str d20, [x21], #0x8\n" "tbz x14, #0, 174f\n" "st1 { v4.s }[2], [x13]\n" - "st1 { v8.s }[2], [x24]\n" - "st1 { v19.s }[2], [x23]\n" - "st1 { v20.s }[2], [x22]\n" + "st1 { v8.s }[2], [x23]\n" + "st1 { v19.s }[2], [x22]\n" + "st1 { v20.s }[2], [x21]\n" "b 174f\n" "173:" // Height 4: Partial direct writeback: partial_1_0 "str s4, [x13, #0x0]\n" - "str s8, [x24, #0x0]\n" - "str s19, [x23, #0x0]\n" - "str s20, [x22, #0x0]\n" + "str s8, [x23, #0x0]\n" + "str s19, [x22, #0x0]\n" + "str s20, [x21, #0x0]\n" "174:" // Height 4: Partial direct writeback: Done "b 176f\n" "175:" // Height 4: Full writeback @@ -2521,24 +2518,24 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 ( "str q17, [x13, #0x40]\n" "str q18, [x13, #0x50]\n" "add x13, x13, #0x60\n" - "str q8, [x24, #0x0]\n" - "str q9, [x24, #0x10]\n" - "str q10, [x24, #0x20]\n" - "str q11, [x24, #0x30]\n" - "str q12, [x24, #0x40]\n" - "str q13, [x24, #0x50]\n" - "str q19, [x23, #0x0]\n" - "str q26, [x23, #0x10]\n" - "str q27, [x23, #0x20]\n" - "str q28, [x23, #0x30]\n" - "str q29, [x23, #0x40]\n" - "str q30, [x23, #0x50]\n" - "str q20, [x22, #0x0]\n" - "str q21, [x22, #0x10]\n" - "str q22, [x22, #0x20]\n" - "str q23, [x22, #0x30]\n" - "str q24, [x22, #0x40]\n" - "str q25, [x22, #0x50]\n" + "str q8, [x23, #0x0]\n" + "str q9, [x23, #0x10]\n" + "str q10, [x23, #0x20]\n" + "str q11, [x23, #0x30]\n" + "str q12, [x23, #0x40]\n" + "str q13, [x23, #0x50]\n" + "str q19, [x22, #0x0]\n" + "str q26, [x22, #0x10]\n" + "str q27, [x22, #0x20]\n" + "str q28, [x22, #0x30]\n" + "str q29, [x22, #0x40]\n" + "str q30, [x22, #0x50]\n" + "str q20, [x21, #0x0]\n" + "str q21, [x21, #0x10]\n" + "str q22, [x21, #0x20]\n" + "str q23, [x21, #0x30]\n" + "str q24, [x21, #0x40]\n" + "str q25, [x21, #0x50]\n" "176:" // Height 4: Writeback done "subs x14, x14, #0x18\n" "bgt 134b\n" @@ -2554,8 +2551,8 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 ( "madd %x[input_ptr], x20, x21, %x[input_ptr]\n" "b 1b\n" "178:" // Exit - : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr) - : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" 
(offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)) + : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr) + : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" ); } |
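
A note for readers of the operand-list change at the end of the diff: the regenerated kernel stops keeping `output_ptr` and `bias` in the `KernelArgs` structure (loaded inside the assembly through `offsetof`) and instead binds them directly as inline-asm register operands (`[output_ptr] "+&r" (output_ptr)`, `[bias] "r" (bias)`). Below is a minimal, self-contained sketch of that pattern; the struct layout, function name and register choices are illustrative only, not the generated kernel's.

```cpp
#include <cstddef>

// Illustrative stand-in for the kernel's argument block; only fields that are
// still read through args_ptr remain in the struct.
struct KernelArgs {
    size_t output_offset;
};

void sketch(const float *bias, void *output_ptr, const KernelArgs &ka) {
#if defined(__aarch64__)
    __asm__ __volatile__(
        // Old scheme: load the pointer through the args struct, e.g.
        //   ldr x15, [%x[args_ptr], %[offsetof_bias]]
        // New scheme: the compiler has already placed the pointer in a
        // register, so a register-to-register move suffices.
        "mov x15, %x[bias]\n"
        "mov x13, %x[output_ptr]\n"
        // Fields that stay in the struct are still loaded via offsetof:
        "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
        : [output_ptr] "+&r" (output_ptr)   // read-write, early-clobber
        : [args_ptr] "r" (&ka),
          [bias] "r" (bias),
          [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset))
        : "cc", "memory", "x13", "x15", "x20");
#else
    (void)bias; (void)output_ptr; (void)ka;
#endif
}
```

The `"+&r"` constraint mirrors the real operand list above: the assembly advances the output pointer as it walks the M dimension, so it must be declared read-write and early-clobbered rather than passed as a plain input.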
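Most of the remaining churn in the hunks above is register renumbering (the three upper output-row pointers move from x24/x23/x22 to x23/x22/x21) and instruction rescheduling around an unchanged computation: fp32 LHS data is narrowed to bfloat16 with BFCVTN/BFCVTN2 and fed to BFMMLA, which accumulates a 2x2 fp32 tile from 2x4 bf16 panels; that pairing is also why the accumulators are interleaved with zip1/zip2 when loaded and de-interleaved with uzp1/uzp2 before writeback. A hedged intrinsics sketch of one such step follows, assuming a toolchain that defines __ARM_FEATURE_BF16_VECTOR_ARITHMETIC; the kernel itself emits raw `.inst` encodings instead, and the helper name is invented for illustration.

```cpp
#include <arm_neon.h>

#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)
// One inner-loop step: narrow eight fp32 LHS values to bf16, then let BFMMLA
// accumulate acc += A(2x4) * B(4x2), where A and B are bf16 panels and acc
// holds a 2x2 fp32 tile packed into one 128-bit vector.
static inline float32x4_t mmla_step(float32x4_t acc,
                                    float32x4_t a_lo, float32x4_t a_hi,
                                    bfloat16x8_t b_panel)
{
    bfloat16x8_t a = vcvtq_low_bf16_f32(a_lo);  // BFCVTN:  fills lanes 0..3
    a = vcvtq_high_bf16_f32(a, a_hi);           // BFCVTN2: fills lanes 4..7
    return vbfmmlaq_f32(acc, a, b_panel);       // BFMMLA
}
#endif
```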