From 74921eee924625426429044decefe3673561b174 Mon Sep 17 00:00:00 2001 From: Michael Tyler Date: Wed, 12 Apr 2023 17:43:17 +0100 Subject: Update CPU kernel implementations and guard directives Resolves COMPMID-6023 Change-Id: I868975d14c4f98af6716726feda22405a6a4c891 Signed-off-by: Michael Tyler Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9686 Tested-by: Arm Jenkins Reviewed-by: Viet-Hoa Do Comments-Addressed: Arm Jenkins Benchmark: Arm Jenkins --- .../kernels/a64_hybrid_s8qs_dot_6x16/a55.cpp | 4462 ++++++++++---------- 1 file changed, 2230 insertions(+), 2232 deletions(-) (limited to 'src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/a55.cpp') diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/a55.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/a55.cpp index b97b63cdce..38a57b0741 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/a55.cpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/a55.cpp @@ -85,7 +85,6 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( flags |= 0x20; } __asm__ __volatile__( - "1:" // Row loop "cmp %x[M], #0x6\n" "bge 136f\n" @@ -111,11 +110,11 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( "4:" // Height 1: String loop "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n" "ldr w13, [x20, x14, LSL #0x2]\n" - "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n" + "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n" "tbz %x[flags], #3, 5f\n" - "ldr x21, [%x[input_ptr], x14, LSL #0x3]\n" - "add x21, x21, x20, LSL #3\n" - "ldr x12, [x21, #0x0]\n" + "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n" + "add x20, x20, x21, LSL #3\n" + "ldr x12, [x20, #0x0]\n" "cbnz x14, 6f\n" "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n" "add x12, x12, x20\n" @@ -132,129 +131,129 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( "blt 8f\n" "7:" // Height 1: Multiply loop: Main loop head ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n" - "ldr d6, [x15, #0x20]\n" + "ldr d17, [x15, #0x20]\n" "ldr x20, [x15, #0x28]\n" ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n" - "ldr d7, [x15, #0x30]\n" - "mov v6.d[1], x20\n" - "ldr x11, [x15, #0x38]\n" - "mov v7.d[1], x11\n" - ".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n" - "ldr d6, [x15, #0x40]\n" + "ldr d16, [x15, #0x30]\n" + "mov v17.d[1], x20\n" + "ldr x20, [x15, #0x38]\n" + "mov v16.d[1], x20\n" + ".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n" + "ldr d17, [x15, #0x40]\n" "ldr x20, [x15, #0x48]\n" - ".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n" - "ldr d7, [x15, #0x50]\n" - "mov v6.d[1], x20\n" - "ldr x11, [x15, #0x58]\n" - "mov v7.d[1], x11\n" - ".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n" - "ldr d6, [x15, #0x60]\n" + ".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n" + "ldr d16, [x15, #0x50]\n" + "mov v17.d[1], x20\n" + "ldr x20, [x15, #0x58]\n" + "mov v16.d[1], x20\n" + ".inst 0x4fa0e228 // sdot v8.4s, v17.16b, v0.4b[1]\n" + "ldr d17, [x15, #0x60]\n" "ldr x20, [x15, #0x68]\n" - ".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n" - "ldr d7, [x15, #0x70]\n" - "mov v6.d[1], x20\n" - "ldr x11, [x15, #0x78]\n" - "mov v7.d[1], x11\n" - ".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n" - "ldr d6, [x15, #0x80]\n" + ".inst 0x4fa0e209 // sdot v9.4s, v16.16b, v0.4b[1]\n" + "ldr d16, [x15, #0x70]\n" + "mov v17.d[1], x20\n" + "ldr x20, [x15, #0x78]\n" + "mov v16.d[1], x20\n" + ".inst 0x4fa0e22a // sdot v10.4s, v17.16b, v0.4b[1]\n" + "ldr d17, [x15, #0x80]\n" "ldr x20, [x15, #0x88]\n" - ".inst 0x4fa0e0eb 
// sdot v11.4s, v7.16b, v0.4b[1]\n" - "ldr d7, [x15, #0x90]\n" - "mov v6.d[1], x20\n" - "ldr x11, [x15, #0x98]\n" - "mov v7.d[1], x11\n" - ".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n" - "ldr d6, [x15, #0xa0]\n" + ".inst 0x4fa0e20b // sdot v11.4s, v16.16b, v0.4b[1]\n" + "ldr d16, [x15, #0x90]\n" + "mov v17.d[1], x20\n" + "ldr x20, [x15, #0x98]\n" + "mov v16.d[1], x20\n" + ".inst 0x4f80ea28 // sdot v8.4s, v17.16b, v0.4b[2]\n" + "ldr d17, [x15, #0xa0]\n" "ldr x20, [x15, #0xa8]\n" - ".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n" - "ldr d7, [x15, #0xb0]\n" - "mov v6.d[1], x20\n" - "ldr x11, [x15, #0xb8]\n" - "mov v7.d[1], x11\n" - ".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n" - "ldr d6, [x15, #0xc0]\n" + ".inst 0x4f80ea09 // sdot v9.4s, v16.16b, v0.4b[2]\n" + "ldr d16, [x15, #0xb0]\n" + "mov v17.d[1], x20\n" + "ldr x20, [x15, #0xb8]\n" + "mov v16.d[1], x20\n" + ".inst 0x4f80ea2a // sdot v10.4s, v17.16b, v0.4b[2]\n" + "ldr d17, [x15, #0xc0]\n" "ldr x20, [x15, #0xc8]\n" - ".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n" - "ldr d7, [x15, #0xd0]\n" - "mov v6.d[1], x20\n" - "ldr x11, [x15, #0xd8]\n" - "mov v7.d[1], x11\n" - ".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n" - "ldr d6, [x15, #0xe0]\n" + ".inst 0x4f80ea0b // sdot v11.4s, v16.16b, v0.4b[2]\n" + "ldr d16, [x15, #0xd0]\n" + "mov v17.d[1], x20\n" + "ldr x20, [x15, #0xd8]\n" + "mov v16.d[1], x20\n" + ".inst 0x4fa0ea28 // sdot v8.4s, v17.16b, v0.4b[3]\n" + "ldr d17, [x15, #0xe0]\n" "ldr x20, [x15, #0xe8]\n" - ".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n" - "ldr d7, [x15, #0xf0]\n" - "mov v6.d[1], x20\n" - "ldr x11, [x15, #0xf8]\n" - "mov v7.d[1], x11\n" + ".inst 0x4fa0ea09 // sdot v9.4s, v16.16b, v0.4b[3]\n" + "ldr d16, [x15, #0xf0]\n" + "mov v17.d[1], x20\n" + "ldr x20, [x15, #0xf8]\n" + "mov v16.d[1], x20\n" "add x12, x12, #0x10\n" "add x15, x15, #0x100\n" - ".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n" + ".inst 0x4fa0ea2a // sdot v10.4s, v17.16b, v0.4b[3]\n" "ldr d6, [x15, #0x0]\n" "ldr x20, [x15, #0x8]\n" - ".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n" + ".inst 0x4fa0ea0b // sdot v11.4s, v16.16b, v0.4b[3]\n" "ldr d0, [x12, #0x0]\n" "sub x13, x13, #0x10\n" "ldr d7, [x15, #0x10]\n" "cmp x13, #0x20\n" - "ldr x10, [x12, #0x8]\n" + "ldr x21, [x12, #0x8]\n" "mov v6.d[1], x20\n" - "ldr x11, [x15, #0x18]\n" - "mov v0.d[1], x10\n" - "mov v7.d[1], x11\n" + "ldr x20, [x15, #0x18]\n" + "mov v0.d[1], x21\n" + "mov v7.d[1], x20\n" "prfm pldl1keep, [x12, #0x80]\n" "bge 7b\n" "8:" // Height 1: Multiply loop: Single iteration only ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n" - "ldr q6, [x15, #0x20]\n" + "ldr q17, [x15, #0x20]\n" ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n" - "ldr q7, [x15, #0x30]\n" - ".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n" - "ldr q6, [x15, #0x40]\n" - ".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n" - "ldr q7, [x15, #0x50]\n" - ".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n" - "ldr q6, [x15, #0x60]\n" - ".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n" - "ldr q7, [x15, #0x70]\n" - ".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n" - "ldr q6, [x15, #0x80]\n" - ".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n" - "ldr q7, [x15, #0x90]\n" - ".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n" - "ldr q6, [x15, #0xa0]\n" - ".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n" - "ldr q7, [x15, #0xb0]\n" - ".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n" - "ldr q6, [x15, #0xc0]\n" - ".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n" - 
"ldr q7, [x15, #0xd0]\n" - ".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n" - "ldr q6, [x15, #0xe0]\n" - ".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n" - "ldr q7, [x15, #0xf0]\n" + "ldr q16, [x15, #0x30]\n" + ".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n" + "ldr q17, [x15, #0x40]\n" + ".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n" + "ldr q16, [x15, #0x50]\n" + ".inst 0x4fa0e228 // sdot v8.4s, v17.16b, v0.4b[1]\n" + "ldr q17, [x15, #0x60]\n" + ".inst 0x4fa0e209 // sdot v9.4s, v16.16b, v0.4b[1]\n" + "ldr q16, [x15, #0x70]\n" + ".inst 0x4fa0e22a // sdot v10.4s, v17.16b, v0.4b[1]\n" + "ldr q17, [x15, #0x80]\n" + ".inst 0x4fa0e20b // sdot v11.4s, v16.16b, v0.4b[1]\n" + "ldr q16, [x15, #0x90]\n" + ".inst 0x4f80ea28 // sdot v8.4s, v17.16b, v0.4b[2]\n" + "ldr q17, [x15, #0xa0]\n" + ".inst 0x4f80ea09 // sdot v9.4s, v16.16b, v0.4b[2]\n" + "ldr q16, [x15, #0xb0]\n" + ".inst 0x4f80ea2a // sdot v10.4s, v17.16b, v0.4b[2]\n" + "ldr q17, [x15, #0xc0]\n" + ".inst 0x4f80ea0b // sdot v11.4s, v16.16b, v0.4b[2]\n" + "ldr q16, [x15, #0xd0]\n" + ".inst 0x4fa0ea28 // sdot v8.4s, v17.16b, v0.4b[3]\n" + "ldr q17, [x15, #0xe0]\n" + ".inst 0x4fa0ea09 // sdot v9.4s, v16.16b, v0.4b[3]\n" + "ldr q16, [x15, #0xf0]\n" "add x12, x12, #0x10\n" "sub x13, x13, #0x10\n" - ".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n" + ".inst 0x4fa0ea2a // sdot v10.4s, v17.16b, v0.4b[3]\n" "prfm pldl1keep, [x12, #0x80]\n" - ".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n" + ".inst 0x4fa0ea0b // sdot v11.4s, v16.16b, v0.4b[3]\n" "add x15, x15, #0x100\n" "9:" // Height 1: Multiply loop: Main loop skip "cbz x13, 14f\n" "cmp x13, #0x4\n" "blt 11f\n" "10:" // Height 1: Multiply loop: Odd block loop - "ldr s0, [x12], #0x4\n" + "ldr s18, [x12], #0x4\n" "sub x13, x13, #0x4\n" - "ldr q6, [x15, #0x0]\n" - ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n" - "ldr q7, [x15, #0x10]\n" - ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n" - "ldr q6, [x15, #0x20]\n" + "ldr q16, [x15, #0x0]\n" + ".inst 0x4f92e208 // sdot v8.4s, v16.16b, v18.4b[0]\n" + "ldr q16, [x15, #0x10]\n" + ".inst 0x4f92e209 // sdot v9.4s, v16.16b, v18.4b[0]\n" + "ldr q17, [x15, #0x20]\n" "cmp x13, #0x4\n" - "ldr q7, [x15, #0x30]\n" - ".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n" - ".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n" + "ldr q16, [x15, #0x30]\n" + ".inst 0x4f92e22a // sdot v10.4s, v17.16b, v18.4b[0]\n" + ".inst 0x4f92e20b // sdot v11.4s, v16.16b, v18.4b[0]\n" "add x15, x15, #0x40\n" "bge 10b\n" "11:" // Height 1: Multiply loop: Skip odd blocks @@ -267,28 +266,28 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( "12:" // Height 1: Multiply loop: Ragged operand read: partial_1_0 "ldr b0, [x12, #0x0]\n" "13:" // Height 1: Multiply loop: Ragged operand read: Done - "ldr q6, [x15, #0x0]\n" - ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n" - "ldr q7, [x15, #0x10]\n" - ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n" - "ldr q6, [x15, #0x20]\n" - ".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n" - "ldr q7, [x15, #0x30]\n" - ".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n" + "ldr q16, [x15, #0x0]\n" + ".inst 0x4f80e208 // sdot v8.4s, v16.16b, v0.4b[0]\n" + "ldr q16, [x15, #0x10]\n" + ".inst 0x4f80e209 // sdot v9.4s, v16.16b, v0.4b[0]\n" + "ldr q16, [x15, #0x20]\n" + ".inst 0x4f80e20a // sdot v10.4s, v16.16b, v0.4b[0]\n" + "ldr q16, [x15, #0x30]\n" + ".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n" "add x15, x15, #0x40\n" "14:" // Height 1: Multiply loop: No odd multiplies "ldr w20, [%x[args_ptr], 
%[offsetof_num_strings]]\n" "add x14, x14, #0x1\n" "cmp x14, x20\n" "bne 4b\n" - "ldr q0, [x6, #0x0]\n" - "add v8.4s, v8.4s, v0.4s\n" - "ldr q1, [x6, #0x10]\n" - "add v9.4s, v9.4s, v1.4s\n" - "ldr q2, [x6, #0x20]\n" - "add v10.4s, v10.4s, v2.4s\n" - "ldr q3, [x6, #0x30]\n" - "add v11.4s, v11.4s, v3.4s\n" + "ldr q16, [x6, #0x0]\n" + "add v8.4s, v8.4s, v16.4s\n" + "ldr q16, [x6, #0x10]\n" + "add v9.4s, v9.4s, v16.4s\n" + "ldr q16, [x6, #0x20]\n" + "add v10.4s, v10.4s, v16.4s\n" + "ldr q16, [x6, #0x30]\n" + "add v11.4s, v11.4s, v16.4s\n" "prfm pstl1keep, [x17, #0x0]\n" "add x6, x6, #0x40\n" "tbz %x[flags], #4, 15f\n" @@ -304,10 +303,10 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( "add x7, x7, #0x40\n" "b 16f\n" "15:" // Height 1: per layer parameters - "add x25, %x[qp], %[per_layer_right_shift]\n" - "ld1r { v0.4s }, [x25]\n" - "add x25, %x[qp], %[per_layer_mul]\n" - "ld1r { v4.4s }, [x25]\n" + "add x20, %x[qp], %[per_layer_right_shift]\n" + "ld1r { v0.4s }, [x20]\n" + "add x20, %x[qp], %[per_layer_mul]\n" + "ld1r { v4.4s }, [x20]\n" "mov v1.16b, v0.16b\n" "mov v5.16b, v4.16b\n" "mov v2.16b, v0.16b\n" @@ -320,45 +319,45 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( "sqrdmulh v10.4s, v10.4s, v6.4s\n" "sqrdmulh v11.4s, v11.4s, v7.4s\n" "tbz %x[flags], #5, 17f\n" - "and v4.16b, v8.16b, v0.16b\n" - "and v5.16b, v9.16b, v1.16b\n" - "and v6.16b, v10.16b, v2.16b\n" - "and v7.16b, v11.16b, v3.16b\n" - "sshr v4.4s, v4.4s, #0x1f\n" - "sshr v5.4s, v5.4s, #0x1f\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sshr v7.4s, v7.4s, #0x1f\n" - "sqadd v8.4s, v8.4s, v4.4s\n" - "sqadd v9.4s, v9.4s, v5.4s\n" - "sqadd v10.4s, v10.4s, v6.4s\n" - "sqadd v11.4s, v11.4s, v7.4s\n" + "and v19.16b, v8.16b, v0.16b\n" + "and v18.16b, v9.16b, v1.16b\n" + "and v17.16b, v10.16b, v2.16b\n" + "and v16.16b, v11.16b, v3.16b\n" + "sshr v19.4s, v19.4s, #0x1f\n" + "sshr v18.4s, v18.4s, #0x1f\n" + "sshr v17.4s, v17.4s, #0x1f\n" + "sshr v16.4s, v16.4s, #0x1f\n" + "sqadd v8.4s, v8.4s, v19.4s\n" + "sqadd v9.4s, v9.4s, v18.4s\n" + "sqadd v10.4s, v10.4s, v17.4s\n" + "sqadd v11.4s, v11.4s, v16.4s\n" "17:" // Height 1: no shift correction "srshl v8.4s, v8.4s, v0.4s\n" "srshl v9.4s, v9.4s, v1.4s\n" "srshl v10.4s, v10.4s, v2.4s\n" "srshl v11.4s, v11.4s, v3.4s\n" - "add x25, %x[qp], %[c_offset]\n" - "ld1r { v4.4s }, [x25]\n" - "add v8.4s, v8.4s, v4.4s\n" - "add v9.4s, v9.4s, v4.4s\n" - "add v10.4s, v10.4s, v4.4s\n" - "add v11.4s, v11.4s, v4.4s\n" - "add x25, %x[qp], %[maxval]\n" - "ld1r { v6.4s }, [x25]\n" - "smin v8.4s, v8.4s, v6.4s\n" - "smin v9.4s, v9.4s, v6.4s\n" - "smin v10.4s, v10.4s, v6.4s\n" - "smin v11.4s, v11.4s, v6.4s\n" - "add x25, %x[qp], %[minval]\n" - "ld1r { v5.4s }, [x25]\n" - "smax v8.4s, v8.4s, v5.4s\n" - "smax v9.4s, v9.4s, v5.4s\n" - "smax v10.4s, v10.4s, v5.4s\n" - "smax v11.4s, v11.4s, v5.4s\n" + "add x20, %x[qp], %[c_offset]\n" + "ld1r { v16.4s }, [x20]\n" + "add v8.4s, v8.4s, v16.4s\n" + "add v9.4s, v9.4s, v16.4s\n" + "add v10.4s, v10.4s, v16.4s\n" + "add v11.4s, v11.4s, v16.4s\n" + "add x20, %x[qp], %[maxval]\n" + "ld1r { v16.4s }, [x20]\n" + "smin v8.4s, v8.4s, v16.4s\n" + "smin v9.4s, v9.4s, v16.4s\n" + "smin v10.4s, v10.4s, v16.4s\n" + "smin v11.4s, v11.4s, v16.4s\n" + "add x20, %x[qp], %[minval]\n" + "ld1r { v16.4s }, [x20]\n" + "smax v8.4s, v8.4s, v16.4s\n" + "smax v9.4s, v9.4s, v16.4s\n" + "smax v10.4s, v10.4s, v16.4s\n" + "smax v11.4s, v11.4s, v16.4s\n" "uzp1 v8.8h, v8.8h, v9.8h\n" - "uzp1 v9.8h, v10.8h, v11.8h\n" + "uzp1 v16.8h, v10.8h, v11.8h\n" "cmp x16, #0x10\n" - "uzp1 v8.16b, v8.16b, v9.16b\n" + "uzp1 v8.16b, v8.16b, v16.16b\n" 
"bge 26f\n" "tbz x16, #3, 21f\n" "str d8, [x17], #0x8\n" @@ -433,247 +432,247 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( "31:" // Height 2: String loop "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n" "ldr w13, [x20, x14, LSL #0x2]\n" - "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n" + "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n" "tbz %x[flags], #3, 32f\n" - "ldr x21, [%x[input_ptr], x14, LSL #0x3]\n" - "add x21, x21, x20, LSL #3\n" - "ldr x12, [x21, #0x0]\n" - "ldr x9, [x21, #0x8]\n" + "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n" + "add x20, x20, x21, LSL #3\n" + "ldr x12, [x20, #0x0]\n" + "ldr x11, [x20, #0x8]\n" "cbnz x14, 33f\n" "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n" "add x12, x12, x20\n" - "add x9, x9, x20\n" + "add x11, x11, x20\n" "b 33f\n" "32:" // Height 2: setup direct input "mov x12, %x[input_ptr]\n" - "add x9, x12, x20\n" + "add x11, x12, x21\n" "33:" // Height 2: input setup done "cmp x13, #0x10\n" "blt 36f\n" "ldr q0, [x12, #0x0]\n" "cmp x13, #0x20\n" - "ldr q1, [x9, #0x0]\n" + "ldr q1, [x11, #0x0]\n" "ldr q6, [x15, #0x0]\n" "ldr q7, [x15, #0x10]\n" "blt 35f\n" "34:" // Height 2: Multiply loop: Main loop head ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n" - "ldr x20, [x15, #0x28]\n" + "ldr x21, [x15, #0x28]\n" ".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n" - "ldr d6, [x15, #0x20]\n" + "ldr d17, [x15, #0x20]\n" ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n" - "ldr x11, [x15, #0x38]\n" + "ldr x20, [x15, #0x38]\n" ".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n" - "ldr d7, [x15, #0x30]\n" - "mov v6.d[1], x20\n" - ".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n" - "mov v7.d[1], x11\n" - ".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n" - "ldr d6, [x15, #0x40]\n" - ".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n" + "ldr d16, [x15, #0x30]\n" + "mov v17.d[1], x21\n" + ".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n" + "mov v16.d[1], x20\n" + ".inst 0x4f81e22e // sdot v14.4s, v17.16b, v1.4b[0]\n" + "ldr d17, [x15, #0x40]\n" + ".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n" "ldr x20, [x15, #0x48]\n" - ".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n" - "ldr d7, [x15, #0x50]\n" - "mov v6.d[1], x20\n" - "ldr x11, [x15, #0x58]\n" - "mov v7.d[1], x11\n" - ".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n" - "ldr x20, [x15, #0x68]\n" - ".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n" - "ldr d6, [x15, #0x60]\n" - ".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n" - "ldr x11, [x15, #0x78]\n" - ".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n" - "ldr d7, [x15, #0x70]\n" - "mov v6.d[1], x20\n" - ".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n" - "mov v7.d[1], x11\n" - ".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n" - "ldr d6, [x15, #0x80]\n" - ".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n" + ".inst 0x4f81e20f // sdot v15.4s, v16.16b, v1.4b[0]\n" + "ldr d16, [x15, #0x50]\n" + "mov v17.d[1], x20\n" + "ldr x20, [x15, #0x58]\n" + "mov v16.d[1], x20\n" + ".inst 0x4fa0e228 // sdot v8.4s, v17.16b, v0.4b[1]\n" + "ldr x21, [x15, #0x68]\n" + ".inst 0x4fa1e22c // sdot v12.4s, v17.16b, v1.4b[1]\n" + "ldr d17, [x15, #0x60]\n" + ".inst 0x4fa0e209 // sdot v9.4s, v16.16b, v0.4b[1]\n" + "ldr x20, [x15, #0x78]\n" + ".inst 0x4fa1e20d // sdot v13.4s, v16.16b, v1.4b[1]\n" + "ldr d16, [x15, #0x70]\n" + "mov v17.d[1], x21\n" + ".inst 0x4fa0e22a // sdot v10.4s, v17.16b, v0.4b[1]\n" + "mov v16.d[1], x20\n" + ".inst 0x4fa1e22e // sdot v14.4s, v17.16b, v1.4b[1]\n" + "ldr d17, [x15, #0x80]\n" + 
".inst 0x4fa0e20b // sdot v11.4s, v16.16b, v0.4b[1]\n" "ldr x20, [x15, #0x88]\n" - ".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n" - "ldr d7, [x15, #0x90]\n" - "mov v6.d[1], x20\n" - "ldr x11, [x15, #0x98]\n" - "mov v7.d[1], x11\n" - ".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n" - "ldr x20, [x15, #0xa8]\n" - ".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n" - "ldr d6, [x15, #0xa0]\n" - ".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n" - "ldr x11, [x15, #0xb8]\n" - ".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n" - "ldr d7, [x15, #0xb0]\n" - "mov v6.d[1], x20\n" - ".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n" - "mov v7.d[1], x11\n" - ".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n" - "ldr d6, [x15, #0xc0]\n" - ".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n" + ".inst 0x4fa1e20f // sdot v15.4s, v16.16b, v1.4b[1]\n" + "ldr d16, [x15, #0x90]\n" + "mov v17.d[1], x20\n" + "ldr x20, [x15, #0x98]\n" + "mov v16.d[1], x20\n" + ".inst 0x4f80ea28 // sdot v8.4s, v17.16b, v0.4b[2]\n" + "ldr x21, [x15, #0xa8]\n" + ".inst 0x4f81ea2c // sdot v12.4s, v17.16b, v1.4b[2]\n" + "ldr d17, [x15, #0xa0]\n" + ".inst 0x4f80ea09 // sdot v9.4s, v16.16b, v0.4b[2]\n" + "ldr x20, [x15, #0xb8]\n" + ".inst 0x4f81ea0d // sdot v13.4s, v16.16b, v1.4b[2]\n" + "ldr d16, [x15, #0xb0]\n" + "mov v17.d[1], x21\n" + ".inst 0x4f80ea2a // sdot v10.4s, v17.16b, v0.4b[2]\n" + "mov v16.d[1], x20\n" + ".inst 0x4f81ea2e // sdot v14.4s, v17.16b, v1.4b[2]\n" + "ldr d17, [x15, #0xc0]\n" + ".inst 0x4f80ea0b // sdot v11.4s, v16.16b, v0.4b[2]\n" "ldr x20, [x15, #0xc8]\n" - ".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n" - "ldr d7, [x15, #0xd0]\n" - "mov v6.d[1], x20\n" - "ldr x11, [x15, #0xd8]\n" - "mov v7.d[1], x11\n" - ".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n" - "ldr x20, [x15, #0xe8]\n" - ".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n" - "ldr d6, [x15, #0xe0]\n" - ".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n" - "ldr x11, [x15, #0xf8]\n" - ".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n" - "ldr d7, [x15, #0xf0]\n" - "mov v6.d[1], x20\n" + ".inst 0x4f81ea0f // sdot v15.4s, v16.16b, v1.4b[2]\n" + "ldr d16, [x15, #0xd0]\n" + "mov v17.d[1], x20\n" + "ldr x20, [x15, #0xd8]\n" + "mov v16.d[1], x20\n" + ".inst 0x4fa0ea28 // sdot v8.4s, v17.16b, v0.4b[3]\n" + "ldr x21, [x15, #0xe8]\n" + ".inst 0x4fa1ea2c // sdot v12.4s, v17.16b, v1.4b[3]\n" + "ldr d17, [x15, #0xe0]\n" + ".inst 0x4fa0ea09 // sdot v9.4s, v16.16b, v0.4b[3]\n" + "ldr x20, [x15, #0xf8]\n" + ".inst 0x4fa1ea0d // sdot v13.4s, v16.16b, v1.4b[3]\n" + "ldr d16, [x15, #0xf0]\n" + "mov v17.d[1], x21\n" "add x12, x12, #0x10\n" - "mov v7.d[1], x11\n" - "add x9, x9, #0x10\n" + "mov v16.d[1], x20\n" + "add x11, x11, #0x10\n" "add x15, x15, #0x100\n" - ".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n" - ".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n" + ".inst 0x4fa0ea2a // sdot v10.4s, v17.16b, v0.4b[3]\n" + ".inst 0x4fa1ea2e // sdot v14.4s, v17.16b, v1.4b[3]\n" "ldr d6, [x15, #0x0]\n" - "ldr x20, [x15, #0x8]\n" - ".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n" + "ldr x21, [x15, #0x8]\n" + ".inst 0x4fa0ea0b // sdot v11.4s, v16.16b, v0.4b[3]\n" "ldr d0, [x12, #0x0]\n" - ".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n" - "ldr d1, [x9, #0x0]\n" + ".inst 0x4fa1ea0f // sdot v15.4s, v16.16b, v1.4b[3]\n" + "ldr d1, [x11, #0x0]\n" "sub x13, x13, #0x10\n" "ldr d7, [x15, #0x10]\n" "cmp x13, #0x20\n" - "ldr x10, [x12, #0x8]\n" - "mov v6.d[1], x20\n" - "ldr x28, [x9, #0x8]\n" - "mov v0.d[1], x10\n" - "ldr x11, [x15, 
#0x18]\n" - "mov v1.d[1], x28\n" + "ldr x20, [x12, #0x8]\n" + "mov v6.d[1], x21\n" + "ldr x21, [x11, #0x8]\n" + "mov v0.d[1], x20\n" + "ldr x20, [x15, #0x18]\n" + "mov v1.d[1], x21\n" "prfm pldl1keep, [x12, #0x80]\n" - "mov v7.d[1], x11\n" - "prfm pldl1keep, [x9, #0x80]\n" + "mov v7.d[1], x20\n" + "prfm pldl1keep, [x11, #0x80]\n" "bge 34b\n" "35:" // Height 2: Multiply loop: Single iteration only ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n" "add x12, x12, #0x10\n" ".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n" - "ldr q6, [x15, #0x20]\n" + "ldr q17, [x15, #0x20]\n" ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n" - "add x9, x9, #0x10\n" + "add x11, x11, #0x10\n" ".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n" - "ldr q7, [x15, #0x30]\n" - ".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n" + "ldr q16, [x15, #0x30]\n" + ".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n" "sub x13, x13, #0x10\n" - ".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n" - "ldr q6, [x15, #0x40]\n" - ".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n" + ".inst 0x4f81e22e // sdot v14.4s, v17.16b, v1.4b[0]\n" + "ldr q17, [x15, #0x40]\n" + ".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n" "prfm pldl1keep, [x12, #0x80]\n" - ".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n" - "ldr q7, [x15, #0x50]\n" - ".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n" - "prfm pldl1keep, [x9, #0x80]\n" - ".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n" - "ldr q6, [x15, #0x60]\n" - ".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n" - ".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n" - "ldr q7, [x15, #0x70]\n" - ".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n" - ".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n" - "ldr q6, [x15, #0x80]\n" - ".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n" - ".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n" - "ldr q7, [x15, #0x90]\n" - ".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n" - ".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n" - "ldr q6, [x15, #0xa0]\n" - ".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n" - ".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n" - "ldr q7, [x15, #0xb0]\n" - ".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n" - ".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n" - "ldr q6, [x15, #0xc0]\n" - ".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n" - ".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n" - "ldr q7, [x15, #0xd0]\n" - ".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n" - ".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n" - "ldr q6, [x15, #0xe0]\n" - ".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n" - ".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n" - "ldr q7, [x15, #0xf0]\n" - ".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n" + ".inst 0x4f81e20f // sdot v15.4s, v16.16b, v1.4b[0]\n" + "ldr q16, [x15, #0x50]\n" + ".inst 0x4fa0e228 // sdot v8.4s, v17.16b, v0.4b[1]\n" + "prfm pldl1keep, [x11, #0x80]\n" + ".inst 0x4fa1e22c // sdot v12.4s, v17.16b, v1.4b[1]\n" + "ldr q17, [x15, #0x60]\n" + ".inst 0x4fa0e209 // sdot v9.4s, v16.16b, v0.4b[1]\n" + ".inst 0x4fa1e20d // sdot v13.4s, v16.16b, v1.4b[1]\n" + "ldr q16, [x15, #0x70]\n" + ".inst 0x4fa0e22a // sdot v10.4s, v17.16b, v0.4b[1]\n" + ".inst 0x4fa1e22e // sdot v14.4s, v17.16b, v1.4b[1]\n" + "ldr q17, [x15, #0x80]\n" + ".inst 0x4fa0e20b // sdot v11.4s, v16.16b, v0.4b[1]\n" + ".inst 0x4fa1e20f // sdot v15.4s, v16.16b, v1.4b[1]\n" + "ldr q16, [x15, #0x90]\n" + ".inst 0x4f80ea28 // sdot v8.4s, v17.16b, v0.4b[2]\n" + 
".inst 0x4f81ea2c // sdot v12.4s, v17.16b, v1.4b[2]\n" + "ldr q17, [x15, #0xa0]\n" + ".inst 0x4f80ea09 // sdot v9.4s, v16.16b, v0.4b[2]\n" + ".inst 0x4f81ea0d // sdot v13.4s, v16.16b, v1.4b[2]\n" + "ldr q16, [x15, #0xb0]\n" + ".inst 0x4f80ea2a // sdot v10.4s, v17.16b, v0.4b[2]\n" + ".inst 0x4f81ea2e // sdot v14.4s, v17.16b, v1.4b[2]\n" + "ldr q17, [x15, #0xc0]\n" + ".inst 0x4f80ea0b // sdot v11.4s, v16.16b, v0.4b[2]\n" + ".inst 0x4f81ea0f // sdot v15.4s, v16.16b, v1.4b[2]\n" + "ldr q16, [x15, #0xd0]\n" + ".inst 0x4fa0ea28 // sdot v8.4s, v17.16b, v0.4b[3]\n" + ".inst 0x4fa1ea2c // sdot v12.4s, v17.16b, v1.4b[3]\n" + "ldr q17, [x15, #0xe0]\n" + ".inst 0x4fa0ea09 // sdot v9.4s, v16.16b, v0.4b[3]\n" + ".inst 0x4fa1ea0d // sdot v13.4s, v16.16b, v1.4b[3]\n" + "ldr q16, [x15, #0xf0]\n" + ".inst 0x4fa0ea2a // sdot v10.4s, v17.16b, v0.4b[3]\n" "add x15, x15, #0x100\n" - ".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n" - ".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n" - ".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n" + ".inst 0x4fa1ea2e // sdot v14.4s, v17.16b, v1.4b[3]\n" + ".inst 0x4fa0ea0b // sdot v11.4s, v16.16b, v0.4b[3]\n" + ".inst 0x4fa1ea0f // sdot v15.4s, v16.16b, v1.4b[3]\n" "36:" // Height 2: Multiply loop: Main loop skip "cbz x13, 41f\n" "cmp x13, #0x4\n" "blt 38f\n" "37:" // Height 2: Multiply loop: Odd block loop - "ldr s0, [x12], #0x4\n" + "ldr s19, [x12], #0x4\n" "sub x13, x13, #0x4\n" - "ldr s1, [x9], #0x4\n" + "ldr s18, [x11], #0x4\n" "cmp x13, #0x4\n" - "ldr q6, [x15, #0x0]\n" - ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n" - "ldr q7, [x15, #0x10]\n" - ".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n" - "ldr q6, [x15, #0x20]\n" - ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n" - ".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n" - "ldr q7, [x15, #0x30]\n" - ".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n" + "ldr q17, [x15, #0x0]\n" + ".inst 0x4f93e228 // sdot v8.4s, v17.16b, v19.4b[0]\n" + "ldr q16, [x15, #0x10]\n" + ".inst 0x4f92e22c // sdot v12.4s, v17.16b, v18.4b[0]\n" + "ldr q17, [x15, #0x20]\n" + ".inst 0x4f93e209 // sdot v9.4s, v16.16b, v19.4b[0]\n" + ".inst 0x4f92e20d // sdot v13.4s, v16.16b, v18.4b[0]\n" + "ldr q16, [x15, #0x30]\n" + ".inst 0x4f93e22a // sdot v10.4s, v17.16b, v19.4b[0]\n" "add x15, x15, #0x40\n" - ".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n" - ".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n" - ".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n" + ".inst 0x4f92e22e // sdot v14.4s, v17.16b, v18.4b[0]\n" + ".inst 0x4f93e20b // sdot v11.4s, v16.16b, v19.4b[0]\n" + ".inst 0x4f92e20f // sdot v15.4s, v16.16b, v18.4b[0]\n" "bge 37b\n" "38:" // Height 2: Multiply loop: Skip odd blocks "cbz x13, 41f\n" "tbz x13, #1, 39f\n" "ldr h0, [x12], #0x2\n" - "ldr h1, [x9], #0x2\n" + "ldr h1, [x11], #0x2\n" "tbz x13, #0, 40f\n" "ld1 { v0.b }[2], [x12]\n" - "ld1 { v1.b }[2], [x9]\n" + "ld1 { v1.b }[2], [x11]\n" "b 40f\n" "39:" // Height 2: Multiply loop: Ragged operand read: partial_1_0 "ldr b0, [x12, #0x0]\n" - "ldr b1, [x9, #0x0]\n" + "ldr b1, [x11, #0x0]\n" "40:" // Height 2: Multiply loop: Ragged operand read: Done - "ldr q6, [x15, #0x0]\n" - ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n" - "ldr q7, [x15, #0x10]\n" - ".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n" - "ldr q6, [x15, #0x20]\n" - ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n" - ".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n" - "ldr q7, [x15, #0x30]\n" - ".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n" + "ldr q17, [x15, 
#0x0]\n" + ".inst 0x4f80e228 // sdot v8.4s, v17.16b, v0.4b[0]\n" + "ldr q16, [x15, #0x10]\n" + ".inst 0x4f81e22c // sdot v12.4s, v17.16b, v1.4b[0]\n" + "ldr q17, [x15, #0x20]\n" + ".inst 0x4f80e209 // sdot v9.4s, v16.16b, v0.4b[0]\n" + ".inst 0x4f81e20d // sdot v13.4s, v16.16b, v1.4b[0]\n" + "ldr q16, [x15, #0x30]\n" + ".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n" "add x15, x15, #0x40\n" - ".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n" - ".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n" - ".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n" + ".inst 0x4f81e22e // sdot v14.4s, v17.16b, v1.4b[0]\n" + ".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n" + ".inst 0x4f81e20f // sdot v15.4s, v16.16b, v1.4b[0]\n" "41:" // Height 2: Multiply loop: No odd multiplies "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n" "add x14, x14, #0x1\n" "cmp x14, x20\n" "bne 31b\n" - "ldr q0, [x6, #0x0]\n" - "add v8.4s, v8.4s, v0.4s\n" - "ldr q1, [x6, #0x10]\n" - "add v9.4s, v9.4s, v1.4s\n" - "ldr q2, [x6, #0x20]\n" - "add v10.4s, v10.4s, v2.4s\n" - "ldr q3, [x6, #0x30]\n" - "add v11.4s, v11.4s, v3.4s\n" + "ldr q19, [x6, #0x0]\n" + "add v8.4s, v8.4s, v19.4s\n" + "ldr q18, [x6, #0x10]\n" + "add v9.4s, v9.4s, v18.4s\n" + "ldr q17, [x6, #0x20]\n" + "add v10.4s, v10.4s, v17.4s\n" + "ldr q16, [x6, #0x30]\n" + "add v11.4s, v11.4s, v16.4s\n" "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n" - "add x24, x17, x20\n" + "add x25, x17, x20\n" "prfm pstl1keep, [x17, #0x0]\n" - "add v12.4s, v12.4s, v0.4s\n" - "prfm pstl1keep, [x24, #0x0]\n" - "add v13.4s, v13.4s, v1.4s\n" - "add v14.4s, v14.4s, v2.4s\n" - "add v15.4s, v15.4s, v3.4s\n" + "add v12.4s, v12.4s, v19.4s\n" + "prfm pstl1keep, [x25, #0x0]\n" + "add v13.4s, v13.4s, v18.4s\n" + "add v14.4s, v14.4s, v17.4s\n" + "add v15.4s, v15.4s, v16.4s\n" "add x6, x6, #0x40\n" "tbz %x[flags], #4, 42f\n" "ldr q0, [x8, #0x0]\n" @@ -688,10 +687,10 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( "add x7, x7, #0x40\n" "b 43f\n" "42:" // Height 2: per layer parameters - "add x25, %x[qp], %[per_layer_right_shift]\n" - "ld1r { v0.4s }, [x25]\n" - "add x25, %x[qp], %[per_layer_mul]\n" - "ld1r { v4.4s }, [x25]\n" + "add x20, %x[qp], %[per_layer_right_shift]\n" + "ld1r { v0.4s }, [x20]\n" + "add x20, %x[qp], %[per_layer_mul]\n" + "ld1r { v4.4s }, [x20]\n" "mov v1.16b, v0.16b\n" "mov v5.16b, v4.16b\n" "mov v2.16b, v0.16b\n" @@ -708,30 +707,30 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( "sqrdmulh v14.4s, v14.4s, v6.4s\n" "sqrdmulh v15.4s, v15.4s, v7.4s\n" "tbz %x[flags], #5, 44f\n" - "and v4.16b, v8.16b, v0.16b\n" - "and v5.16b, v9.16b, v1.16b\n" - "and v6.16b, v10.16b, v2.16b\n" - "and v7.16b, v11.16b, v3.16b\n" - "sshr v4.4s, v4.4s, #0x1f\n" - "sshr v5.4s, v5.4s, #0x1f\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sshr v7.4s, v7.4s, #0x1f\n" - "sqadd v8.4s, v8.4s, v4.4s\n" - "sqadd v9.4s, v9.4s, v5.4s\n" - "sqadd v10.4s, v10.4s, v6.4s\n" - "sqadd v11.4s, v11.4s, v7.4s\n" - "and v4.16b, v12.16b, v0.16b\n" - "and v5.16b, v13.16b, v1.16b\n" - "and v6.16b, v14.16b, v2.16b\n" - "and v7.16b, v15.16b, v3.16b\n" - "sshr v4.4s, v4.4s, #0x1f\n" - "sshr v5.4s, v5.4s, #0x1f\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sshr v7.4s, v7.4s, #0x1f\n" - "sqadd v12.4s, v12.4s, v4.4s\n" - "sqadd v13.4s, v13.4s, v5.4s\n" - "sqadd v14.4s, v14.4s, v6.4s\n" - "sqadd v15.4s, v15.4s, v7.4s\n" + "and v19.16b, v8.16b, v0.16b\n" + "and v18.16b, v9.16b, v1.16b\n" + "and v17.16b, v10.16b, v2.16b\n" + "and v16.16b, v11.16b, v3.16b\n" + "sshr v19.4s, v19.4s, #0x1f\n" + "sshr v18.4s, v18.4s, #0x1f\n" + "sshr v17.4s, v17.4s, 
#0x1f\n" + "sshr v16.4s, v16.4s, #0x1f\n" + "sqadd v8.4s, v8.4s, v19.4s\n" + "sqadd v9.4s, v9.4s, v18.4s\n" + "sqadd v10.4s, v10.4s, v17.4s\n" + "sqadd v11.4s, v11.4s, v16.4s\n" + "and v19.16b, v12.16b, v0.16b\n" + "and v18.16b, v13.16b, v1.16b\n" + "and v17.16b, v14.16b, v2.16b\n" + "and v16.16b, v15.16b, v3.16b\n" + "sshr v19.4s, v19.4s, #0x1f\n" + "sshr v18.4s, v18.4s, #0x1f\n" + "sshr v17.4s, v17.4s, #0x1f\n" + "sshr v16.4s, v16.4s, #0x1f\n" + "sqadd v12.4s, v12.4s, v19.4s\n" + "sqadd v13.4s, v13.4s, v18.4s\n" + "sqadd v14.4s, v14.4s, v17.4s\n" + "sqadd v15.4s, v15.4s, v16.4s\n" "44:" // Height 2: no shift correction "srshl v8.4s, v8.4s, v0.4s\n" "srshl v9.4s, v9.4s, v1.4s\n" @@ -741,108 +740,108 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( "srshl v13.4s, v13.4s, v1.4s\n" "srshl v14.4s, v14.4s, v2.4s\n" "srshl v15.4s, v15.4s, v3.4s\n" - "add x25, %x[qp], %[c_offset]\n" - "ld1r { v4.4s }, [x25]\n" - "add v8.4s, v8.4s, v4.4s\n" - "add v9.4s, v9.4s, v4.4s\n" - "add v10.4s, v10.4s, v4.4s\n" - "add v11.4s, v11.4s, v4.4s\n" - "add v12.4s, v12.4s, v4.4s\n" - "add v13.4s, v13.4s, v4.4s\n" - "add v14.4s, v14.4s, v4.4s\n" - "add v15.4s, v15.4s, v4.4s\n" - "add x25, %x[qp], %[maxval]\n" - "ld1r { v6.4s }, [x25]\n" - "smin v8.4s, v8.4s, v6.4s\n" - "smin v9.4s, v9.4s, v6.4s\n" - "smin v10.4s, v10.4s, v6.4s\n" - "smin v11.4s, v11.4s, v6.4s\n" - "smin v12.4s, v12.4s, v6.4s\n" - "smin v13.4s, v13.4s, v6.4s\n" - "smin v14.4s, v14.4s, v6.4s\n" - "smin v15.4s, v15.4s, v6.4s\n" - "add x25, %x[qp], %[minval]\n" - "ld1r { v5.4s }, [x25]\n" - "smax v8.4s, v8.4s, v5.4s\n" - "smax v9.4s, v9.4s, v5.4s\n" - "smax v10.4s, v10.4s, v5.4s\n" - "smax v11.4s, v11.4s, v5.4s\n" - "smax v12.4s, v12.4s, v5.4s\n" - "smax v13.4s, v13.4s, v5.4s\n" - "smax v14.4s, v14.4s, v5.4s\n" - "smax v15.4s, v15.4s, v5.4s\n" + "add x20, %x[qp], %[c_offset]\n" + "ld1r { v16.4s }, [x20]\n" + "add v8.4s, v8.4s, v16.4s\n" + "add v9.4s, v9.4s, v16.4s\n" + "add v10.4s, v10.4s, v16.4s\n" + "add v11.4s, v11.4s, v16.4s\n" + "add v12.4s, v12.4s, v16.4s\n" + "add v13.4s, v13.4s, v16.4s\n" + "add v14.4s, v14.4s, v16.4s\n" + "add v15.4s, v15.4s, v16.4s\n" + "add x20, %x[qp], %[maxval]\n" + "ld1r { v16.4s }, [x20]\n" + "smin v8.4s, v8.4s, v16.4s\n" + "smin v9.4s, v9.4s, v16.4s\n" + "smin v10.4s, v10.4s, v16.4s\n" + "smin v11.4s, v11.4s, v16.4s\n" + "smin v12.4s, v12.4s, v16.4s\n" + "smin v13.4s, v13.4s, v16.4s\n" + "smin v14.4s, v14.4s, v16.4s\n" + "smin v15.4s, v15.4s, v16.4s\n" + "add x20, %x[qp], %[minval]\n" + "ld1r { v16.4s }, [x20]\n" + "smax v8.4s, v8.4s, v16.4s\n" + "smax v9.4s, v9.4s, v16.4s\n" + "smax v10.4s, v10.4s, v16.4s\n" + "smax v11.4s, v11.4s, v16.4s\n" + "smax v12.4s, v12.4s, v16.4s\n" + "smax v13.4s, v13.4s, v16.4s\n" + "smax v14.4s, v14.4s, v16.4s\n" + "smax v15.4s, v15.4s, v16.4s\n" "uzp1 v8.8h, v8.8h, v9.8h\n" - "uzp1 v9.8h, v10.8h, v11.8h\n" + "uzp1 v17.8h, v10.8h, v11.8h\n" "uzp1 v12.8h, v12.8h, v13.8h\n" - "uzp1 v13.8h, v14.8h, v15.8h\n" + "uzp1 v16.8h, v14.8h, v15.8h\n" "cmp x16, #0x10\n" - "uzp1 v8.16b, v8.16b, v9.16b\n" - "uzp1 v12.16b, v12.16b, v13.16b\n" + "uzp1 v8.16b, v8.16b, v17.16b\n" + "uzp1 v12.16b, v12.16b, v16.16b\n" "bge 53f\n" "tbz x16, #3, 48f\n" "str d8, [x17], #0x8\n" - "str d12, [x24], #0x8\n" + "str d12, [x25], #0x8\n" "tbz x16, #2, 46f\n" "st1 { v8.s }[2], [x17], #0x4\n" - "st1 { v12.s }[2], [x24], #0x4\n" + "st1 { v12.s }[2], [x25], #0x4\n" "tbz x16, #1, 45f\n" "st1 { v8.h }[6], [x17], #0x2\n" - "st1 { v12.h }[6], [x24], #0x2\n" + "st1 { v12.h }[6], [x25], #0x2\n" "tbz x16, #0, 52f\n" "st1 { v8.b }[14], 
[x17]\n" - "st1 { v12.b }[14], [x24]\n" + "st1 { v12.b }[14], [x25]\n" "b 52f\n" "45:" // Height 2: Partial direct writeback: partial_1_12 "tbz x16, #0, 52f\n" "st1 { v8.b }[12], [x17]\n" - "st1 { v12.b }[12], [x24]\n" + "st1 { v12.b }[12], [x25]\n" "b 52f\n" "46:" // Height 2: Partial direct writeback: partial_2_8 "tbz x16, #1, 47f\n" "st1 { v8.h }[4], [x17], #0x2\n" - "st1 { v12.h }[4], [x24], #0x2\n" + "st1 { v12.h }[4], [x25], #0x2\n" "tbz x16, #0, 52f\n" "st1 { v8.b }[10], [x17]\n" - "st1 { v12.b }[10], [x24]\n" + "st1 { v12.b }[10], [x25]\n" "b 52f\n" "47:" // Height 2: Partial direct writeback: partial_1_8 "tbz x16, #0, 52f\n" "st1 { v8.b }[8], [x17]\n" - "st1 { v12.b }[8], [x24]\n" + "st1 { v12.b }[8], [x25]\n" "b 52f\n" "48:" // Height 2: Partial direct writeback: partial_4_0 "tbz x16, #2, 50f\n" "str s8, [x17], #0x4\n" - "str s12, [x24], #0x4\n" + "str s12, [x25], #0x4\n" "tbz x16, #1, 49f\n" "st1 { v8.h }[2], [x17], #0x2\n" - "st1 { v12.h }[2], [x24], #0x2\n" + "st1 { v12.h }[2], [x25], #0x2\n" "tbz x16, #0, 52f\n" "st1 { v8.b }[6], [x17]\n" - "st1 { v12.b }[6], [x24]\n" + "st1 { v12.b }[6], [x25]\n" "b 52f\n" "49:" // Height 2: Partial direct writeback: partial_1_4 "tbz x16, #0, 52f\n" "st1 { v8.b }[4], [x17]\n" - "st1 { v12.b }[4], [x24]\n" + "st1 { v12.b }[4], [x25]\n" "b 52f\n" "50:" // Height 2: Partial direct writeback: partial_2_0 "tbz x16, #1, 51f\n" "str h8, [x17], #0x2\n" - "str h12, [x24], #0x2\n" + "str h12, [x25], #0x2\n" "tbz x16, #0, 52f\n" "st1 { v8.b }[2], [x17]\n" - "st1 { v12.b }[2], [x24]\n" + "st1 { v12.b }[2], [x25]\n" "b 52f\n" "51:" // Height 2: Partial direct writeback: partial_1_0 "str b8, [x17, #0x0]\n" - "str b12, [x24, #0x0]\n" + "str b12, [x25, #0x0]\n" "52:" // Height 2: Partial direct writeback: Done "b 54f\n" "53:" // Height 2: Full writeback "str q8, [x17, #0x0]\n" "add x17, x17, #0x10\n" - "str q12, [x24, #0x0]\n" + "str q12, [x25, #0x0]\n" "54:" // Height 2: Writeback done "subs x16, x16, #0x10\n" "bgt 29b\n" @@ -872,308 +871,308 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( "58:" // Height 3: String loop "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n" "ldr w13, [x20, x14, LSL #0x2]\n" - "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n" + "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n" "tbz %x[flags], #3, 59f\n" - "ldr x21, [%x[input_ptr], x14, LSL #0x3]\n" - "add x21, x21, x20, LSL #3\n" - "ldr x12, [x21, #0x0]\n" - "ldr x9, [x21, #0x8]\n" - "ldr x27, [x21, #0x10]\n" + "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n" + "add x20, x20, x21, LSL #3\n" + "ldr x12, [x20, #0x0]\n" + "ldr x11, [x20, #0x8]\n" + "ldr x10, [x20, #0x10]\n" "cbnz x14, 60f\n" "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n" "add x12, x12, x20\n" - "add x9, x9, x20\n" - "add x27, x27, x20\n" + "add x11, x11, x20\n" + "add x10, x10, x20\n" "b 60f\n" "59:" // Height 3: setup direct input "mov x12, %x[input_ptr]\n" - "add x9, x12, x20\n" - "add x27, x9, x20\n" + "add x11, x12, x21\n" + "add x10, x11, x21\n" "60:" // Height 3: input setup done "cmp x13, #0x10\n" "blt 63f\n" "ldr q0, [x12, #0x0]\n" "cmp x13, #0x20\n" - "ldr q1, [x9, #0x0]\n" - "ldr q2, [x27, #0x0]\n" + "ldr q1, [x11, #0x0]\n" + "ldr q2, [x10, #0x0]\n" "ldr q6, [x15, #0x0]\n" "ldr q7, [x15, #0x10]\n" "blt 62f\n" "61:" // Height 3: Multiply loop: Main loop head ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n" - "ldr x20, [x15, #0x28]\n" + "ldr x21, [x15, #0x28]\n" ".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n" - "ldr x11, [x15, #0x38]\n" + "ldr x20, [x15, #0x38]\n" ".inst 0x4f82e0d0 // 
sdot v16.4s, v6.16b, v2.4b[0]\n" - "ldr d6, [x15, #0x20]\n" + "ldr d21, [x15, #0x20]\n" ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n" - "mov v6.d[1], x20\n" + "mov v21.d[1], x21\n" ".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n" - "ldr x20, [x15, #0x48]\n" + "ldr x21, [x15, #0x48]\n" ".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n" - "ldr d7, [x15, #0x30]\n" - "mov v7.d[1], x11\n" - ".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n" - ".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n" - "ldr x11, [x15, #0x58]\n" - ".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n" - "ldr d6, [x15, #0x40]\n" - ".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n" - "mov v6.d[1], x20\n" - ".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n" - "ldr x20, [x15, #0x68]\n" - ".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n" - "ldr d7, [x15, #0x50]\n" - "mov v7.d[1], x11\n" - ".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n" - ".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n" - "ldr x11, [x15, #0x78]\n" - ".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n" - "ldr d6, [x15, #0x60]\n" - ".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n" + "ldr d20, [x15, #0x30]\n" + "mov v20.d[1], x20\n" + ".inst 0x4f80e2aa // sdot v10.4s, v21.16b, v0.4b[0]\n" + ".inst 0x4f81e2ae // sdot v14.4s, v21.16b, v1.4b[0]\n" + "ldr x20, [x15, #0x58]\n" + ".inst 0x4f82e2b2 // sdot v18.4s, v21.16b, v2.4b[0]\n" + "ldr d21, [x15, #0x40]\n" + ".inst 0x4f80e28b // sdot v11.4s, v20.16b, v0.4b[0]\n" + "mov v21.d[1], x21\n" + ".inst 0x4f81e28f // sdot v15.4s, v20.16b, v1.4b[0]\n" + "ldr x21, [x15, #0x68]\n" + ".inst 0x4f82e293 // sdot v19.4s, v20.16b, v2.4b[0]\n" + "ldr d20, [x15, #0x50]\n" + "mov v20.d[1], x20\n" + ".inst 0x4fa0e2a8 // sdot v8.4s, v21.16b, v0.4b[1]\n" + ".inst 0x4fa1e2ac // sdot v12.4s, v21.16b, v1.4b[1]\n" + "ldr x20, [x15, #0x78]\n" + ".inst 0x4fa2e2b0 // sdot v16.4s, v21.16b, v2.4b[1]\n" + "ldr d21, [x15, #0x60]\n" + ".inst 0x4fa0e289 // sdot v9.4s, v20.16b, v0.4b[1]\n" + "mov v21.d[1], x21\n" + ".inst 0x4fa1e28d // sdot v13.4s, v20.16b, v1.4b[1]\n" + "ldr x21, [x15, #0x88]\n" + ".inst 0x4fa2e291 // sdot v17.4s, v20.16b, v2.4b[1]\n" + "ldr d20, [x15, #0x70]\n" + "mov v20.d[1], x20\n" + ".inst 0x4fa0e2aa // sdot v10.4s, v21.16b, v0.4b[1]\n" + ".inst 0x4fa1e2ae // sdot v14.4s, v21.16b, v1.4b[1]\n" + "ldr x20, [x15, #0x98]\n" + ".inst 0x4fa2e2b2 // sdot v18.4s, v21.16b, v2.4b[1]\n" + "ldr d21, [x15, #0x80]\n" + ".inst 0x4fa0e28b // sdot v11.4s, v20.16b, v0.4b[1]\n" + "mov v21.d[1], x21\n" + ".inst 0x4fa1e28f // sdot v15.4s, v20.16b, v1.4b[1]\n" + "ldr x21, [x15, #0xa8]\n" + ".inst 0x4fa2e293 // sdot v19.4s, v20.16b, v2.4b[1]\n" + "ldr d20, [x15, #0x90]\n" + "mov v20.d[1], x20\n" + ".inst 0x4f80eaa8 // sdot v8.4s, v21.16b, v0.4b[2]\n" + ".inst 0x4f81eaac // sdot v12.4s, v21.16b, v1.4b[2]\n" + "ldr x20, [x15, #0xb8]\n" + ".inst 0x4f82eab0 // sdot v16.4s, v21.16b, v2.4b[2]\n" + "ldr d21, [x15, #0xa0]\n" + ".inst 0x4f80ea89 // sdot v9.4s, v20.16b, v0.4b[2]\n" + "mov v21.d[1], x21\n" + ".inst 0x4f81ea8d // sdot v13.4s, v20.16b, v1.4b[2]\n" + "ldr x21, [x15, #0xc8]\n" + ".inst 0x4f82ea91 // sdot v17.4s, v20.16b, v2.4b[2]\n" + "ldr d20, [x15, #0xb0]\n" + "mov v20.d[1], x20\n" + ".inst 0x4f80eaaa // sdot v10.4s, v21.16b, v0.4b[2]\n" + ".inst 0x4f81eaae // sdot v14.4s, v21.16b, v1.4b[2]\n" + "ldr x20, [x15, #0xd8]\n" + ".inst 0x4f82eab2 // sdot v18.4s, v21.16b, v2.4b[2]\n" + "ldr d21, [x15, #0xc0]\n" + ".inst 0x4f80ea8b // sdot v11.4s, v20.16b, v0.4b[2]\n" + "mov v21.d[1], x21\n" + ".inst 0x4f81ea8f 
// sdot v15.4s, v20.16b, v1.4b[2]\n" + "ldr x21, [x15, #0xe8]\n" + ".inst 0x4f82ea93 // sdot v19.4s, v20.16b, v2.4b[2]\n" + "ldr d20, [x15, #0xd0]\n" + "mov v20.d[1], x20\n" + ".inst 0x4fa0eaa8 // sdot v8.4s, v21.16b, v0.4b[3]\n" + ".inst 0x4fa1eaac // sdot v12.4s, v21.16b, v1.4b[3]\n" + "ldr x20, [x15, #0xf8]\n" + ".inst 0x4fa2eab0 // sdot v16.4s, v21.16b, v2.4b[3]\n" + "ldr d21, [x15, #0xe0]\n" + ".inst 0x4fa0ea89 // sdot v9.4s, v20.16b, v0.4b[3]\n" + "mov v21.d[1], x21\n" + ".inst 0x4fa1ea8d // sdot v13.4s, v20.16b, v1.4b[3]\n" + "add x12, x12, #0x10\n" + ".inst 0x4fa2ea91 // sdot v17.4s, v20.16b, v2.4b[3]\n" + "ldr d20, [x15, #0xf0]\n" + "mov v20.d[1], x20\n" + "add x11, x11, #0x10\n" + "add x10, x10, #0x10\n" + "add x15, x15, #0x100\n" + ".inst 0x4fa0eaaa // sdot v10.4s, v21.16b, v0.4b[3]\n" + "ldr x20, [x15, #0x8]\n" + ".inst 0x4fa1eaae // sdot v14.4s, v21.16b, v1.4b[3]\n" + "ldr x23, [x12, #0x8]\n" + ".inst 0x4fa2eab2 // sdot v18.4s, v21.16b, v2.4b[3]\n" + "ldr d6, [x15, #0x0]\n" + ".inst 0x4fa0ea8b // sdot v11.4s, v20.16b, v0.4b[3]\n" + "ldr d0, [x12, #0x0]\n" + ".inst 0x4fa1ea8f // sdot v15.4s, v20.16b, v1.4b[3]\n" + "ldr d1, [x11, #0x0]\n" + "ldr x22, [x11, #0x8]\n" + ".inst 0x4fa2ea93 // sdot v19.4s, v20.16b, v2.4b[3]\n" + "ldr d2, [x10, #0x0]\n" + "sub x13, x13, #0x10\n" + "ldr d7, [x15, #0x10]\n" + "cmp x13, #0x20\n" + "ldr x21, [x10, #0x8]\n" "mov v6.d[1], x20\n" - ".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n" - "ldr x20, [x15, #0x88]\n" - ".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n" - "ldr d7, [x15, #0x70]\n" - "mov v7.d[1], x11\n" - ".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n" - ".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n" - "ldr x11, [x15, #0x98]\n" - ".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n" - "ldr d6, [x15, #0x80]\n" - ".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n" - "mov v6.d[1], x20\n" - ".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n" - "ldr x20, [x15, #0xa8]\n" - ".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n" - "ldr d7, [x15, #0x90]\n" - "mov v7.d[1], x11\n" - ".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n" - ".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n" - "ldr x11, [x15, #0xb8]\n" - ".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n" - "ldr d6, [x15, #0xa0]\n" - ".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n" - "mov v6.d[1], x20\n" - ".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n" - "ldr x20, [x15, #0xc8]\n" - ".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n" - "ldr d7, [x15, #0xb0]\n" - "mov v7.d[1], x11\n" - ".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n" - ".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n" - "ldr x11, [x15, #0xd8]\n" - ".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n" - "ldr d6, [x15, #0xc0]\n" - ".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n" - "mov v6.d[1], x20\n" - ".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n" - "ldr x20, [x15, #0xe8]\n" - ".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n" - "ldr d7, [x15, #0xd0]\n" - "mov v7.d[1], x11\n" - ".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n" - ".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n" - "ldr x11, [x15, #0xf8]\n" - ".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n" - "ldr d6, [x15, #0xe0]\n" - ".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n" - "mov v6.d[1], x20\n" - ".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n" - "add x12, x12, #0x10\n" - ".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n" - "ldr d7, [x15, #0xf0]\n" - "mov v7.d[1], x11\n" - 
"add x9, x9, #0x10\n" - "add x27, x27, #0x10\n" - "add x15, x15, #0x100\n" - ".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n" - "ldr x20, [x15, #0x8]\n" - ".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n" - "ldr x10, [x12, #0x8]\n" - ".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n" - "ldr d6, [x15, #0x0]\n" - ".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n" - "ldr d0, [x12, #0x0]\n" - ".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n" - "ldr d1, [x9, #0x0]\n" - "ldr x28, [x9, #0x8]\n" - ".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n" - "ldr d2, [x27, #0x0]\n" - "sub x13, x13, #0x10\n" - "ldr d7, [x15, #0x10]\n" - "cmp x13, #0x20\n" - "ldr x26, [x27, #0x8]\n" - "mov v6.d[1], x20\n" - "ldr x11, [x15, #0x18]\n" - "mov v0.d[1], x10\n" + "ldr x20, [x15, #0x18]\n" + "mov v0.d[1], x23\n" "prfm pldl1keep, [x12, #0x80]\n" - "mov v1.d[1], x28\n" - "prfm pldl1keep, [x9, #0x80]\n" - "mov v2.d[1], x26\n" - "prfm pldl1keep, [x27, #0x80]\n" - "mov v7.d[1], x11\n" + "mov v1.d[1], x22\n" + "prfm pldl1keep, [x11, #0x80]\n" + "mov v2.d[1], x21\n" + "prfm pldl1keep, [x10, #0x80]\n" + "mov v7.d[1], x20\n" "bge 61b\n" "62:" // Height 3: Multiply loop: Single iteration only ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n" "add x12, x12, #0x10\n" ".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n" - "add x9, x9, #0x10\n" + "add x11, x11, #0x10\n" ".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n" - "ldr q6, [x15, #0x20]\n" + "ldr q21, [x15, #0x20]\n" ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n" - "add x27, x27, #0x10\n" + "add x10, x10, #0x10\n" ".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n" "sub x13, x13, #0x10\n" ".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n" - "ldr q7, [x15, #0x30]\n" - ".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n" + "ldr q20, [x15, #0x30]\n" + ".inst 0x4f80e2aa // sdot v10.4s, v21.16b, v0.4b[0]\n" "prfm pldl1keep, [x12, #0x80]\n" - ".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n" - "prfm pldl1keep, [x9, #0x80]\n" - ".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n" - "ldr q6, [x15, #0x40]\n" - ".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n" - "prfm pldl1keep, [x27, #0x80]\n" - ".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n" - ".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n" - "ldr q7, [x15, #0x50]\n" - ".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n" - ".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n" - ".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n" - "ldr q6, [x15, #0x60]\n" - ".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n" - ".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n" - ".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n" - "ldr q7, [x15, #0x70]\n" - ".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n" - ".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n" - ".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n" - "ldr q6, [x15, #0x80]\n" - ".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n" - ".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n" - ".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n" - "ldr q7, [x15, #0x90]\n" - ".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n" - ".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n" - ".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n" - "ldr q6, [x15, #0xa0]\n" - ".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n" - ".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n" - ".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n" - "ldr q7, [x15, #0xb0]\n" - ".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n" - 
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n" - ".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n" - "ldr q6, [x15, #0xc0]\n" - ".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n" - ".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n" - ".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n" - "ldr q7, [x15, #0xd0]\n" - ".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n" - ".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n" - ".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n" - "ldr q6, [x15, #0xe0]\n" - ".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n" - ".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n" - ".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n" - "ldr q7, [x15, #0xf0]\n" - ".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n" + ".inst 0x4f81e2ae // sdot v14.4s, v21.16b, v1.4b[0]\n" + "prfm pldl1keep, [x11, #0x80]\n" + ".inst 0x4f82e2b2 // sdot v18.4s, v21.16b, v2.4b[0]\n" + "ldr q21, [x15, #0x40]\n" + ".inst 0x4f80e28b // sdot v11.4s, v20.16b, v0.4b[0]\n" + "prfm pldl1keep, [x10, #0x80]\n" + ".inst 0x4f81e28f // sdot v15.4s, v20.16b, v1.4b[0]\n" + ".inst 0x4f82e293 // sdot v19.4s, v20.16b, v2.4b[0]\n" + "ldr q20, [x15, #0x50]\n" + ".inst 0x4fa0e2a8 // sdot v8.4s, v21.16b, v0.4b[1]\n" + ".inst 0x4fa1e2ac // sdot v12.4s, v21.16b, v1.4b[1]\n" + ".inst 0x4fa2e2b0 // sdot v16.4s, v21.16b, v2.4b[1]\n" + "ldr q21, [x15, #0x60]\n" + ".inst 0x4fa0e289 // sdot v9.4s, v20.16b, v0.4b[1]\n" + ".inst 0x4fa1e28d // sdot v13.4s, v20.16b, v1.4b[1]\n" + ".inst 0x4fa2e291 // sdot v17.4s, v20.16b, v2.4b[1]\n" + "ldr q20, [x15, #0x70]\n" + ".inst 0x4fa0e2aa // sdot v10.4s, v21.16b, v0.4b[1]\n" + ".inst 0x4fa1e2ae // sdot v14.4s, v21.16b, v1.4b[1]\n" + ".inst 0x4fa2e2b2 // sdot v18.4s, v21.16b, v2.4b[1]\n" + "ldr q21, [x15, #0x80]\n" + ".inst 0x4fa0e28b // sdot v11.4s, v20.16b, v0.4b[1]\n" + ".inst 0x4fa1e28f // sdot v15.4s, v20.16b, v1.4b[1]\n" + ".inst 0x4fa2e293 // sdot v19.4s, v20.16b, v2.4b[1]\n" + "ldr q20, [x15, #0x90]\n" + ".inst 0x4f80eaa8 // sdot v8.4s, v21.16b, v0.4b[2]\n" + ".inst 0x4f81eaac // sdot v12.4s, v21.16b, v1.4b[2]\n" + ".inst 0x4f82eab0 // sdot v16.4s, v21.16b, v2.4b[2]\n" + "ldr q21, [x15, #0xa0]\n" + ".inst 0x4f80ea89 // sdot v9.4s, v20.16b, v0.4b[2]\n" + ".inst 0x4f81ea8d // sdot v13.4s, v20.16b, v1.4b[2]\n" + ".inst 0x4f82ea91 // sdot v17.4s, v20.16b, v2.4b[2]\n" + "ldr q20, [x15, #0xb0]\n" + ".inst 0x4f80eaaa // sdot v10.4s, v21.16b, v0.4b[2]\n" + ".inst 0x4f81eaae // sdot v14.4s, v21.16b, v1.4b[2]\n" + ".inst 0x4f82eab2 // sdot v18.4s, v21.16b, v2.4b[2]\n" + "ldr q21, [x15, #0xc0]\n" + ".inst 0x4f80ea8b // sdot v11.4s, v20.16b, v0.4b[2]\n" + ".inst 0x4f81ea8f // sdot v15.4s, v20.16b, v1.4b[2]\n" + ".inst 0x4f82ea93 // sdot v19.4s, v20.16b, v2.4b[2]\n" + "ldr q20, [x15, #0xd0]\n" + ".inst 0x4fa0eaa8 // sdot v8.4s, v21.16b, v0.4b[3]\n" + ".inst 0x4fa1eaac // sdot v12.4s, v21.16b, v1.4b[3]\n" + ".inst 0x4fa2eab0 // sdot v16.4s, v21.16b, v2.4b[3]\n" + "ldr q21, [x15, #0xe0]\n" + ".inst 0x4fa0ea89 // sdot v9.4s, v20.16b, v0.4b[3]\n" + ".inst 0x4fa1ea8d // sdot v13.4s, v20.16b, v1.4b[3]\n" + ".inst 0x4fa2ea91 // sdot v17.4s, v20.16b, v2.4b[3]\n" + "ldr q20, [x15, #0xf0]\n" + ".inst 0x4fa0eaaa // sdot v10.4s, v21.16b, v0.4b[3]\n" "add x15, x15, #0x100\n" - ".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n" - ".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n" - ".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n" - ".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n" - ".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n" + ".inst 0x4fa1eaae 
// sdot v14.4s, v21.16b, v1.4b[3]\n" + ".inst 0x4fa2eab2 // sdot v18.4s, v21.16b, v2.4b[3]\n" + ".inst 0x4fa0ea8b // sdot v11.4s, v20.16b, v0.4b[3]\n" + ".inst 0x4fa1ea8f // sdot v15.4s, v20.16b, v1.4b[3]\n" + ".inst 0x4fa2ea93 // sdot v19.4s, v20.16b, v2.4b[3]\n" "63:" // Height 3: Multiply loop: Main loop skip "cbz x13, 68f\n" "cmp x13, #0x4\n" "blt 65f\n" "64:" // Height 3: Multiply loop: Odd block loop - "ldr s0, [x12], #0x4\n" + "ldr s24, [x12], #0x4\n" "sub x13, x13, #0x4\n" - "ldr s1, [x9], #0x4\n" + "ldr s23, [x11], #0x4\n" "cmp x13, #0x4\n" - "ldr s2, [x27], #0x4\n" - "ldr q6, [x15, #0x0]\n" - ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n" - "ldr q7, [x15, #0x10]\n" - ".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n" - ".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n" - "ldr q6, [x15, #0x20]\n" - ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n" - ".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n" - ".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n" - "ldr q7, [x15, #0x30]\n" - ".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n" + "ldr s22, [x10], #0x4\n" + "ldr q21, [x15, #0x0]\n" + ".inst 0x4f98e2a8 // sdot v8.4s, v21.16b, v24.4b[0]\n" + "ldr q20, [x15, #0x10]\n" + ".inst 0x4f97e2ac // sdot v12.4s, v21.16b, v23.4b[0]\n" + ".inst 0x4f96e2b0 // sdot v16.4s, v21.16b, v22.4b[0]\n" + "ldr q21, [x15, #0x20]\n" + ".inst 0x4f98e289 // sdot v9.4s, v20.16b, v24.4b[0]\n" + ".inst 0x4f97e28d // sdot v13.4s, v20.16b, v23.4b[0]\n" + ".inst 0x4f96e291 // sdot v17.4s, v20.16b, v22.4b[0]\n" + "ldr q20, [x15, #0x30]\n" + ".inst 0x4f98e2aa // sdot v10.4s, v21.16b, v24.4b[0]\n" "add x15, x15, #0x40\n" - ".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n" - ".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n" - ".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n" - ".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n" - ".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n" + ".inst 0x4f97e2ae // sdot v14.4s, v21.16b, v23.4b[0]\n" + ".inst 0x4f96e2b2 // sdot v18.4s, v21.16b, v22.4b[0]\n" + ".inst 0x4f98e28b // sdot v11.4s, v20.16b, v24.4b[0]\n" + ".inst 0x4f97e28f // sdot v15.4s, v20.16b, v23.4b[0]\n" + ".inst 0x4f96e293 // sdot v19.4s, v20.16b, v22.4b[0]\n" "bge 64b\n" "65:" // Height 3: Multiply loop: Skip odd blocks "cbz x13, 68f\n" "tbz x13, #1, 66f\n" "ldr h0, [x12], #0x2\n" - "ldr h1, [x9], #0x2\n" - "ldr h2, [x27], #0x2\n" + "ldr h1, [x11], #0x2\n" + "ldr h2, [x10], #0x2\n" "tbz x13, #0, 67f\n" "ld1 { v0.b }[2], [x12]\n" - "ld1 { v1.b }[2], [x9]\n" - "ld1 { v2.b }[2], [x27]\n" + "ld1 { v1.b }[2], [x11]\n" + "ld1 { v2.b }[2], [x10]\n" "b 67f\n" "66:" // Height 3: Multiply loop: Ragged operand read: partial_1_0 "ldr b0, [x12, #0x0]\n" - "ldr b1, [x9, #0x0]\n" - "ldr b2, [x27, #0x0]\n" + "ldr b1, [x11, #0x0]\n" + "ldr b2, [x10, #0x0]\n" "67:" // Height 3: Multiply loop: Ragged operand read: Done - "ldr q6, [x15, #0x0]\n" - ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n" - "ldr q7, [x15, #0x10]\n" - ".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n" - ".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n" - "ldr q6, [x15, #0x20]\n" - ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n" - ".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n" - ".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n" - "ldr q7, [x15, #0x30]\n" - ".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n" + "ldr q21, [x15, #0x0]\n" + ".inst 0x4f80e2a8 // sdot v8.4s, v21.16b, v0.4b[0]\n" + "ldr q20, [x15, #0x10]\n" + ".inst 0x4f81e2ac // sdot v12.4s, v21.16b, v1.4b[0]\n" + ".inst 0x4f82e2b0 
// sdot v16.4s, v21.16b, v2.4b[0]\n" + "ldr q21, [x15, #0x20]\n" + ".inst 0x4f80e289 // sdot v9.4s, v20.16b, v0.4b[0]\n" + ".inst 0x4f81e28d // sdot v13.4s, v20.16b, v1.4b[0]\n" + ".inst 0x4f82e291 // sdot v17.4s, v20.16b, v2.4b[0]\n" + "ldr q20, [x15, #0x30]\n" + ".inst 0x4f80e2aa // sdot v10.4s, v21.16b, v0.4b[0]\n" "add x15, x15, #0x40\n" - ".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n" - ".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n" - ".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n" - ".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n" - ".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n" + ".inst 0x4f81e2ae // sdot v14.4s, v21.16b, v1.4b[0]\n" + ".inst 0x4f82e2b2 // sdot v18.4s, v21.16b, v2.4b[0]\n" + ".inst 0x4f80e28b // sdot v11.4s, v20.16b, v0.4b[0]\n" + ".inst 0x4f81e28f // sdot v15.4s, v20.16b, v1.4b[0]\n" + ".inst 0x4f82e293 // sdot v19.4s, v20.16b, v2.4b[0]\n" "68:" // Height 3: Multiply loop: No odd multiplies "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n" "add x14, x14, #0x1\n" "cmp x14, x20\n" "bne 58b\n" - "ldr q0, [x6, #0x0]\n" - "add v8.4s, v8.4s, v0.4s\n" - "ldr q1, [x6, #0x10]\n" - "add v9.4s, v9.4s, v1.4s\n" - "ldr q2, [x6, #0x20]\n" - "add v10.4s, v10.4s, v2.4s\n" - "ldr q3, [x6, #0x30]\n" - "add v11.4s, v11.4s, v3.4s\n" + "ldr q23, [x6, #0x0]\n" + "add v8.4s, v8.4s, v23.4s\n" + "ldr q22, [x6, #0x10]\n" + "add v9.4s, v9.4s, v22.4s\n" + "ldr q21, [x6, #0x20]\n" + "add v10.4s, v10.4s, v21.4s\n" + "ldr q20, [x6, #0x30]\n" + "add v11.4s, v11.4s, v20.4s\n" "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n" - "add x24, x17, x20\n" - "add x23, x24, x20\n" + "add x25, x17, x20\n" + "add x24, x25, x20\n" "prfm pstl1keep, [x17, #0x0]\n" + "prfm pstl1keep, [x25, #0x0]\n" + "add v12.4s, v12.4s, v23.4s\n" "prfm pstl1keep, [x24, #0x0]\n" - "add v12.4s, v12.4s, v0.4s\n" - "prfm pstl1keep, [x23, #0x0]\n" - "add v13.4s, v13.4s, v1.4s\n" - "add v14.4s, v14.4s, v2.4s\n" - "add v15.4s, v15.4s, v3.4s\n" - "add v16.4s, v16.4s, v0.4s\n" - "add v17.4s, v17.4s, v1.4s\n" - "add v18.4s, v18.4s, v2.4s\n" - "add v19.4s, v19.4s, v3.4s\n" + "add v13.4s, v13.4s, v22.4s\n" + "add v14.4s, v14.4s, v21.4s\n" + "add v15.4s, v15.4s, v20.4s\n" + "add v16.4s, v16.4s, v23.4s\n" + "add v17.4s, v17.4s, v22.4s\n" + "add v18.4s, v18.4s, v21.4s\n" + "add v19.4s, v19.4s, v20.4s\n" "add x6, x6, #0x40\n" "tbz %x[flags], #4, 69f\n" "ldr q0, [x8, #0x0]\n" @@ -1188,10 +1187,10 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( "add x7, x7, #0x40\n" "b 70f\n" "69:" // Height 3: per layer parameters - "add x25, %x[qp], %[per_layer_right_shift]\n" - "ld1r { v0.4s }, [x25]\n" - "add x25, %x[qp], %[per_layer_mul]\n" - "ld1r { v4.4s }, [x25]\n" + "add x20, %x[qp], %[per_layer_right_shift]\n" + "ld1r { v0.4s }, [x20]\n" + "add x20, %x[qp], %[per_layer_mul]\n" + "ld1r { v4.4s }, [x20]\n" "mov v1.16b, v0.16b\n" "mov v5.16b, v4.16b\n" "mov v2.16b, v0.16b\n" @@ -1212,42 +1211,42 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( "sqrdmulh v18.4s, v18.4s, v6.4s\n" "sqrdmulh v19.4s, v19.4s, v7.4s\n" "tbz %x[flags], #5, 71f\n" - "and v4.16b, v8.16b, v0.16b\n" - "and v5.16b, v9.16b, v1.16b\n" - "and v6.16b, v10.16b, v2.16b\n" - "and v7.16b, v11.16b, v3.16b\n" - "sshr v4.4s, v4.4s, #0x1f\n" - "sshr v5.4s, v5.4s, #0x1f\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sshr v7.4s, v7.4s, #0x1f\n" - "sqadd v8.4s, v8.4s, v4.4s\n" - "sqadd v9.4s, v9.4s, v5.4s\n" - "sqadd v10.4s, v10.4s, v6.4s\n" - "sqadd v11.4s, v11.4s, v7.4s\n" - "and v4.16b, v12.16b, v0.16b\n" - "and v5.16b, v13.16b, v1.16b\n" - "and v6.16b, v14.16b, v2.16b\n" - "and 
v7.16b, v15.16b, v3.16b\n" - "sshr v4.4s, v4.4s, #0x1f\n" - "sshr v5.4s, v5.4s, #0x1f\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sshr v7.4s, v7.4s, #0x1f\n" - "sqadd v12.4s, v12.4s, v4.4s\n" - "sqadd v13.4s, v13.4s, v5.4s\n" - "sqadd v14.4s, v14.4s, v6.4s\n" - "sqadd v15.4s, v15.4s, v7.4s\n" - "and v4.16b, v16.16b, v0.16b\n" - "and v5.16b, v17.16b, v1.16b\n" - "and v6.16b, v18.16b, v2.16b\n" - "and v7.16b, v19.16b, v3.16b\n" - "sshr v4.4s, v4.4s, #0x1f\n" - "sshr v5.4s, v5.4s, #0x1f\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sshr v7.4s, v7.4s, #0x1f\n" - "sqadd v16.4s, v16.4s, v4.4s\n" - "sqadd v17.4s, v17.4s, v5.4s\n" - "sqadd v18.4s, v18.4s, v6.4s\n" - "sqadd v19.4s, v19.4s, v7.4s\n" + "and v23.16b, v8.16b, v0.16b\n" + "and v22.16b, v9.16b, v1.16b\n" + "and v21.16b, v10.16b, v2.16b\n" + "and v20.16b, v11.16b, v3.16b\n" + "sshr v23.4s, v23.4s, #0x1f\n" + "sshr v22.4s, v22.4s, #0x1f\n" + "sshr v21.4s, v21.4s, #0x1f\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "sqadd v8.4s, v8.4s, v23.4s\n" + "sqadd v9.4s, v9.4s, v22.4s\n" + "sqadd v10.4s, v10.4s, v21.4s\n" + "sqadd v11.4s, v11.4s, v20.4s\n" + "and v23.16b, v12.16b, v0.16b\n" + "and v22.16b, v13.16b, v1.16b\n" + "and v21.16b, v14.16b, v2.16b\n" + "and v20.16b, v15.16b, v3.16b\n" + "sshr v23.4s, v23.4s, #0x1f\n" + "sshr v22.4s, v22.4s, #0x1f\n" + "sshr v21.4s, v21.4s, #0x1f\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "sqadd v12.4s, v12.4s, v23.4s\n" + "sqadd v13.4s, v13.4s, v22.4s\n" + "sqadd v14.4s, v14.4s, v21.4s\n" + "sqadd v15.4s, v15.4s, v20.4s\n" + "and v23.16b, v16.16b, v0.16b\n" + "and v22.16b, v17.16b, v1.16b\n" + "and v21.16b, v18.16b, v2.16b\n" + "and v20.16b, v19.16b, v3.16b\n" + "sshr v23.4s, v23.4s, #0x1f\n" + "sshr v22.4s, v22.4s, #0x1f\n" + "sshr v21.4s, v21.4s, #0x1f\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "sqadd v16.4s, v16.4s, v23.4s\n" + "sqadd v17.4s, v17.4s, v22.4s\n" + "sqadd v18.4s, v18.4s, v21.4s\n" + "sqadd v19.4s, v19.4s, v20.4s\n" "71:" // Height 3: no shift correction "srshl v8.4s, v8.4s, v0.4s\n" "srshl v9.4s, v9.4s, v1.4s\n" @@ -1261,139 +1260,139 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( "srshl v17.4s, v17.4s, v1.4s\n" "srshl v18.4s, v18.4s, v2.4s\n" "srshl v19.4s, v19.4s, v3.4s\n" - "add x25, %x[qp], %[c_offset]\n" - "ld1r { v4.4s }, [x25]\n" - "add v8.4s, v8.4s, v4.4s\n" - "add v9.4s, v9.4s, v4.4s\n" - "add v10.4s, v10.4s, v4.4s\n" - "add v11.4s, v11.4s, v4.4s\n" - "add v12.4s, v12.4s, v4.4s\n" - "add v13.4s, v13.4s, v4.4s\n" - "add v14.4s, v14.4s, v4.4s\n" - "add v15.4s, v15.4s, v4.4s\n" - "add v16.4s, v16.4s, v4.4s\n" - "add v17.4s, v17.4s, v4.4s\n" - "add v18.4s, v18.4s, v4.4s\n" - "add v19.4s, v19.4s, v4.4s\n" - "add x25, %x[qp], %[maxval]\n" - "ld1r { v6.4s }, [x25]\n" - "smin v8.4s, v8.4s, v6.4s\n" - "smin v9.4s, v9.4s, v6.4s\n" - "smin v10.4s, v10.4s, v6.4s\n" - "smin v11.4s, v11.4s, v6.4s\n" - "smin v12.4s, v12.4s, v6.4s\n" - "smin v13.4s, v13.4s, v6.4s\n" - "smin v14.4s, v14.4s, v6.4s\n" - "smin v15.4s, v15.4s, v6.4s\n" - "smin v16.4s, v16.4s, v6.4s\n" - "smin v17.4s, v17.4s, v6.4s\n" - "smin v18.4s, v18.4s, v6.4s\n" - "smin v19.4s, v19.4s, v6.4s\n" - "add x25, %x[qp], %[minval]\n" - "ld1r { v5.4s }, [x25]\n" - "smax v8.4s, v8.4s, v5.4s\n" - "smax v9.4s, v9.4s, v5.4s\n" - "smax v10.4s, v10.4s, v5.4s\n" - "smax v11.4s, v11.4s, v5.4s\n" - "smax v12.4s, v12.4s, v5.4s\n" - "smax v13.4s, v13.4s, v5.4s\n" - "smax v14.4s, v14.4s, v5.4s\n" - "smax v15.4s, v15.4s, v5.4s\n" - "smax v16.4s, v16.4s, v5.4s\n" - "smax v17.4s, v17.4s, v5.4s\n" - "smax v18.4s, v18.4s, v5.4s\n" - "smax v19.4s, v19.4s, v5.4s\n" + "add x20, %x[qp], %[c_offset]\n" 
+ "ld1r { v20.4s }, [x20]\n" + "add v8.4s, v8.4s, v20.4s\n" + "add v9.4s, v9.4s, v20.4s\n" + "add v10.4s, v10.4s, v20.4s\n" + "add v11.4s, v11.4s, v20.4s\n" + "add v12.4s, v12.4s, v20.4s\n" + "add v13.4s, v13.4s, v20.4s\n" + "add v14.4s, v14.4s, v20.4s\n" + "add v15.4s, v15.4s, v20.4s\n" + "add v16.4s, v16.4s, v20.4s\n" + "add v17.4s, v17.4s, v20.4s\n" + "add v18.4s, v18.4s, v20.4s\n" + "add v19.4s, v19.4s, v20.4s\n" + "add x20, %x[qp], %[maxval]\n" + "ld1r { v20.4s }, [x20]\n" + "smin v8.4s, v8.4s, v20.4s\n" + "smin v9.4s, v9.4s, v20.4s\n" + "smin v10.4s, v10.4s, v20.4s\n" + "smin v11.4s, v11.4s, v20.4s\n" + "smin v12.4s, v12.4s, v20.4s\n" + "smin v13.4s, v13.4s, v20.4s\n" + "smin v14.4s, v14.4s, v20.4s\n" + "smin v15.4s, v15.4s, v20.4s\n" + "smin v16.4s, v16.4s, v20.4s\n" + "smin v17.4s, v17.4s, v20.4s\n" + "smin v18.4s, v18.4s, v20.4s\n" + "smin v19.4s, v19.4s, v20.4s\n" + "add x20, %x[qp], %[minval]\n" + "ld1r { v20.4s }, [x20]\n" + "smax v8.4s, v8.4s, v20.4s\n" + "smax v9.4s, v9.4s, v20.4s\n" + "smax v10.4s, v10.4s, v20.4s\n" + "smax v11.4s, v11.4s, v20.4s\n" + "smax v12.4s, v12.4s, v20.4s\n" + "smax v13.4s, v13.4s, v20.4s\n" + "smax v14.4s, v14.4s, v20.4s\n" + "smax v15.4s, v15.4s, v20.4s\n" + "smax v16.4s, v16.4s, v20.4s\n" + "smax v17.4s, v17.4s, v20.4s\n" + "smax v18.4s, v18.4s, v20.4s\n" + "smax v19.4s, v19.4s, v20.4s\n" "uzp1 v8.8h, v8.8h, v9.8h\n" - "uzp1 v9.8h, v10.8h, v11.8h\n" + "uzp1 v21.8h, v10.8h, v11.8h\n" "uzp1 v12.8h, v12.8h, v13.8h\n" - "uzp1 v13.8h, v14.8h, v15.8h\n" + "uzp1 v20.8h, v14.8h, v15.8h\n" "uzp1 v16.8h, v16.8h, v17.8h\n" "uzp1 v17.8h, v18.8h, v19.8h\n" "cmp x16, #0x10\n" - "uzp1 v8.16b, v8.16b, v9.16b\n" - "uzp1 v12.16b, v12.16b, v13.16b\n" + "uzp1 v8.16b, v8.16b, v21.16b\n" + "uzp1 v12.16b, v12.16b, v20.16b\n" "uzp1 v16.16b, v16.16b, v17.16b\n" "bge 80f\n" "tbz x16, #3, 75f\n" "str d8, [x17], #0x8\n" - "str d12, [x24], #0x8\n" - "str d16, [x23], #0x8\n" + "str d12, [x25], #0x8\n" + "str d16, [x24], #0x8\n" "tbz x16, #2, 73f\n" "st1 { v8.s }[2], [x17], #0x4\n" - "st1 { v12.s }[2], [x24], #0x4\n" - "st1 { v16.s }[2], [x23], #0x4\n" + "st1 { v12.s }[2], [x25], #0x4\n" + "st1 { v16.s }[2], [x24], #0x4\n" "tbz x16, #1, 72f\n" "st1 { v8.h }[6], [x17], #0x2\n" - "st1 { v12.h }[6], [x24], #0x2\n" - "st1 { v16.h }[6], [x23], #0x2\n" + "st1 { v12.h }[6], [x25], #0x2\n" + "st1 { v16.h }[6], [x24], #0x2\n" "tbz x16, #0, 79f\n" "st1 { v8.b }[14], [x17]\n" - "st1 { v12.b }[14], [x24]\n" - "st1 { v16.b }[14], [x23]\n" + "st1 { v12.b }[14], [x25]\n" + "st1 { v16.b }[14], [x24]\n" "b 79f\n" "72:" // Height 3: Partial direct writeback: partial_1_12 "tbz x16, #0, 79f\n" "st1 { v8.b }[12], [x17]\n" - "st1 { v12.b }[12], [x24]\n" - "st1 { v16.b }[12], [x23]\n" + "st1 { v12.b }[12], [x25]\n" + "st1 { v16.b }[12], [x24]\n" "b 79f\n" "73:" // Height 3: Partial direct writeback: partial_2_8 "tbz x16, #1, 74f\n" "st1 { v8.h }[4], [x17], #0x2\n" - "st1 { v12.h }[4], [x24], #0x2\n" - "st1 { v16.h }[4], [x23], #0x2\n" + "st1 { v12.h }[4], [x25], #0x2\n" + "st1 { v16.h }[4], [x24], #0x2\n" "tbz x16, #0, 79f\n" "st1 { v8.b }[10], [x17]\n" - "st1 { v12.b }[10], [x24]\n" - "st1 { v16.b }[10], [x23]\n" + "st1 { v12.b }[10], [x25]\n" + "st1 { v16.b }[10], [x24]\n" "b 79f\n" "74:" // Height 3: Partial direct writeback: partial_1_8 "tbz x16, #0, 79f\n" "st1 { v8.b }[8], [x17]\n" - "st1 { v12.b }[8], [x24]\n" - "st1 { v16.b }[8], [x23]\n" + "st1 { v12.b }[8], [x25]\n" + "st1 { v16.b }[8], [x24]\n" "b 79f\n" "75:" // Height 3: Partial direct writeback: partial_4_0 "tbz x16, #2, 77f\n" "str s8, 
[x17], #0x4\n" - "str s12, [x24], #0x4\n" - "str s16, [x23], #0x4\n" + "str s12, [x25], #0x4\n" + "str s16, [x24], #0x4\n" "tbz x16, #1, 76f\n" "st1 { v8.h }[2], [x17], #0x2\n" - "st1 { v12.h }[2], [x24], #0x2\n" - "st1 { v16.h }[2], [x23], #0x2\n" + "st1 { v12.h }[2], [x25], #0x2\n" + "st1 { v16.h }[2], [x24], #0x2\n" "tbz x16, #0, 79f\n" "st1 { v8.b }[6], [x17]\n" - "st1 { v12.b }[6], [x24]\n" - "st1 { v16.b }[6], [x23]\n" + "st1 { v12.b }[6], [x25]\n" + "st1 { v16.b }[6], [x24]\n" "b 79f\n" "76:" // Height 3: Partial direct writeback: partial_1_4 "tbz x16, #0, 79f\n" "st1 { v8.b }[4], [x17]\n" - "st1 { v12.b }[4], [x24]\n" - "st1 { v16.b }[4], [x23]\n" + "st1 { v12.b }[4], [x25]\n" + "st1 { v16.b }[4], [x24]\n" "b 79f\n" "77:" // Height 3: Partial direct writeback: partial_2_0 "tbz x16, #1, 78f\n" "str h8, [x17], #0x2\n" - "str h12, [x24], #0x2\n" - "str h16, [x23], #0x2\n" + "str h12, [x25], #0x2\n" + "str h16, [x24], #0x2\n" "tbz x16, #0, 79f\n" "st1 { v8.b }[2], [x17]\n" - "st1 { v12.b }[2], [x24]\n" - "st1 { v16.b }[2], [x23]\n" + "st1 { v12.b }[2], [x25]\n" + "st1 { v16.b }[2], [x24]\n" "b 79f\n" "78:" // Height 3: Partial direct writeback: partial_1_0 "str b8, [x17, #0x0]\n" - "str b12, [x24, #0x0]\n" - "str b16, [x23, #0x0]\n" + "str b12, [x25, #0x0]\n" + "str b16, [x24, #0x0]\n" "79:" // Height 3: Partial direct writeback: Done "b 81f\n" "80:" // Height 3: Full writeback "str q8, [x17, #0x0]\n" "add x17, x17, #0x10\n" - "str q12, [x24, #0x0]\n" - "str q16, [x23, #0x0]\n" + "str q12, [x25, #0x0]\n" + "str q16, [x24, #0x0]\n" "81:" // Height 3: Writeback done "subs x16, x16, #0x10\n" "bgt 56b\n" @@ -1427,369 +1426,369 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( "85:" // Height 4: String loop "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n" "ldr w13, [x20, x14, LSL #0x2]\n" - "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n" + "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n" "tbz %x[flags], #3, 86f\n" - "ldr x21, [%x[input_ptr], x14, LSL #0x3]\n" - "add x21, x21, x20, LSL #3\n" - "ldr x12, [x21, #0x0]\n" - "ldr x9, [x21, #0x8]\n" - "ldr x27, [x21, #0x10]\n" - "ldr x25, [x21, #0x18]\n" + "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n" + "add x20, x20, x21, LSL #3\n" + "ldr x12, [x20, #0x0]\n" + "ldr x11, [x20, #0x8]\n" + "ldr x10, [x20, #0x10]\n" + "ldr x9, [x20, #0x18]\n" "cbnz x14, 87f\n" "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n" "add x12, x12, x20\n" + "add x11, x11, x20\n" + "add x10, x10, x20\n" "add x9, x9, x20\n" - "add x27, x27, x20\n" - "add x25, x25, x20\n" "b 87f\n" "86:" // Height 4: setup direct input "mov x12, %x[input_ptr]\n" - "add x9, x12, x20\n" - "add x27, x9, x20\n" - "add x25, x27, x20\n" + "add x11, x12, x21\n" + "add x10, x11, x21\n" + "add x9, x10, x21\n" "87:" // Height 4: input setup done "cmp x13, #0x10\n" "blt 90f\n" "ldr q0, [x12, #0x0]\n" "cmp x13, #0x20\n" - "ldr q1, [x9, #0x0]\n" - "ldr q2, [x27, #0x0]\n" - "ldr q3, [x25, #0x0]\n" + "ldr q1, [x11, #0x0]\n" + "ldr q2, [x10, #0x0]\n" + "ldr q3, [x9, #0x0]\n" "ldr q6, [x15, #0x0]\n" "ldr q7, [x15, #0x10]\n" "blt 89f\n" "88:" // Height 4: Multiply loop: Main loop head ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n" - "ldr x20, [x15, #0x28]\n" + "ldr x21, [x15, #0x28]\n" ".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n" - "ldr x11, [x15, #0x38]\n" + "ldr x20, [x15, #0x38]\n" ".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n" "add x12, x12, #0x10\n" ".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n" - "ldr d6, [x15, #0x20]\n" + "ldr d25, [x15, #0x20]\n" ".inst 
0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n" - "mov v6.d[1], x20\n" + "mov v25.d[1], x21\n" ".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n" - "ldr x20, [x15, #0x48]\n" + "ldr x21, [x15, #0x48]\n" ".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n" - "add x9, x9, #0x10\n" + "add x11, x11, #0x10\n" ".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n" - "ldr d7, [x15, #0x30]\n" - "mov v7.d[1], x11\n" - ".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n" - ".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n" - "ldr x11, [x15, #0x58]\n" - ".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n" - "add x27, x27, #0x10\n" - ".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n" - "ldr d6, [x15, #0x40]\n" - ".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n" - "mov v6.d[1], x20\n" - ".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n" - "ldr x20, [x15, #0x68]\n" - ".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n" - "add x25, x25, #0x10\n" - ".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n" - "ldr d7, [x15, #0x50]\n" - "mov v7.d[1], x11\n" - ".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n" - ".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n" - "ldr x11, [x15, #0x78]\n" - ".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n" - "ldr x10, [x12, #0x8]\n" - ".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n" - "ldr d6, [x15, #0x60]\n" - ".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n" - "mov v6.d[1], x20\n" - ".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n" - "ldr x20, [x15, #0x88]\n" - ".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n" - "ldr x28, [x9, #0x8]\n" - ".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n" - "ldr d7, [x15, #0x70]\n" - "mov v7.d[1], x11\n" - ".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n" - ".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n" - "ldr x11, [x15, #0x98]\n" - ".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n" - "ldr x26, [x27, #0x8]\n" - ".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n" - "ldr d6, [x15, #0x80]\n" - ".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n" - "mov v6.d[1], x20\n" - ".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n" - "ldr x20, [x15, #0xa8]\n" - ".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n" - "ldr x24, [x25, #0x8]\n" - ".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n" - "ldr d7, [x15, #0x90]\n" - "mov v7.d[1], x11\n" - ".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n" - ".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n" - "ldr x11, [x15, #0xb8]\n" - ".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n" + "ldr d24, [x15, #0x30]\n" + "mov v24.d[1], x20\n" + ".inst 0x4f80e32a // sdot v10.4s, v25.16b, v0.4b[0]\n" + ".inst 0x4f81e32e // sdot v14.4s, v25.16b, v1.4b[0]\n" + "ldr x20, [x15, #0x58]\n" + ".inst 0x4f82e332 // sdot v18.4s, v25.16b, v2.4b[0]\n" + "add x10, x10, #0x10\n" + ".inst 0x4f83e336 // sdot v22.4s, v25.16b, v3.4b[0]\n" + "ldr d25, [x15, #0x40]\n" + ".inst 0x4f80e30b // sdot v11.4s, v24.16b, v0.4b[0]\n" + "mov v25.d[1], x21\n" + ".inst 0x4f81e30f // sdot v15.4s, v24.16b, v1.4b[0]\n" + "ldr x21, [x15, #0x68]\n" + ".inst 0x4f82e313 // sdot v19.4s, v24.16b, v2.4b[0]\n" + "add x9, x9, #0x10\n" + ".inst 0x4f83e317 // sdot v23.4s, v24.16b, v3.4b[0]\n" + "ldr d24, [x15, #0x50]\n" + "mov v24.d[1], x20\n" + ".inst 0x4fa0e328 // sdot v8.4s, v25.16b, v0.4b[1]\n" + ".inst 0x4fa1e32c // sdot v12.4s, v25.16b, v1.4b[1]\n" + "ldr x20, [x15, #0x78]\n" + ".inst 0x4fa2e330 // sdot v16.4s, v25.16b, v2.4b[1]\n" + "ldr x25, [x12, #0x8]\n" + ".inst 0x4fa3e334 // sdot v20.4s, 
v25.16b, v3.4b[1]\n" + "ldr d25, [x15, #0x60]\n" + ".inst 0x4fa0e309 // sdot v9.4s, v24.16b, v0.4b[1]\n" + "mov v25.d[1], x21\n" + ".inst 0x4fa1e30d // sdot v13.4s, v24.16b, v1.4b[1]\n" + "ldr x21, [x15, #0x88]\n" + ".inst 0x4fa2e311 // sdot v17.4s, v24.16b, v2.4b[1]\n" + "ldr x24, [x11, #0x8]\n" + ".inst 0x4fa3e315 // sdot v21.4s, v24.16b, v3.4b[1]\n" + "ldr d24, [x15, #0x70]\n" + "mov v24.d[1], x20\n" + ".inst 0x4fa0e32a // sdot v10.4s, v25.16b, v0.4b[1]\n" + ".inst 0x4fa1e32e // sdot v14.4s, v25.16b, v1.4b[1]\n" + "ldr x20, [x15, #0x98]\n" + ".inst 0x4fa2e332 // sdot v18.4s, v25.16b, v2.4b[1]\n" + "ldr x23, [x10, #0x8]\n" + ".inst 0x4fa3e336 // sdot v22.4s, v25.16b, v3.4b[1]\n" + "ldr d25, [x15, #0x80]\n" + ".inst 0x4fa0e30b // sdot v11.4s, v24.16b, v0.4b[1]\n" + "mov v25.d[1], x21\n" + ".inst 0x4fa1e30f // sdot v15.4s, v24.16b, v1.4b[1]\n" + "ldr x21, [x15, #0xa8]\n" + ".inst 0x4fa2e313 // sdot v19.4s, v24.16b, v2.4b[1]\n" + "ldr x22, [x9, #0x8]\n" + ".inst 0x4fa3e317 // sdot v23.4s, v24.16b, v3.4b[1]\n" + "ldr d24, [x15, #0x90]\n" + "mov v24.d[1], x20\n" + ".inst 0x4f80eb28 // sdot v8.4s, v25.16b, v0.4b[2]\n" + ".inst 0x4f81eb2c // sdot v12.4s, v25.16b, v1.4b[2]\n" + "ldr x20, [x15, #0xb8]\n" + ".inst 0x4f82eb30 // sdot v16.4s, v25.16b, v2.4b[2]\n" "sub x13, x13, #0x10\n" - ".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n" - "ldr d6, [x15, #0xa0]\n" - ".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n" - "mov v6.d[1], x20\n" - ".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n" - "ldr x20, [x15, #0xc8]\n" - ".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n" + ".inst 0x4f83eb34 // sdot v20.4s, v25.16b, v3.4b[2]\n" + "ldr d25, [x15, #0xa0]\n" + ".inst 0x4f80eb09 // sdot v9.4s, v24.16b, v0.4b[2]\n" + "mov v25.d[1], x21\n" + ".inst 0x4f81eb0d // sdot v13.4s, v24.16b, v1.4b[2]\n" + "ldr x21, [x15, #0xc8]\n" + ".inst 0x4f82eb11 // sdot v17.4s, v24.16b, v2.4b[2]\n" "cmp x13, #0x20\n" - ".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n" - "ldr d7, [x15, #0xb0]\n" - "mov v7.d[1], x11\n" - ".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n" - ".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n" - "ldr x11, [x15, #0xd8]\n" - ".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n" + ".inst 0x4f83eb15 // sdot v21.4s, v24.16b, v3.4b[2]\n" + "ldr d24, [x15, #0xb0]\n" + "mov v24.d[1], x20\n" + ".inst 0x4f80eb2a // sdot v10.4s, v25.16b, v0.4b[2]\n" + ".inst 0x4f81eb2e // sdot v14.4s, v25.16b, v1.4b[2]\n" + "ldr x20, [x15, #0xd8]\n" + ".inst 0x4f82eb32 // sdot v18.4s, v25.16b, v2.4b[2]\n" "prfm pldl1keep, [x12, #0x80]\n" - ".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n" - "ldr d6, [x15, #0xc0]\n" - ".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n" - "mov v6.d[1], x20\n" - ".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n" - "ldr x20, [x15, #0xe8]\n" - ".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n" + ".inst 0x4f83eb36 // sdot v22.4s, v25.16b, v3.4b[2]\n" + "ldr d25, [x15, #0xc0]\n" + ".inst 0x4f80eb0b // sdot v11.4s, v24.16b, v0.4b[2]\n" + "mov v25.d[1], x21\n" + ".inst 0x4f81eb0f // sdot v15.4s, v24.16b, v1.4b[2]\n" + "ldr x21, [x15, #0xe8]\n" + ".inst 0x4f82eb13 // sdot v19.4s, v24.16b, v2.4b[2]\n" + "prfm pldl1keep, [x11, #0x80]\n" + ".inst 0x4f83eb17 // sdot v23.4s, v24.16b, v3.4b[2]\n" + "ldr d24, [x15, #0xd0]\n" + "mov v24.d[1], x20\n" + ".inst 0x4fa0eb28 // sdot v8.4s, v25.16b, v0.4b[3]\n" + ".inst 0x4fa1eb2c // sdot v12.4s, v25.16b, v1.4b[3]\n" + "ldr x20, [x15, #0xf8]\n" + ".inst 0x4fa2eb30 // sdot v16.4s, v25.16b, v2.4b[3]\n" + "prfm pldl1keep, [x10, #0x80]\n" 
+ ".inst 0x4fa3eb34 // sdot v20.4s, v25.16b, v3.4b[3]\n" + "ldr d25, [x15, #0xe0]\n" + ".inst 0x4fa0eb09 // sdot v9.4s, v24.16b, v0.4b[3]\n" + "mov v25.d[1], x21\n" + ".inst 0x4fa1eb0d // sdot v13.4s, v24.16b, v1.4b[3]\n" "prfm pldl1keep, [x9, #0x80]\n" - ".inst 0x4f83e8f7 // sdot v23.4s, v7.16b, v3.4b[2]\n" - "ldr d7, [x15, #0xd0]\n" - "mov v7.d[1], x11\n" - ".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n" - ".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n" - "ldr x11, [x15, #0xf8]\n" - ".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n" - "prfm pldl1keep, [x27, #0x80]\n" - ".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n" - "ldr d6, [x15, #0xe0]\n" - ".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n" - "mov v6.d[1], x20\n" - ".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n" - "prfm pldl1keep, [x25, #0x80]\n" - ".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n" - ".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n" - "ldr d7, [x15, #0xf0]\n" - "mov v7.d[1], x11\n" + ".inst 0x4fa2eb11 // sdot v17.4s, v24.16b, v2.4b[3]\n" + ".inst 0x4fa3eb15 // sdot v21.4s, v24.16b, v3.4b[3]\n" + "ldr d24, [x15, #0xf0]\n" + "mov v24.d[1], x20\n" "add x15, x15, #0x100\n" - ".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n" - "ldr x20, [x15, #0x8]\n" - ".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n" - "ldr x11, [x15, #0x18]\n" - ".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n" - ".inst 0x4fa3e8d6 // sdot v22.4s, v6.16b, v3.4b[3]\n" + ".inst 0x4fa0eb2a // sdot v10.4s, v25.16b, v0.4b[3]\n" + "ldr x21, [x15, #0x8]\n" + ".inst 0x4fa1eb2e // sdot v14.4s, v25.16b, v1.4b[3]\n" + "ldr x20, [x15, #0x18]\n" + ".inst 0x4fa2eb32 // sdot v18.4s, v25.16b, v2.4b[3]\n" + ".inst 0x4fa3eb36 // sdot v22.4s, v25.16b, v3.4b[3]\n" "ldr d6, [x15, #0x0]\n" - ".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n" + ".inst 0x4fa0eb0b // sdot v11.4s, v24.16b, v0.4b[3]\n" "ldr d0, [x12, #0x0]\n" - ".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n" - "ldr d1, [x9, #0x0]\n" - ".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n" - "ldr d2, [x27, #0x0]\n" - ".inst 0x4fa3e8f7 // sdot v23.4s, v7.16b, v3.4b[3]\n" - "ldr d3, [x25, #0x0]\n" + ".inst 0x4fa1eb0f // sdot v15.4s, v24.16b, v1.4b[3]\n" + "ldr d1, [x11, #0x0]\n" + ".inst 0x4fa2eb13 // sdot v19.4s, v24.16b, v2.4b[3]\n" + "ldr d2, [x10, #0x0]\n" + ".inst 0x4fa3eb17 // sdot v23.4s, v24.16b, v3.4b[3]\n" + "ldr d3, [x9, #0x0]\n" "ldr d7, [x15, #0x10]\n" - "mov v6.d[1], x20\n" - "mov v0.d[1], x10\n" - "mov v1.d[1], x28\n" - "mov v2.d[1], x26\n" - "mov v3.d[1], x24\n" - "mov v7.d[1], x11\n" + "mov v6.d[1], x21\n" + "mov v0.d[1], x25\n" + "mov v1.d[1], x24\n" + "mov v2.d[1], x23\n" + "mov v3.d[1], x22\n" + "mov v7.d[1], x20\n" "bge 88b\n" "89:" // Height 4: Multiply loop: Single iteration only ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n" "add x12, x12, #0x10\n" ".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n" - "add x9, x9, #0x10\n" + "add x11, x11, #0x10\n" ".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n" - "add x27, x27, #0x10\n" + "add x10, x10, #0x10\n" ".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n" - "ldr q6, [x15, #0x20]\n" + "ldr q25, [x15, #0x20]\n" ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n" - "add x25, x25, #0x10\n" + "add x9, x9, #0x10\n" ".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n" "sub x13, x13, #0x10\n" ".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n" "prfm pldl1keep, [x12, #0x80]\n" ".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n" - "ldr q7, [x15, #0x30]\n" - ".inst 0x4f80e0ca // sdot 
v10.4s, v6.16b, v0.4b[0]\n" + "ldr q24, [x15, #0x30]\n" + ".inst 0x4f80e32a // sdot v10.4s, v25.16b, v0.4b[0]\n" + "prfm pldl1keep, [x11, #0x80]\n" + ".inst 0x4f81e32e // sdot v14.4s, v25.16b, v1.4b[0]\n" + "prfm pldl1keep, [x10, #0x80]\n" + ".inst 0x4f82e332 // sdot v18.4s, v25.16b, v2.4b[0]\n" "prfm pldl1keep, [x9, #0x80]\n" - ".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n" - "prfm pldl1keep, [x27, #0x80]\n" - ".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n" - "prfm pldl1keep, [x25, #0x80]\n" - ".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n" - "ldr q6, [x15, #0x40]\n" - ".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n" - ".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n" - ".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n" - ".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n" - "ldr q7, [x15, #0x50]\n" - ".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n" - ".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n" - ".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n" - ".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n" - "ldr q6, [x15, #0x60]\n" - ".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n" - ".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n" - ".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n" - ".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n" - "ldr q7, [x15, #0x70]\n" - ".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n" - ".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n" - ".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n" - ".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n" - "ldr q6, [x15, #0x80]\n" - ".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n" - ".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n" - ".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n" - ".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n" - "ldr q7, [x15, #0x90]\n" - ".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n" - ".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n" - ".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n" - ".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n" - "ldr q6, [x15, #0xa0]\n" - ".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n" - ".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n" - ".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n" - ".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n" - "ldr q7, [x15, #0xb0]\n" - ".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n" - ".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n" - ".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n" - ".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n" - "ldr q6, [x15, #0xc0]\n" - ".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n" - ".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n" - ".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n" - ".inst 0x4f83e8f7 // sdot v23.4s, v7.16b, v3.4b[2]\n" - "ldr q7, [x15, #0xd0]\n" - ".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n" - ".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n" - ".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n" - ".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n" - "ldr q6, [x15, #0xe0]\n" - ".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n" - ".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n" - ".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n" - ".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n" - "ldr q7, [x15, #0xf0]\n" - ".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n" + ".inst 0x4f83e336 // sdot v22.4s, v25.16b, v3.4b[0]\n" + "ldr q25, [x15, #0x40]\n" + ".inst 0x4f80e30b // sdot v11.4s, v24.16b, v0.4b[0]\n" + 
".inst 0x4f81e30f // sdot v15.4s, v24.16b, v1.4b[0]\n" + ".inst 0x4f82e313 // sdot v19.4s, v24.16b, v2.4b[0]\n" + ".inst 0x4f83e317 // sdot v23.4s, v24.16b, v3.4b[0]\n" + "ldr q24, [x15, #0x50]\n" + ".inst 0x4fa0e328 // sdot v8.4s, v25.16b, v0.4b[1]\n" + ".inst 0x4fa1e32c // sdot v12.4s, v25.16b, v1.4b[1]\n" + ".inst 0x4fa2e330 // sdot v16.4s, v25.16b, v2.4b[1]\n" + ".inst 0x4fa3e334 // sdot v20.4s, v25.16b, v3.4b[1]\n" + "ldr q25, [x15, #0x60]\n" + ".inst 0x4fa0e309 // sdot v9.4s, v24.16b, v0.4b[1]\n" + ".inst 0x4fa1e30d // sdot v13.4s, v24.16b, v1.4b[1]\n" + ".inst 0x4fa2e311 // sdot v17.4s, v24.16b, v2.4b[1]\n" + ".inst 0x4fa3e315 // sdot v21.4s, v24.16b, v3.4b[1]\n" + "ldr q24, [x15, #0x70]\n" + ".inst 0x4fa0e32a // sdot v10.4s, v25.16b, v0.4b[1]\n" + ".inst 0x4fa1e32e // sdot v14.4s, v25.16b, v1.4b[1]\n" + ".inst 0x4fa2e332 // sdot v18.4s, v25.16b, v2.4b[1]\n" + ".inst 0x4fa3e336 // sdot v22.4s, v25.16b, v3.4b[1]\n" + "ldr q25, [x15, #0x80]\n" + ".inst 0x4fa0e30b // sdot v11.4s, v24.16b, v0.4b[1]\n" + ".inst 0x4fa1e30f // sdot v15.4s, v24.16b, v1.4b[1]\n" + ".inst 0x4fa2e313 // sdot v19.4s, v24.16b, v2.4b[1]\n" + ".inst 0x4fa3e317 // sdot v23.4s, v24.16b, v3.4b[1]\n" + "ldr q24, [x15, #0x90]\n" + ".inst 0x4f80eb28 // sdot v8.4s, v25.16b, v0.4b[2]\n" + ".inst 0x4f81eb2c // sdot v12.4s, v25.16b, v1.4b[2]\n" + ".inst 0x4f82eb30 // sdot v16.4s, v25.16b, v2.4b[2]\n" + ".inst 0x4f83eb34 // sdot v20.4s, v25.16b, v3.4b[2]\n" + "ldr q25, [x15, #0xa0]\n" + ".inst 0x4f80eb09 // sdot v9.4s, v24.16b, v0.4b[2]\n" + ".inst 0x4f81eb0d // sdot v13.4s, v24.16b, v1.4b[2]\n" + ".inst 0x4f82eb11 // sdot v17.4s, v24.16b, v2.4b[2]\n" + ".inst 0x4f83eb15 // sdot v21.4s, v24.16b, v3.4b[2]\n" + "ldr q24, [x15, #0xb0]\n" + ".inst 0x4f80eb2a // sdot v10.4s, v25.16b, v0.4b[2]\n" + ".inst 0x4f81eb2e // sdot v14.4s, v25.16b, v1.4b[2]\n" + ".inst 0x4f82eb32 // sdot v18.4s, v25.16b, v2.4b[2]\n" + ".inst 0x4f83eb36 // sdot v22.4s, v25.16b, v3.4b[2]\n" + "ldr q25, [x15, #0xc0]\n" + ".inst 0x4f80eb0b // sdot v11.4s, v24.16b, v0.4b[2]\n" + ".inst 0x4f81eb0f // sdot v15.4s, v24.16b, v1.4b[2]\n" + ".inst 0x4f82eb13 // sdot v19.4s, v24.16b, v2.4b[2]\n" + ".inst 0x4f83eb17 // sdot v23.4s, v24.16b, v3.4b[2]\n" + "ldr q24, [x15, #0xd0]\n" + ".inst 0x4fa0eb28 // sdot v8.4s, v25.16b, v0.4b[3]\n" + ".inst 0x4fa1eb2c // sdot v12.4s, v25.16b, v1.4b[3]\n" + ".inst 0x4fa2eb30 // sdot v16.4s, v25.16b, v2.4b[3]\n" + ".inst 0x4fa3eb34 // sdot v20.4s, v25.16b, v3.4b[3]\n" + "ldr q25, [x15, #0xe0]\n" + ".inst 0x4fa0eb09 // sdot v9.4s, v24.16b, v0.4b[3]\n" + ".inst 0x4fa1eb0d // sdot v13.4s, v24.16b, v1.4b[3]\n" + ".inst 0x4fa2eb11 // sdot v17.4s, v24.16b, v2.4b[3]\n" + ".inst 0x4fa3eb15 // sdot v21.4s, v24.16b, v3.4b[3]\n" + "ldr q24, [x15, #0xf0]\n" + ".inst 0x4fa0eb2a // sdot v10.4s, v25.16b, v0.4b[3]\n" "add x15, x15, #0x100\n" - ".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n" - ".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n" - ".inst 0x4fa3e8d6 // sdot v22.4s, v6.16b, v3.4b[3]\n" - ".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n" - ".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n" - ".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n" - ".inst 0x4fa3e8f7 // sdot v23.4s, v7.16b, v3.4b[3]\n" + ".inst 0x4fa1eb2e // sdot v14.4s, v25.16b, v1.4b[3]\n" + ".inst 0x4fa2eb32 // sdot v18.4s, v25.16b, v2.4b[3]\n" + ".inst 0x4fa3eb36 // sdot v22.4s, v25.16b, v3.4b[3]\n" + ".inst 0x4fa0eb0b // sdot v11.4s, v24.16b, v0.4b[3]\n" + ".inst 0x4fa1eb0f // sdot v15.4s, v24.16b, v1.4b[3]\n" + ".inst 0x4fa2eb13 // sdot v19.4s, v24.16b, 
v2.4b[3]\n" + ".inst 0x4fa3eb17 // sdot v23.4s, v24.16b, v3.4b[3]\n" "90:" // Height 4: Multiply loop: Main loop skip "cbz x13, 95f\n" "cmp x13, #0x4\n" "blt 92f\n" "91:" // Height 4: Multiply loop: Odd block loop - "ldr s0, [x12], #0x4\n" + "ldr s29, [x12], #0x4\n" "sub x13, x13, #0x4\n" - "ldr s1, [x9], #0x4\n" + "ldr s28, [x11], #0x4\n" "cmp x13, #0x4\n" - "ldr s2, [x27], #0x4\n" - "ldr s3, [x25], #0x4\n" - "ldr q6, [x15, #0x0]\n" - ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n" - "ldr q7, [x15, #0x10]\n" - ".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n" - ".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n" - ".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n" - "ldr q6, [x15, #0x20]\n" - ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n" - ".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n" - ".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n" - ".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n" - "ldr q7, [x15, #0x30]\n" - ".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n" + "ldr s27, [x10], #0x4\n" + "ldr s26, [x9], #0x4\n" + "ldr q25, [x15, #0x0]\n" + ".inst 0x4f9de328 // sdot v8.4s, v25.16b, v29.4b[0]\n" + "ldr q24, [x15, #0x10]\n" + ".inst 0x4f9ce32c // sdot v12.4s, v25.16b, v28.4b[0]\n" + ".inst 0x4f9be330 // sdot v16.4s, v25.16b, v27.4b[0]\n" + ".inst 0x4f9ae334 // sdot v20.4s, v25.16b, v26.4b[0]\n" + "ldr q25, [x15, #0x20]\n" + ".inst 0x4f9de309 // sdot v9.4s, v24.16b, v29.4b[0]\n" + ".inst 0x4f9ce30d // sdot v13.4s, v24.16b, v28.4b[0]\n" + ".inst 0x4f9be311 // sdot v17.4s, v24.16b, v27.4b[0]\n" + ".inst 0x4f9ae315 // sdot v21.4s, v24.16b, v26.4b[0]\n" + "ldr q24, [x15, #0x30]\n" + ".inst 0x4f9de32a // sdot v10.4s, v25.16b, v29.4b[0]\n" "add x15, x15, #0x40\n" - ".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n" - ".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n" - ".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n" - ".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n" - ".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n" - ".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n" - ".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n" + ".inst 0x4f9ce32e // sdot v14.4s, v25.16b, v28.4b[0]\n" + ".inst 0x4f9be332 // sdot v18.4s, v25.16b, v27.4b[0]\n" + ".inst 0x4f9ae336 // sdot v22.4s, v25.16b, v26.4b[0]\n" + ".inst 0x4f9de30b // sdot v11.4s, v24.16b, v29.4b[0]\n" + ".inst 0x4f9ce30f // sdot v15.4s, v24.16b, v28.4b[0]\n" + ".inst 0x4f9be313 // sdot v19.4s, v24.16b, v27.4b[0]\n" + ".inst 0x4f9ae317 // sdot v23.4s, v24.16b, v26.4b[0]\n" "bge 91b\n" "92:" // Height 4: Multiply loop: Skip odd blocks "cbz x13, 95f\n" "tbz x13, #1, 93f\n" "ldr h0, [x12], #0x2\n" - "ldr h1, [x9], #0x2\n" - "ldr h2, [x27], #0x2\n" - "ldr h3, [x25], #0x2\n" + "ldr h1, [x11], #0x2\n" + "ldr h2, [x10], #0x2\n" + "ldr h3, [x9], #0x2\n" "tbz x13, #0, 94f\n" "ld1 { v0.b }[2], [x12]\n" - "ld1 { v1.b }[2], [x9]\n" - "ld1 { v2.b }[2], [x27]\n" - "ld1 { v3.b }[2], [x25]\n" + "ld1 { v1.b }[2], [x11]\n" + "ld1 { v2.b }[2], [x10]\n" + "ld1 { v3.b }[2], [x9]\n" "b 94f\n" "93:" // Height 4: Multiply loop: Ragged operand read: partial_1_0 "ldr b0, [x12, #0x0]\n" - "ldr b1, [x9, #0x0]\n" - "ldr b2, [x27, #0x0]\n" - "ldr b3, [x25, #0x0]\n" + "ldr b1, [x11, #0x0]\n" + "ldr b2, [x10, #0x0]\n" + "ldr b3, [x9, #0x0]\n" "94:" // Height 4: Multiply loop: Ragged operand read: Done - "ldr q6, [x15, #0x0]\n" - ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n" - "ldr q7, [x15, #0x10]\n" - ".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n" - ".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, 
v2.4b[0]\n" - ".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n" - "ldr q6, [x15, #0x20]\n" - ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n" - ".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n" - ".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n" - ".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n" - "ldr q7, [x15, #0x30]\n" - ".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n" + "ldr q25, [x15, #0x0]\n" + ".inst 0x4f80e328 // sdot v8.4s, v25.16b, v0.4b[0]\n" + "ldr q24, [x15, #0x10]\n" + ".inst 0x4f81e32c // sdot v12.4s, v25.16b, v1.4b[0]\n" + ".inst 0x4f82e330 // sdot v16.4s, v25.16b, v2.4b[0]\n" + ".inst 0x4f83e334 // sdot v20.4s, v25.16b, v3.4b[0]\n" + "ldr q25, [x15, #0x20]\n" + ".inst 0x4f80e309 // sdot v9.4s, v24.16b, v0.4b[0]\n" + ".inst 0x4f81e30d // sdot v13.4s, v24.16b, v1.4b[0]\n" + ".inst 0x4f82e311 // sdot v17.4s, v24.16b, v2.4b[0]\n" + ".inst 0x4f83e315 // sdot v21.4s, v24.16b, v3.4b[0]\n" + "ldr q24, [x15, #0x30]\n" + ".inst 0x4f80e32a // sdot v10.4s, v25.16b, v0.4b[0]\n" "add x15, x15, #0x40\n" - ".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n" - ".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n" - ".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n" - ".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n" - ".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n" - ".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n" - ".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n" + ".inst 0x4f81e32e // sdot v14.4s, v25.16b, v1.4b[0]\n" + ".inst 0x4f82e332 // sdot v18.4s, v25.16b, v2.4b[0]\n" + ".inst 0x4f83e336 // sdot v22.4s, v25.16b, v3.4b[0]\n" + ".inst 0x4f80e30b // sdot v11.4s, v24.16b, v0.4b[0]\n" + ".inst 0x4f81e30f // sdot v15.4s, v24.16b, v1.4b[0]\n" + ".inst 0x4f82e313 // sdot v19.4s, v24.16b, v2.4b[0]\n" + ".inst 0x4f83e317 // sdot v23.4s, v24.16b, v3.4b[0]\n" "95:" // Height 4: Multiply loop: No odd multiplies "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n" "add x14, x14, #0x1\n" "cmp x14, x20\n" "bne 85b\n" - "ldr q0, [x6, #0x0]\n" - "add v8.4s, v8.4s, v0.4s\n" - "ldr q1, [x6, #0x10]\n" - "add v9.4s, v9.4s, v1.4s\n" - "ldr q2, [x6, #0x20]\n" - "add v10.4s, v10.4s, v2.4s\n" - "ldr q3, [x6, #0x30]\n" - "add v11.4s, v11.4s, v3.4s\n" + "ldr q27, [x6, #0x0]\n" + "add v8.4s, v8.4s, v27.4s\n" + "ldr q26, [x6, #0x10]\n" + "add v9.4s, v9.4s, v26.4s\n" + "ldr q25, [x6, #0x20]\n" + "add v10.4s, v10.4s, v25.4s\n" + "ldr q24, [x6, #0x30]\n" + "add v11.4s, v11.4s, v24.4s\n" "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n" - "add x24, x17, x20\n" + "add x25, x17, x20\n" + "add x24, x25, x20\n" "add x23, x24, x20\n" - "add x22, x23, x20\n" "prfm pstl1keep, [x17, #0x0]\n" - "add v12.4s, v12.4s, v0.4s\n" + "add v12.4s, v12.4s, v27.4s\n" + "prfm pstl1keep, [x25, #0x0]\n" + "add v13.4s, v13.4s, v26.4s\n" "prfm pstl1keep, [x24, #0x0]\n" - "add v13.4s, v13.4s, v1.4s\n" + "add v14.4s, v14.4s, v25.4s\n" "prfm pstl1keep, [x23, #0x0]\n" - "add v14.4s, v14.4s, v2.4s\n" - "prfm pstl1keep, [x22, #0x0]\n" - "add v15.4s, v15.4s, v3.4s\n" - "add v16.4s, v16.4s, v0.4s\n" - "add v17.4s, v17.4s, v1.4s\n" - "add v18.4s, v18.4s, v2.4s\n" - "add v19.4s, v19.4s, v3.4s\n" - "add v20.4s, v20.4s, v0.4s\n" - "add v21.4s, v21.4s, v1.4s\n" - "add v22.4s, v22.4s, v2.4s\n" - "add v23.4s, v23.4s, v3.4s\n" + "add v15.4s, v15.4s, v24.4s\n" + "add v16.4s, v16.4s, v27.4s\n" + "add v17.4s, v17.4s, v26.4s\n" + "add v18.4s, v18.4s, v25.4s\n" + "add v19.4s, v19.4s, v24.4s\n" + "add v20.4s, v20.4s, v27.4s\n" + "add v21.4s, v21.4s, v26.4s\n" + "add v22.4s, v22.4s, v25.4s\n" + "add 
v23.4s, v23.4s, v24.4s\n" "add x6, x6, #0x40\n" "tbz %x[flags], #4, 96f\n" "ldr q0, [x8, #0x0]\n" @@ -1804,10 +1803,10 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( "add x7, x7, #0x40\n" "b 97f\n" "96:" // Height 4: per layer parameters - "add x25, %x[qp], %[per_layer_right_shift]\n" - "ld1r { v0.4s }, [x25]\n" - "add x25, %x[qp], %[per_layer_mul]\n" - "ld1r { v4.4s }, [x25]\n" + "add x20, %x[qp], %[per_layer_right_shift]\n" + "ld1r { v0.4s }, [x20]\n" + "add x20, %x[qp], %[per_layer_mul]\n" + "ld1r { v4.4s }, [x20]\n" "mov v1.16b, v0.16b\n" "mov v5.16b, v4.16b\n" "mov v2.16b, v0.16b\n" @@ -1832,54 +1831,54 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( "sqrdmulh v22.4s, v22.4s, v6.4s\n" "sqrdmulh v23.4s, v23.4s, v7.4s\n" "tbz %x[flags], #5, 98f\n" - "and v4.16b, v8.16b, v0.16b\n" - "and v5.16b, v9.16b, v1.16b\n" - "and v6.16b, v10.16b, v2.16b\n" - "and v7.16b, v11.16b, v3.16b\n" - "sshr v4.4s, v4.4s, #0x1f\n" - "sshr v5.4s, v5.4s, #0x1f\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sshr v7.4s, v7.4s, #0x1f\n" - "sqadd v8.4s, v8.4s, v4.4s\n" - "sqadd v9.4s, v9.4s, v5.4s\n" - "sqadd v10.4s, v10.4s, v6.4s\n" - "sqadd v11.4s, v11.4s, v7.4s\n" - "and v4.16b, v12.16b, v0.16b\n" - "and v5.16b, v13.16b, v1.16b\n" - "and v6.16b, v14.16b, v2.16b\n" - "and v7.16b, v15.16b, v3.16b\n" - "sshr v4.4s, v4.4s, #0x1f\n" - "sshr v5.4s, v5.4s, #0x1f\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sshr v7.4s, v7.4s, #0x1f\n" - "sqadd v12.4s, v12.4s, v4.4s\n" - "sqadd v13.4s, v13.4s, v5.4s\n" - "sqadd v14.4s, v14.4s, v6.4s\n" - "sqadd v15.4s, v15.4s, v7.4s\n" - "and v4.16b, v16.16b, v0.16b\n" - "and v5.16b, v17.16b, v1.16b\n" - "and v6.16b, v18.16b, v2.16b\n" - "and v7.16b, v19.16b, v3.16b\n" - "sshr v4.4s, v4.4s, #0x1f\n" - "sshr v5.4s, v5.4s, #0x1f\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sshr v7.4s, v7.4s, #0x1f\n" - "sqadd v16.4s, v16.4s, v4.4s\n" - "sqadd v17.4s, v17.4s, v5.4s\n" - "sqadd v18.4s, v18.4s, v6.4s\n" - "sqadd v19.4s, v19.4s, v7.4s\n" - "and v4.16b, v20.16b, v0.16b\n" - "and v5.16b, v21.16b, v1.16b\n" - "and v6.16b, v22.16b, v2.16b\n" - "and v7.16b, v23.16b, v3.16b\n" - "sshr v4.4s, v4.4s, #0x1f\n" - "sshr v5.4s, v5.4s, #0x1f\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sshr v7.4s, v7.4s, #0x1f\n" - "sqadd v20.4s, v20.4s, v4.4s\n" - "sqadd v21.4s, v21.4s, v5.4s\n" - "sqadd v22.4s, v22.4s, v6.4s\n" - "sqadd v23.4s, v23.4s, v7.4s\n" + "and v27.16b, v8.16b, v0.16b\n" + "and v26.16b, v9.16b, v1.16b\n" + "and v25.16b, v10.16b, v2.16b\n" + "and v24.16b, v11.16b, v3.16b\n" + "sshr v27.4s, v27.4s, #0x1f\n" + "sshr v26.4s, v26.4s, #0x1f\n" + "sshr v25.4s, v25.4s, #0x1f\n" + "sshr v24.4s, v24.4s, #0x1f\n" + "sqadd v8.4s, v8.4s, v27.4s\n" + "sqadd v9.4s, v9.4s, v26.4s\n" + "sqadd v10.4s, v10.4s, v25.4s\n" + "sqadd v11.4s, v11.4s, v24.4s\n" + "and v27.16b, v12.16b, v0.16b\n" + "and v26.16b, v13.16b, v1.16b\n" + "and v25.16b, v14.16b, v2.16b\n" + "and v24.16b, v15.16b, v3.16b\n" + "sshr v27.4s, v27.4s, #0x1f\n" + "sshr v26.4s, v26.4s, #0x1f\n" + "sshr v25.4s, v25.4s, #0x1f\n" + "sshr v24.4s, v24.4s, #0x1f\n" + "sqadd v12.4s, v12.4s, v27.4s\n" + "sqadd v13.4s, v13.4s, v26.4s\n" + "sqadd v14.4s, v14.4s, v25.4s\n" + "sqadd v15.4s, v15.4s, v24.4s\n" + "and v27.16b, v16.16b, v0.16b\n" + "and v26.16b, v17.16b, v1.16b\n" + "and v25.16b, v18.16b, v2.16b\n" + "and v24.16b, v19.16b, v3.16b\n" + "sshr v27.4s, v27.4s, #0x1f\n" + "sshr v26.4s, v26.4s, #0x1f\n" + "sshr v25.4s, v25.4s, #0x1f\n" + "sshr v24.4s, v24.4s, #0x1f\n" + "sqadd v16.4s, v16.4s, v27.4s\n" + "sqadd v17.4s, v17.4s, v26.4s\n" + "sqadd v18.4s, v18.4s, v25.4s\n" + "sqadd v19.4s, v19.4s, v24.4s\n" + 
"and v27.16b, v20.16b, v0.16b\n" + "and v26.16b, v21.16b, v1.16b\n" + "and v25.16b, v22.16b, v2.16b\n" + "and v24.16b, v23.16b, v3.16b\n" + "sshr v27.4s, v27.4s, #0x1f\n" + "sshr v26.4s, v26.4s, #0x1f\n" + "sshr v25.4s, v25.4s, #0x1f\n" + "sshr v24.4s, v24.4s, #0x1f\n" + "sqadd v20.4s, v20.4s, v27.4s\n" + "sqadd v21.4s, v21.4s, v26.4s\n" + "sqadd v22.4s, v22.4s, v25.4s\n" + "sqadd v23.4s, v23.4s, v24.4s\n" "98:" // Height 4: no shift correction "srshl v8.4s, v8.4s, v0.4s\n" "srshl v9.4s, v9.4s, v1.4s\n" @@ -1897,170 +1896,170 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( "srshl v21.4s, v21.4s, v1.4s\n" "srshl v22.4s, v22.4s, v2.4s\n" "srshl v23.4s, v23.4s, v3.4s\n" - "add x25, %x[qp], %[c_offset]\n" - "ld1r { v4.4s }, [x25]\n" - "add v8.4s, v8.4s, v4.4s\n" - "add v9.4s, v9.4s, v4.4s\n" - "add v10.4s, v10.4s, v4.4s\n" - "add v11.4s, v11.4s, v4.4s\n" - "add v12.4s, v12.4s, v4.4s\n" - "add v13.4s, v13.4s, v4.4s\n" - "add v14.4s, v14.4s, v4.4s\n" - "add v15.4s, v15.4s, v4.4s\n" - "add v16.4s, v16.4s, v4.4s\n" - "add v17.4s, v17.4s, v4.4s\n" - "add v18.4s, v18.4s, v4.4s\n" - "add v19.4s, v19.4s, v4.4s\n" - "add v20.4s, v20.4s, v4.4s\n" - "add v21.4s, v21.4s, v4.4s\n" - "add v22.4s, v22.4s, v4.4s\n" - "add v23.4s, v23.4s, v4.4s\n" - "add x25, %x[qp], %[maxval]\n" - "ld1r { v6.4s }, [x25]\n" - "smin v8.4s, v8.4s, v6.4s\n" - "smin v9.4s, v9.4s, v6.4s\n" - "smin v10.4s, v10.4s, v6.4s\n" - "smin v11.4s, v11.4s, v6.4s\n" - "smin v12.4s, v12.4s, v6.4s\n" - "smin v13.4s, v13.4s, v6.4s\n" - "smin v14.4s, v14.4s, v6.4s\n" - "smin v15.4s, v15.4s, v6.4s\n" - "smin v16.4s, v16.4s, v6.4s\n" - "smin v17.4s, v17.4s, v6.4s\n" - "smin v18.4s, v18.4s, v6.4s\n" - "smin v19.4s, v19.4s, v6.4s\n" - "smin v20.4s, v20.4s, v6.4s\n" - "smin v21.4s, v21.4s, v6.4s\n" - "smin v22.4s, v22.4s, v6.4s\n" - "smin v23.4s, v23.4s, v6.4s\n" - "add x25, %x[qp], %[minval]\n" - "ld1r { v5.4s }, [x25]\n" - "smax v8.4s, v8.4s, v5.4s\n" - "smax v9.4s, v9.4s, v5.4s\n" - "smax v10.4s, v10.4s, v5.4s\n" - "smax v11.4s, v11.4s, v5.4s\n" - "smax v12.4s, v12.4s, v5.4s\n" - "smax v13.4s, v13.4s, v5.4s\n" - "smax v14.4s, v14.4s, v5.4s\n" - "smax v15.4s, v15.4s, v5.4s\n" - "smax v16.4s, v16.4s, v5.4s\n" - "smax v17.4s, v17.4s, v5.4s\n" - "smax v18.4s, v18.4s, v5.4s\n" - "smax v19.4s, v19.4s, v5.4s\n" - "smax v20.4s, v20.4s, v5.4s\n" - "smax v21.4s, v21.4s, v5.4s\n" - "smax v22.4s, v22.4s, v5.4s\n" - "smax v23.4s, v23.4s, v5.4s\n" + "add x20, %x[qp], %[c_offset]\n" + "ld1r { v24.4s }, [x20]\n" + "add v8.4s, v8.4s, v24.4s\n" + "add v9.4s, v9.4s, v24.4s\n" + "add v10.4s, v10.4s, v24.4s\n" + "add v11.4s, v11.4s, v24.4s\n" + "add v12.4s, v12.4s, v24.4s\n" + "add v13.4s, v13.4s, v24.4s\n" + "add v14.4s, v14.4s, v24.4s\n" + "add v15.4s, v15.4s, v24.4s\n" + "add v16.4s, v16.4s, v24.4s\n" + "add v17.4s, v17.4s, v24.4s\n" + "add v18.4s, v18.4s, v24.4s\n" + "add v19.4s, v19.4s, v24.4s\n" + "add v20.4s, v20.4s, v24.4s\n" + "add v21.4s, v21.4s, v24.4s\n" + "add v22.4s, v22.4s, v24.4s\n" + "add v23.4s, v23.4s, v24.4s\n" + "add x20, %x[qp], %[maxval]\n" + "ld1r { v24.4s }, [x20]\n" + "smin v8.4s, v8.4s, v24.4s\n" + "smin v9.4s, v9.4s, v24.4s\n" + "smin v10.4s, v10.4s, v24.4s\n" + "smin v11.4s, v11.4s, v24.4s\n" + "smin v12.4s, v12.4s, v24.4s\n" + "smin v13.4s, v13.4s, v24.4s\n" + "smin v14.4s, v14.4s, v24.4s\n" + "smin v15.4s, v15.4s, v24.4s\n" + "smin v16.4s, v16.4s, v24.4s\n" + "smin v17.4s, v17.4s, v24.4s\n" + "smin v18.4s, v18.4s, v24.4s\n" + "smin v19.4s, v19.4s, v24.4s\n" + "smin v20.4s, v20.4s, v24.4s\n" + "smin v21.4s, v21.4s, v24.4s\n" + "smin v22.4s, v22.4s, 
v24.4s\n" + "smin v23.4s, v23.4s, v24.4s\n" + "add x20, %x[qp], %[minval]\n" + "ld1r { v24.4s }, [x20]\n" + "smax v8.4s, v8.4s, v24.4s\n" + "smax v9.4s, v9.4s, v24.4s\n" + "smax v10.4s, v10.4s, v24.4s\n" + "smax v11.4s, v11.4s, v24.4s\n" + "smax v12.4s, v12.4s, v24.4s\n" + "smax v13.4s, v13.4s, v24.4s\n" + "smax v14.4s, v14.4s, v24.4s\n" + "smax v15.4s, v15.4s, v24.4s\n" + "smax v16.4s, v16.4s, v24.4s\n" + "smax v17.4s, v17.4s, v24.4s\n" + "smax v18.4s, v18.4s, v24.4s\n" + "smax v19.4s, v19.4s, v24.4s\n" + "smax v20.4s, v20.4s, v24.4s\n" + "smax v21.4s, v21.4s, v24.4s\n" + "smax v22.4s, v22.4s, v24.4s\n" + "smax v23.4s, v23.4s, v24.4s\n" "uzp1 v8.8h, v8.8h, v9.8h\n" - "uzp1 v9.8h, v10.8h, v11.8h\n" + "uzp1 v25.8h, v10.8h, v11.8h\n" "uzp1 v12.8h, v12.8h, v13.8h\n" - "uzp1 v13.8h, v14.8h, v15.8h\n" + "uzp1 v24.8h, v14.8h, v15.8h\n" "uzp1 v16.8h, v16.8h, v17.8h\n" - "uzp1 v17.8h, v18.8h, v19.8h\n" + "uzp1 v18.8h, v18.8h, v19.8h\n" "uzp1 v20.8h, v20.8h, v21.8h\n" - "uzp1 v21.8h, v22.8h, v23.8h\n" + "uzp1 v17.8h, v22.8h, v23.8h\n" "cmp x16, #0x10\n" - "uzp1 v8.16b, v8.16b, v9.16b\n" - "uzp1 v12.16b, v12.16b, v13.16b\n" - "uzp1 v16.16b, v16.16b, v17.16b\n" - "uzp1 v20.16b, v20.16b, v21.16b\n" + "uzp1 v8.16b, v8.16b, v25.16b\n" + "uzp1 v12.16b, v12.16b, v24.16b\n" + "uzp1 v16.16b, v16.16b, v18.16b\n" + "uzp1 v20.16b, v20.16b, v17.16b\n" "bge 107f\n" "tbz x16, #3, 102f\n" "str d8, [x17], #0x8\n" - "str d12, [x24], #0x8\n" - "str d16, [x23], #0x8\n" - "str d20, [x22], #0x8\n" + "str d12, [x25], #0x8\n" + "str d16, [x24], #0x8\n" + "str d20, [x23], #0x8\n" "tbz x16, #2, 100f\n" "st1 { v8.s }[2], [x17], #0x4\n" - "st1 { v12.s }[2], [x24], #0x4\n" - "st1 { v16.s }[2], [x23], #0x4\n" - "st1 { v20.s }[2], [x22], #0x4\n" + "st1 { v12.s }[2], [x25], #0x4\n" + "st1 { v16.s }[2], [x24], #0x4\n" + "st1 { v20.s }[2], [x23], #0x4\n" "tbz x16, #1, 99f\n" "st1 { v8.h }[6], [x17], #0x2\n" - "st1 { v12.h }[6], [x24], #0x2\n" - "st1 { v16.h }[6], [x23], #0x2\n" - "st1 { v20.h }[6], [x22], #0x2\n" + "st1 { v12.h }[6], [x25], #0x2\n" + "st1 { v16.h }[6], [x24], #0x2\n" + "st1 { v20.h }[6], [x23], #0x2\n" "tbz x16, #0, 106f\n" "st1 { v8.b }[14], [x17]\n" - "st1 { v12.b }[14], [x24]\n" - "st1 { v16.b }[14], [x23]\n" - "st1 { v20.b }[14], [x22]\n" + "st1 { v12.b }[14], [x25]\n" + "st1 { v16.b }[14], [x24]\n" + "st1 { v20.b }[14], [x23]\n" "b 106f\n" "99:" // Height 4: Partial direct writeback: partial_1_12 "tbz x16, #0, 106f\n" "st1 { v8.b }[12], [x17]\n" - "st1 { v12.b }[12], [x24]\n" - "st1 { v16.b }[12], [x23]\n" - "st1 { v20.b }[12], [x22]\n" + "st1 { v12.b }[12], [x25]\n" + "st1 { v16.b }[12], [x24]\n" + "st1 { v20.b }[12], [x23]\n" "b 106f\n" "100:" // Height 4: Partial direct writeback: partial_2_8 "tbz x16, #1, 101f\n" "st1 { v8.h }[4], [x17], #0x2\n" - "st1 { v12.h }[4], [x24], #0x2\n" - "st1 { v16.h }[4], [x23], #0x2\n" - "st1 { v20.h }[4], [x22], #0x2\n" + "st1 { v12.h }[4], [x25], #0x2\n" + "st1 { v16.h }[4], [x24], #0x2\n" + "st1 { v20.h }[4], [x23], #0x2\n" "tbz x16, #0, 106f\n" "st1 { v8.b }[10], [x17]\n" - "st1 { v12.b }[10], [x24]\n" - "st1 { v16.b }[10], [x23]\n" - "st1 { v20.b }[10], [x22]\n" + "st1 { v12.b }[10], [x25]\n" + "st1 { v16.b }[10], [x24]\n" + "st1 { v20.b }[10], [x23]\n" "b 106f\n" "101:" // Height 4: Partial direct writeback: partial_1_8 "tbz x16, #0, 106f\n" "st1 { v8.b }[8], [x17]\n" - "st1 { v12.b }[8], [x24]\n" - "st1 { v16.b }[8], [x23]\n" - "st1 { v20.b }[8], [x22]\n" + "st1 { v12.b }[8], [x25]\n" + "st1 { v16.b }[8], [x24]\n" + "st1 { v20.b }[8], [x23]\n" "b 106f\n" "102:" // 
Height 4: Partial direct writeback: partial_4_0 "tbz x16, #2, 104f\n" "str s8, [x17], #0x4\n" - "str s12, [x24], #0x4\n" - "str s16, [x23], #0x4\n" - "str s20, [x22], #0x4\n" + "str s12, [x25], #0x4\n" + "str s16, [x24], #0x4\n" + "str s20, [x23], #0x4\n" "tbz x16, #1, 103f\n" "st1 { v8.h }[2], [x17], #0x2\n" - "st1 { v12.h }[2], [x24], #0x2\n" - "st1 { v16.h }[2], [x23], #0x2\n" - "st1 { v20.h }[2], [x22], #0x2\n" + "st1 { v12.h }[2], [x25], #0x2\n" + "st1 { v16.h }[2], [x24], #0x2\n" + "st1 { v20.h }[2], [x23], #0x2\n" "tbz x16, #0, 106f\n" "st1 { v8.b }[6], [x17]\n" - "st1 { v12.b }[6], [x24]\n" - "st1 { v16.b }[6], [x23]\n" - "st1 { v20.b }[6], [x22]\n" + "st1 { v12.b }[6], [x25]\n" + "st1 { v16.b }[6], [x24]\n" + "st1 { v20.b }[6], [x23]\n" "b 106f\n" "103:" // Height 4: Partial direct writeback: partial_1_4 "tbz x16, #0, 106f\n" "st1 { v8.b }[4], [x17]\n" - "st1 { v12.b }[4], [x24]\n" - "st1 { v16.b }[4], [x23]\n" - "st1 { v20.b }[4], [x22]\n" + "st1 { v12.b }[4], [x25]\n" + "st1 { v16.b }[4], [x24]\n" + "st1 { v20.b }[4], [x23]\n" "b 106f\n" "104:" // Height 4: Partial direct writeback: partial_2_0 "tbz x16, #1, 105f\n" "str h8, [x17], #0x2\n" - "str h12, [x24], #0x2\n" - "str h16, [x23], #0x2\n" - "str h20, [x22], #0x2\n" + "str h12, [x25], #0x2\n" + "str h16, [x24], #0x2\n" + "str h20, [x23], #0x2\n" "tbz x16, #0, 106f\n" "st1 { v8.b }[2], [x17]\n" - "st1 { v12.b }[2], [x24]\n" - "st1 { v16.b }[2], [x23]\n" - "st1 { v20.b }[2], [x22]\n" + "st1 { v12.b }[2], [x25]\n" + "st1 { v16.b }[2], [x24]\n" + "st1 { v20.b }[2], [x23]\n" "b 106f\n" "105:" // Height 4: Partial direct writeback: partial_1_0 "str b8, [x17, #0x0]\n" - "str b12, [x24, #0x0]\n" - "str b16, [x23, #0x0]\n" - "str b20, [x22, #0x0]\n" + "str b12, [x25, #0x0]\n" + "str b16, [x24, #0x0]\n" + "str b20, [x23, #0x0]\n" "106:" // Height 4: Partial direct writeback: Done "b 108f\n" "107:" // Height 4: Full writeback "str q8, [x17, #0x0]\n" "add x17, x17, #0x10\n" - "str q12, [x24, #0x0]\n" - "str q16, [x23, #0x0]\n" - "str q20, [x22, #0x0]\n" + "str q12, [x25, #0x0]\n" + "str q16, [x24, #0x0]\n" + "str q20, [x23, #0x0]\n" "108:" // Height 4: Writeback done "subs x16, x16, #0x10\n" "bgt 83b\n" @@ -2089,439 +2088,439 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( "movi v21.4s, #0x0\n" "movi v22.4s, #0x0\n" "movi v23.4s, #0x0\n" - "movi v24.4s, #0x0\n" - "movi v25.4s, #0x0\n" - "movi v26.4s, #0x0\n" - "movi v27.4s, #0x0\n" - "111:" // Height 5: setup done - "mov x14, #0x0\n" - "112:" // Height 5: String loop - "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n" - "ldr w13, [x20, x14, LSL #0x2]\n" - "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n" - "tbz %x[flags], #3, 113f\n" - "ldr x21, [%x[input_ptr], x14, LSL #0x3]\n" - "add x21, x21, x20, LSL #3\n" - "ldr x12, [x21, #0x0]\n" - "ldr x9, [x21, #0x8]\n" - "ldr x27, [x21, #0x10]\n" - "ldr x25, [x21, #0x18]\n" - "ldr x23, [x21, #0x20]\n" - "cbnz x14, 114f\n" - "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n" - "add x12, x12, x20\n" - "add x9, x9, x20\n" - "add x27, x27, x20\n" - "add x25, x25, x20\n" - "add x23, x23, x20\n" - "b 114f\n" - "113:" // Height 5: setup direct input - "mov x12, %x[input_ptr]\n" - "add x9, x12, x20\n" - "add x27, x9, x20\n" - "add x25, x27, x20\n" - "add x23, x25, x20\n" - "114:" // Height 5: input setup done - "cmp x13, #0x10\n" - "blt 117f\n" - "ldr q0, [x12, #0x0]\n" - "cmp x13, #0x20\n" - "ldr q1, [x9, #0x0]\n" - "ldr q2, [x27, #0x0]\n" - "ldr q3, [x25, #0x0]\n" - "ldr q4, [x23, #0x0]\n" - "ldr q6, [x15, #0x0]\n" - "ldr q7, [x15, #0x10]\n" 
- "blt 116f\n" - "115:" // Height 5: Multiply loop: Main loop head - ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n" - "ldr x20, [x15, #0x28]\n" - ".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n" - "ldr x11, [x15, #0x38]\n" - ".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n" - "add x12, x12, #0x10\n" - ".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n" - "add x9, x9, #0x10\n" - ".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n" - "ldr d6, [x15, #0x20]\n" - ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n" - "mov v6.d[1], x20\n" - ".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n" - "ldr x20, [x15, #0x48]\n" - ".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n" - "add x27, x27, #0x10\n" - ".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n" - "add x25, x25, #0x10\n" - ".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n" - "ldr d7, [x15, #0x30]\n" - "mov v7.d[1], x11\n" - ".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n" - ".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n" - "ldr x11, [x15, #0x58]\n" - ".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n" - "add x23, x23, #0x10\n" - ".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n" - "ldr x10, [x12, #0x8]\n" - ".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n" - "ldr d6, [x15, #0x40]\n" - ".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n" - "mov v6.d[1], x20\n" - ".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n" - "ldr x20, [x15, #0x68]\n" - ".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n" - "ldr x28, [x9, #0x8]\n" - ".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n" - "ldr x26, [x27, #0x8]\n" - ".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n" - "ldr d7, [x15, #0x50]\n" - "mov v7.d[1], x11\n" - ".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n" - ".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n" - "ldr x11, [x15, #0x78]\n" - ".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n" - "ldr x24, [x25, #0x8]\n" - ".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n" - "ldr x22, [x23, #0x8]\n" - ".inst 0x4fa4e0d8 // sdot v24.4s, v6.16b, v4.4b[1]\n" - "ldr d6, [x15, #0x60]\n" - ".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n" - "mov v6.d[1], x20\n" - ".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n" - "ldr x20, [x15, #0x88]\n" - ".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n" - "sub x13, x13, #0x10\n" - ".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n" - "cmp x13, #0x20\n" - ".inst 0x4fa4e0f9 // sdot v25.4s, v7.16b, v4.4b[1]\n" - "ldr d7, [x15, #0x70]\n" - "mov v7.d[1], x11\n" - ".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n" - ".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n" - "ldr x11, [x15, #0x98]\n" - ".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n" - "prfm pldl1keep, [x12, #0x80]\n" - ".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n" - "prfm pldl1keep, [x9, #0x80]\n" - ".inst 0x4fa4e0da // sdot v26.4s, v6.16b, v4.4b[1]\n" - "ldr d6, [x15, #0x80]\n" - ".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n" - "mov v6.d[1], x20\n" - ".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n" - "ldr x20, [x15, #0xa8]\n" - ".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n" - "prfm pldl1keep, [x27, #0x80]\n" - ".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n" - "prfm pldl1keep, [x25, #0x80]\n" - ".inst 0x4fa4e0fb // sdot v27.4s, v7.16b, v4.4b[1]\n" - "ldr d7, [x15, #0x90]\n" - "mov v7.d[1], x11\n" - ".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n" - ".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n" - "ldr x11, [x15, #0xb8]\n" - ".inst 
0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n" - "prfm pldl1keep, [x23, #0x80]\n" - ".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n" - ".inst 0x4f84e8d8 // sdot v24.4s, v6.16b, v4.4b[2]\n" - "ldr d6, [x15, #0xa0]\n" - ".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n" - "mov v6.d[1], x20\n" - ".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n" - "ldr x20, [x15, #0xc8]\n" - ".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n" - ".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n" - ".inst 0x4f84e8f9 // sdot v25.4s, v7.16b, v4.4b[2]\n" - "ldr d7, [x15, #0xb0]\n" - "mov v7.d[1], x11\n" - ".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n" - ".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n" - "ldr x11, [x15, #0xd8]\n" - ".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n" - ".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n" - ".inst 0x4f84e8da // sdot v26.4s, v6.16b, v4.4b[2]\n" - "ldr d6, [x15, #0xc0]\n" - ".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n" - "mov v6.d[1], x20\n" - ".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n" - "ldr x20, [x15, #0xe8]\n" - ".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n" - ".inst 0x4f83e8f7 // sdot v23.4s, v7.16b, v3.4b[2]\n" - ".inst 0x4f84e8fb // sdot v27.4s, v7.16b, v4.4b[2]\n" - "ldr d7, [x15, #0xd0]\n" - "mov v7.d[1], x11\n" - ".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n" - ".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n" - "ldr x11, [x15, #0xf8]\n" - ".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n" - ".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n" - ".inst 0x4fa4e8d8 // sdot v24.4s, v6.16b, v4.4b[3]\n" - "ldr d6, [x15, #0xe0]\n" - ".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n" - "mov v6.d[1], x20\n" - ".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n" - ".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n" - ".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n" - ".inst 0x4fa4e8f9 // sdot v25.4s, v7.16b, v4.4b[3]\n" - "ldr d7, [x15, #0xf0]\n" - "mov v7.d[1], x11\n" - "add x15, x15, #0x100\n" - ".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n" - "ldr x20, [x15, #0x8]\n" - ".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n" - "ldr x11, [x15, #0x18]\n" - ".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n" - ".inst 0x4fa3e8d6 // sdot v22.4s, v6.16b, v3.4b[3]\n" - ".inst 0x4fa4e8da // sdot v26.4s, v6.16b, v4.4b[3]\n" - "ldr d6, [x15, #0x0]\n" - ".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n" - "ldr d0, [x12, #0x0]\n" - ".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n" - "ldr d1, [x9, #0x0]\n" - ".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n" - "ldr d2, [x27, #0x0]\n" - ".inst 0x4fa3e8f7 // sdot v23.4s, v7.16b, v3.4b[3]\n" - "ldr d3, [x25, #0x0]\n" - ".inst 0x4fa4e8fb // sdot v27.4s, v7.16b, v4.4b[3]\n" - "ldr d4, [x23, #0x0]\n" - "ldr d7, [x15, #0x10]\n" - "mov v6.d[1], x20\n" - "mov v0.d[1], x10\n" - "mov v1.d[1], x28\n" - "mov v2.d[1], x26\n" - "mov v3.d[1], x24\n" - "mov v4.d[1], x22\n" - "mov v7.d[1], x11\n" - "bge 115b\n" - "116:" // Height 5: Multiply loop: Single iteration only - ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n" - "add x12, x12, #0x10\n" - ".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n" - "add x9, x9, #0x10\n" - ".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n" - "add x27, x27, #0x10\n" - ".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n" - "add x25, x25, #0x10\n" - ".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n" - "ldr q6, [x15, #0x20]\n" - ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n" - "add x23, x23, #0x10\n" - 
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n" - "sub x13, x13, #0x10\n" - ".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n" - "prfm pldl1keep, [x12, #0x80]\n" - ".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n" - "prfm pldl1keep, [x9, #0x80]\n" - ".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n" - "ldr q7, [x15, #0x30]\n" - ".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n" - "prfm pldl1keep, [x27, #0x80]\n" - ".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n" - "prfm pldl1keep, [x25, #0x80]\n" - ".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n" - "prfm pldl1keep, [x23, #0x80]\n" - ".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n" - ".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n" - "ldr q6, [x15, #0x40]\n" - ".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n" - ".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n" - ".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n" - ".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n" - ".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n" - "ldr q7, [x15, #0x50]\n" - ".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n" - ".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n" - ".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n" - ".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n" - ".inst 0x4fa4e0d8 // sdot v24.4s, v6.16b, v4.4b[1]\n" - "ldr q6, [x15, #0x60]\n" - ".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n" - ".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n" - ".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n" - ".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n" - ".inst 0x4fa4e0f9 // sdot v25.4s, v7.16b, v4.4b[1]\n" - "ldr q7, [x15, #0x70]\n" - ".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n" - ".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n" - ".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n" - ".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n" - ".inst 0x4fa4e0da // sdot v26.4s, v6.16b, v4.4b[1]\n" - "ldr q6, [x15, #0x80]\n" - ".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n" - ".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n" - ".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n" - ".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n" - ".inst 0x4fa4e0fb // sdot v27.4s, v7.16b, v4.4b[1]\n" - "ldr q7, [x15, #0x90]\n" - ".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n" - ".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n" - ".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n" - ".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n" - ".inst 0x4f84e8d8 // sdot v24.4s, v6.16b, v4.4b[2]\n" - "ldr q6, [x15, #0xa0]\n" - ".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n" - ".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n" - ".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n" - ".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n" - ".inst 0x4f84e8f9 // sdot v25.4s, v7.16b, v4.4b[2]\n" - "ldr q7, [x15, #0xb0]\n" - ".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n" - ".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n" - ".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n" - ".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n" - ".inst 0x4f84e8da // sdot v26.4s, v6.16b, v4.4b[2]\n" - "ldr q6, [x15, #0xc0]\n" - ".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n" - ".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n" - ".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n" - ".inst 0x4f83e8f7 // sdot v23.4s, v7.16b, v3.4b[2]\n" - ".inst 0x4f84e8fb // sdot v27.4s, v7.16b, v4.4b[2]\n" - "ldr q7, [x15, #0xd0]\n" - ".inst 0x4fa0e8c8 // sdot v8.4s, 
v6.16b, v0.4b[3]\n" - ".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n" - ".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n" - ".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n" - ".inst 0x4fa4e8d8 // sdot v24.4s, v6.16b, v4.4b[3]\n" - "ldr q6, [x15, #0xe0]\n" - ".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n" - ".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n" - ".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n" - ".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n" - ".inst 0x4fa4e8f9 // sdot v25.4s, v7.16b, v4.4b[3]\n" - "ldr q7, [x15, #0xf0]\n" - ".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n" - "add x15, x15, #0x100\n" - ".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n" - ".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n" - ".inst 0x4fa3e8d6 // sdot v22.4s, v6.16b, v3.4b[3]\n" - ".inst 0x4fa4e8da // sdot v26.4s, v6.16b, v4.4b[3]\n" - ".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n" - ".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n" - ".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n" - ".inst 0x4fa3e8f7 // sdot v23.4s, v7.16b, v3.4b[3]\n" - ".inst 0x4fa4e8fb // sdot v27.4s, v7.16b, v4.4b[3]\n" - "117:" // Height 5: Multiply loop: Main loop skip - "cbz x13, 122f\n" - "cmp x13, #0x4\n" - "blt 119f\n" - "118:" // Height 5: Multiply loop: Odd block loop - "ldr s0, [x12], #0x4\n" - "sub x13, x13, #0x4\n" - "ldr s1, [x9], #0x4\n" - "cmp x13, #0x4\n" - "ldr s2, [x27], #0x4\n" - "ldr s3, [x25], #0x4\n" - "ldr s4, [x23], #0x4\n" + "movi v24.4s, #0x0\n" + "movi v25.4s, #0x0\n" + "movi v26.4s, #0x0\n" + "movi v27.4s, #0x0\n" + "111:" // Height 5: setup done + "mov x14, #0x0\n" + "112:" // Height 5: String loop + "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n" + "ldr w13, [x20, x14, LSL #0x2]\n" + "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n" + "tbz %x[flags], #3, 113f\n" + "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n" + "add x20, x20, x21, LSL #3\n" + "ldr x12, [x20, #0x0]\n" + "ldr x11, [x20, #0x8]\n" + "ldr x10, [x20, #0x10]\n" + "ldr x9, [x20, #0x18]\n" + "ldr x28, [x20, #0x20]\n" + "cbnz x14, 114f\n" + "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n" + "add x12, x12, x20\n" + "add x11, x11, x20\n" + "add x10, x10, x20\n" + "add x9, x9, x20\n" + "add x28, x28, x20\n" + "b 114f\n" + "113:" // Height 5: setup direct input + "mov x12, %x[input_ptr]\n" + "add x11, x12, x21\n" + "add x10, x11, x21\n" + "add x9, x10, x21\n" + "add x28, x9, x21\n" + "114:" // Height 5: input setup done + "cmp x13, #0x10\n" + "blt 117f\n" + "ldr q0, [x12, #0x0]\n" + "cmp x13, #0x20\n" + "ldr q1, [x11, #0x0]\n" + "ldr q2, [x10, #0x0]\n" + "ldr q3, [x9, #0x0]\n" + "ldr q4, [x28, #0x0]\n" "ldr q6, [x15, #0x0]\n" - ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n" "ldr q7, [x15, #0x10]\n" + "blt 116f\n" + "115:" // Height 5: Multiply loop: Main loop head + ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n" + "ldr x21, [x15, #0x28]\n" ".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n" + "ldr x20, [x15, #0x38]\n" ".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n" + "add x12, x12, #0x10\n" ".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n" + "add x11, x11, #0x10\n" ".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n" - "ldr q6, [x15, #0x20]\n" + "ldr d29, [x15, #0x20]\n" ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n" + "mov v29.d[1], x21\n" ".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n" + "ldr x21, [x15, #0x48]\n" ".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n" + "add x10, x10, #0x10\n" ".inst 0x4f83e0f5 // sdot v21.4s, 
v7.16b, v3.4b[0]\n" + "add x9, x9, #0x10\n" ".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n" - "ldr q7, [x15, #0x30]\n" - ".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n" + "ldr d28, [x15, #0x30]\n" + "mov v28.d[1], x20\n" + ".inst 0x4f80e3aa // sdot v10.4s, v29.16b, v0.4b[0]\n" + ".inst 0x4f81e3ae // sdot v14.4s, v29.16b, v1.4b[0]\n" + "ldr x20, [x15, #0x58]\n" + ".inst 0x4f82e3b2 // sdot v18.4s, v29.16b, v2.4b[0]\n" + "add x28, x28, #0x10\n" + ".inst 0x4f83e3b6 // sdot v22.4s, v29.16b, v3.4b[0]\n" + "ldr x26, [x12, #0x8]\n" + ".inst 0x4f84e3ba // sdot v26.4s, v29.16b, v4.4b[0]\n" + "ldr d29, [x15, #0x40]\n" + ".inst 0x4f80e38b // sdot v11.4s, v28.16b, v0.4b[0]\n" + "mov v29.d[1], x21\n" + ".inst 0x4f81e38f // sdot v15.4s, v28.16b, v1.4b[0]\n" + "ldr x21, [x15, #0x68]\n" + ".inst 0x4f82e393 // sdot v19.4s, v28.16b, v2.4b[0]\n" + "ldr x25, [x11, #0x8]\n" + ".inst 0x4f83e397 // sdot v23.4s, v28.16b, v3.4b[0]\n" + "ldr x24, [x10, #0x8]\n" + ".inst 0x4f84e39b // sdot v27.4s, v28.16b, v4.4b[0]\n" + "ldr d28, [x15, #0x50]\n" + "mov v28.d[1], x20\n" + ".inst 0x4fa0e3a8 // sdot v8.4s, v29.16b, v0.4b[1]\n" + ".inst 0x4fa1e3ac // sdot v12.4s, v29.16b, v1.4b[1]\n" + "ldr x20, [x15, #0x78]\n" + ".inst 0x4fa2e3b0 // sdot v16.4s, v29.16b, v2.4b[1]\n" + "ldr x23, [x9, #0x8]\n" + ".inst 0x4fa3e3b4 // sdot v20.4s, v29.16b, v3.4b[1]\n" + "ldr x22, [x28, #0x8]\n" + ".inst 0x4fa4e3b8 // sdot v24.4s, v29.16b, v4.4b[1]\n" + "ldr d29, [x15, #0x60]\n" + ".inst 0x4fa0e389 // sdot v9.4s, v28.16b, v0.4b[1]\n" + "mov v29.d[1], x21\n" + ".inst 0x4fa1e38d // sdot v13.4s, v28.16b, v1.4b[1]\n" + "ldr x21, [x15, #0x88]\n" + ".inst 0x4fa2e391 // sdot v17.4s, v28.16b, v2.4b[1]\n" + "sub x13, x13, #0x10\n" + ".inst 0x4fa3e395 // sdot v21.4s, v28.16b, v3.4b[1]\n" + "cmp x13, #0x20\n" + ".inst 0x4fa4e399 // sdot v25.4s, v28.16b, v4.4b[1]\n" + "ldr d28, [x15, #0x70]\n" + "mov v28.d[1], x20\n" + ".inst 0x4fa0e3aa // sdot v10.4s, v29.16b, v0.4b[1]\n" + ".inst 0x4fa1e3ae // sdot v14.4s, v29.16b, v1.4b[1]\n" + "ldr x20, [x15, #0x98]\n" + ".inst 0x4fa2e3b2 // sdot v18.4s, v29.16b, v2.4b[1]\n" + "prfm pldl1keep, [x12, #0x80]\n" + ".inst 0x4fa3e3b6 // sdot v22.4s, v29.16b, v3.4b[1]\n" + "prfm pldl1keep, [x11, #0x80]\n" + ".inst 0x4fa4e3ba // sdot v26.4s, v29.16b, v4.4b[1]\n" + "ldr d29, [x15, #0x80]\n" + ".inst 0x4fa0e38b // sdot v11.4s, v28.16b, v0.4b[1]\n" + "mov v29.d[1], x21\n" + ".inst 0x4fa1e38f // sdot v15.4s, v28.16b, v1.4b[1]\n" + "ldr x21, [x15, #0xa8]\n" + ".inst 0x4fa2e393 // sdot v19.4s, v28.16b, v2.4b[1]\n" + "prfm pldl1keep, [x10, #0x80]\n" + ".inst 0x4fa3e397 // sdot v23.4s, v28.16b, v3.4b[1]\n" + "prfm pldl1keep, [x9, #0x80]\n" + ".inst 0x4fa4e39b // sdot v27.4s, v28.16b, v4.4b[1]\n" + "ldr d28, [x15, #0x90]\n" + "mov v28.d[1], x20\n" + ".inst 0x4f80eba8 // sdot v8.4s, v29.16b, v0.4b[2]\n" + ".inst 0x4f81ebac // sdot v12.4s, v29.16b, v1.4b[2]\n" + "ldr x20, [x15, #0xb8]\n" + ".inst 0x4f82ebb0 // sdot v16.4s, v29.16b, v2.4b[2]\n" + "prfm pldl1keep, [x28, #0x80]\n" + ".inst 0x4f83ebb4 // sdot v20.4s, v29.16b, v3.4b[2]\n" + ".inst 0x4f84ebb8 // sdot v24.4s, v29.16b, v4.4b[2]\n" + "ldr d29, [x15, #0xa0]\n" + ".inst 0x4f80eb89 // sdot v9.4s, v28.16b, v0.4b[2]\n" + "mov v29.d[1], x21\n" + ".inst 0x4f81eb8d // sdot v13.4s, v28.16b, v1.4b[2]\n" + "ldr x21, [x15, #0xc8]\n" + ".inst 0x4f82eb91 // sdot v17.4s, v28.16b, v2.4b[2]\n" + ".inst 0x4f83eb95 // sdot v21.4s, v28.16b, v3.4b[2]\n" + ".inst 0x4f84eb99 // sdot v25.4s, v28.16b, v4.4b[2]\n" + "ldr d28, [x15, #0xb0]\n" + "mov v28.d[1], x20\n" + ".inst 0x4f80ebaa // 
sdot v10.4s, v29.16b, v0.4b[2]\n" + ".inst 0x4f81ebae // sdot v14.4s, v29.16b, v1.4b[2]\n" + "ldr x20, [x15, #0xd8]\n" + ".inst 0x4f82ebb2 // sdot v18.4s, v29.16b, v2.4b[2]\n" + ".inst 0x4f83ebb6 // sdot v22.4s, v29.16b, v3.4b[2]\n" + ".inst 0x4f84ebba // sdot v26.4s, v29.16b, v4.4b[2]\n" + "ldr d29, [x15, #0xc0]\n" + ".inst 0x4f80eb8b // sdot v11.4s, v28.16b, v0.4b[2]\n" + "mov v29.d[1], x21\n" + ".inst 0x4f81eb8f // sdot v15.4s, v28.16b, v1.4b[2]\n" + "ldr x21, [x15, #0xe8]\n" + ".inst 0x4f82eb93 // sdot v19.4s, v28.16b, v2.4b[2]\n" + ".inst 0x4f83eb97 // sdot v23.4s, v28.16b, v3.4b[2]\n" + ".inst 0x4f84eb9b // sdot v27.4s, v28.16b, v4.4b[2]\n" + "ldr d28, [x15, #0xd0]\n" + "mov v28.d[1], x20\n" + ".inst 0x4fa0eba8 // sdot v8.4s, v29.16b, v0.4b[3]\n" + ".inst 0x4fa1ebac // sdot v12.4s, v29.16b, v1.4b[3]\n" + "ldr x20, [x15, #0xf8]\n" + ".inst 0x4fa2ebb0 // sdot v16.4s, v29.16b, v2.4b[3]\n" + ".inst 0x4fa3ebb4 // sdot v20.4s, v29.16b, v3.4b[3]\n" + ".inst 0x4fa4ebb8 // sdot v24.4s, v29.16b, v4.4b[3]\n" + "ldr d29, [x15, #0xe0]\n" + ".inst 0x4fa0eb89 // sdot v9.4s, v28.16b, v0.4b[3]\n" + "mov v29.d[1], x21\n" + ".inst 0x4fa1eb8d // sdot v13.4s, v28.16b, v1.4b[3]\n" + ".inst 0x4fa2eb91 // sdot v17.4s, v28.16b, v2.4b[3]\n" + ".inst 0x4fa3eb95 // sdot v21.4s, v28.16b, v3.4b[3]\n" + ".inst 0x4fa4eb99 // sdot v25.4s, v28.16b, v4.4b[3]\n" + "ldr d28, [x15, #0xf0]\n" + "mov v28.d[1], x20\n" + "add x15, x15, #0x100\n" + ".inst 0x4fa0ebaa // sdot v10.4s, v29.16b, v0.4b[3]\n" + "ldr x21, [x15, #0x8]\n" + ".inst 0x4fa1ebae // sdot v14.4s, v29.16b, v1.4b[3]\n" + "ldr x20, [x15, #0x18]\n" + ".inst 0x4fa2ebb2 // sdot v18.4s, v29.16b, v2.4b[3]\n" + ".inst 0x4fa3ebb6 // sdot v22.4s, v29.16b, v3.4b[3]\n" + ".inst 0x4fa4ebba // sdot v26.4s, v29.16b, v4.4b[3]\n" + "ldr d6, [x15, #0x0]\n" + ".inst 0x4fa0eb8b // sdot v11.4s, v28.16b, v0.4b[3]\n" + "ldr d0, [x12, #0x0]\n" + ".inst 0x4fa1eb8f // sdot v15.4s, v28.16b, v1.4b[3]\n" + "ldr d1, [x11, #0x0]\n" + ".inst 0x4fa2eb93 // sdot v19.4s, v28.16b, v2.4b[3]\n" + "ldr d2, [x10, #0x0]\n" + ".inst 0x4fa3eb97 // sdot v23.4s, v28.16b, v3.4b[3]\n" + "ldr d3, [x9, #0x0]\n" + ".inst 0x4fa4eb9b // sdot v27.4s, v28.16b, v4.4b[3]\n" + "ldr d4, [x28, #0x0]\n" + "ldr d7, [x15, #0x10]\n" + "mov v6.d[1], x21\n" + "mov v0.d[1], x26\n" + "mov v1.d[1], x25\n" + "mov v2.d[1], x24\n" + "mov v3.d[1], x23\n" + "mov v4.d[1], x22\n" + "mov v7.d[1], x20\n" + "bge 115b\n" + "116:" // Height 5: Multiply loop: Single iteration only + ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n" + "add x12, x12, #0x10\n" + ".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n" + "add x11, x11, #0x10\n" + ".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n" + "add x10, x10, #0x10\n" + ".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n" + "add x9, x9, #0x10\n" + ".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n" + "ldr q29, [x15, #0x20]\n" + ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n" + "add x28, x28, #0x10\n" + ".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n" + "sub x13, x13, #0x10\n" + ".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n" + "prfm pldl1keep, [x12, #0x80]\n" + ".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n" + "prfm pldl1keep, [x11, #0x80]\n" + ".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n" + "ldr q28, [x15, #0x30]\n" + ".inst 0x4f80e3aa // sdot v10.4s, v29.16b, v0.4b[0]\n" + "prfm pldl1keep, [x10, #0x80]\n" + ".inst 0x4f81e3ae // sdot v14.4s, v29.16b, v1.4b[0]\n" + "prfm pldl1keep, [x9, #0x80]\n" + ".inst 0x4f82e3b2 // sdot v18.4s, v29.16b, v2.4b[0]\n" 
+ "prfm pldl1keep, [x28, #0x80]\n" + ".inst 0x4f83e3b6 // sdot v22.4s, v29.16b, v3.4b[0]\n" + ".inst 0x4f84e3ba // sdot v26.4s, v29.16b, v4.4b[0]\n" + "ldr q29, [x15, #0x40]\n" + ".inst 0x4f80e38b // sdot v11.4s, v28.16b, v0.4b[0]\n" + ".inst 0x4f81e38f // sdot v15.4s, v28.16b, v1.4b[0]\n" + ".inst 0x4f82e393 // sdot v19.4s, v28.16b, v2.4b[0]\n" + ".inst 0x4f83e397 // sdot v23.4s, v28.16b, v3.4b[0]\n" + ".inst 0x4f84e39b // sdot v27.4s, v28.16b, v4.4b[0]\n" + "ldr q28, [x15, #0x50]\n" + ".inst 0x4fa0e3a8 // sdot v8.4s, v29.16b, v0.4b[1]\n" + ".inst 0x4fa1e3ac // sdot v12.4s, v29.16b, v1.4b[1]\n" + ".inst 0x4fa2e3b0 // sdot v16.4s, v29.16b, v2.4b[1]\n" + ".inst 0x4fa3e3b4 // sdot v20.4s, v29.16b, v3.4b[1]\n" + ".inst 0x4fa4e3b8 // sdot v24.4s, v29.16b, v4.4b[1]\n" + "ldr q29, [x15, #0x60]\n" + ".inst 0x4fa0e389 // sdot v9.4s, v28.16b, v0.4b[1]\n" + ".inst 0x4fa1e38d // sdot v13.4s, v28.16b, v1.4b[1]\n" + ".inst 0x4fa2e391 // sdot v17.4s, v28.16b, v2.4b[1]\n" + ".inst 0x4fa3e395 // sdot v21.4s, v28.16b, v3.4b[1]\n" + ".inst 0x4fa4e399 // sdot v25.4s, v28.16b, v4.4b[1]\n" + "ldr q28, [x15, #0x70]\n" + ".inst 0x4fa0e3aa // sdot v10.4s, v29.16b, v0.4b[1]\n" + ".inst 0x4fa1e3ae // sdot v14.4s, v29.16b, v1.4b[1]\n" + ".inst 0x4fa2e3b2 // sdot v18.4s, v29.16b, v2.4b[1]\n" + ".inst 0x4fa3e3b6 // sdot v22.4s, v29.16b, v3.4b[1]\n" + ".inst 0x4fa4e3ba // sdot v26.4s, v29.16b, v4.4b[1]\n" + "ldr q29, [x15, #0x80]\n" + ".inst 0x4fa0e38b // sdot v11.4s, v28.16b, v0.4b[1]\n" + ".inst 0x4fa1e38f // sdot v15.4s, v28.16b, v1.4b[1]\n" + ".inst 0x4fa2e393 // sdot v19.4s, v28.16b, v2.4b[1]\n" + ".inst 0x4fa3e397 // sdot v23.4s, v28.16b, v3.4b[1]\n" + ".inst 0x4fa4e39b // sdot v27.4s, v28.16b, v4.4b[1]\n" + "ldr q28, [x15, #0x90]\n" + ".inst 0x4f80eba8 // sdot v8.4s, v29.16b, v0.4b[2]\n" + ".inst 0x4f81ebac // sdot v12.4s, v29.16b, v1.4b[2]\n" + ".inst 0x4f82ebb0 // sdot v16.4s, v29.16b, v2.4b[2]\n" + ".inst 0x4f83ebb4 // sdot v20.4s, v29.16b, v3.4b[2]\n" + ".inst 0x4f84ebb8 // sdot v24.4s, v29.16b, v4.4b[2]\n" + "ldr q29, [x15, #0xa0]\n" + ".inst 0x4f80eb89 // sdot v9.4s, v28.16b, v0.4b[2]\n" + ".inst 0x4f81eb8d // sdot v13.4s, v28.16b, v1.4b[2]\n" + ".inst 0x4f82eb91 // sdot v17.4s, v28.16b, v2.4b[2]\n" + ".inst 0x4f83eb95 // sdot v21.4s, v28.16b, v3.4b[2]\n" + ".inst 0x4f84eb99 // sdot v25.4s, v28.16b, v4.4b[2]\n" + "ldr q28, [x15, #0xb0]\n" + ".inst 0x4f80ebaa // sdot v10.4s, v29.16b, v0.4b[2]\n" + ".inst 0x4f81ebae // sdot v14.4s, v29.16b, v1.4b[2]\n" + ".inst 0x4f82ebb2 // sdot v18.4s, v29.16b, v2.4b[2]\n" + ".inst 0x4f83ebb6 // sdot v22.4s, v29.16b, v3.4b[2]\n" + ".inst 0x4f84ebba // sdot v26.4s, v29.16b, v4.4b[2]\n" + "ldr q29, [x15, #0xc0]\n" + ".inst 0x4f80eb8b // sdot v11.4s, v28.16b, v0.4b[2]\n" + ".inst 0x4f81eb8f // sdot v15.4s, v28.16b, v1.4b[2]\n" + ".inst 0x4f82eb93 // sdot v19.4s, v28.16b, v2.4b[2]\n" + ".inst 0x4f83eb97 // sdot v23.4s, v28.16b, v3.4b[2]\n" + ".inst 0x4f84eb9b // sdot v27.4s, v28.16b, v4.4b[2]\n" + "ldr q28, [x15, #0xd0]\n" + ".inst 0x4fa0eba8 // sdot v8.4s, v29.16b, v0.4b[3]\n" + ".inst 0x4fa1ebac // sdot v12.4s, v29.16b, v1.4b[3]\n" + ".inst 0x4fa2ebb0 // sdot v16.4s, v29.16b, v2.4b[3]\n" + ".inst 0x4fa3ebb4 // sdot v20.4s, v29.16b, v3.4b[3]\n" + ".inst 0x4fa4ebb8 // sdot v24.4s, v29.16b, v4.4b[3]\n" + "ldr q29, [x15, #0xe0]\n" + ".inst 0x4fa0eb89 // sdot v9.4s, v28.16b, v0.4b[3]\n" + ".inst 0x4fa1eb8d // sdot v13.4s, v28.16b, v1.4b[3]\n" + ".inst 0x4fa2eb91 // sdot v17.4s, v28.16b, v2.4b[3]\n" + ".inst 0x4fa3eb95 // sdot v21.4s, v28.16b, v3.4b[3]\n" + ".inst 0x4fa4eb99 // 
sdot v25.4s, v28.16b, v4.4b[3]\n" + "ldr q28, [x15, #0xf0]\n" + ".inst 0x4fa0ebaa // sdot v10.4s, v29.16b, v0.4b[3]\n" + "add x15, x15, #0x100\n" + ".inst 0x4fa1ebae // sdot v14.4s, v29.16b, v1.4b[3]\n" + ".inst 0x4fa2ebb2 // sdot v18.4s, v29.16b, v2.4b[3]\n" + ".inst 0x4fa3ebb6 // sdot v22.4s, v29.16b, v3.4b[3]\n" + ".inst 0x4fa4ebba // sdot v26.4s, v29.16b, v4.4b[3]\n" + ".inst 0x4fa0eb8b // sdot v11.4s, v28.16b, v0.4b[3]\n" + ".inst 0x4fa1eb8f // sdot v15.4s, v28.16b, v1.4b[3]\n" + ".inst 0x4fa2eb93 // sdot v19.4s, v28.16b, v2.4b[3]\n" + ".inst 0x4fa3eb97 // sdot v23.4s, v28.16b, v3.4b[3]\n" + ".inst 0x4fa4eb9b // sdot v27.4s, v28.16b, v4.4b[3]\n" + "117:" // Height 5: Multiply loop: Main loop skip + "cbz x13, 122f\n" + "cmp x13, #0x4\n" + "blt 119f\n" + "118:" // Height 5: Multiply loop: Odd block loop + "ldr s2, [x12], #0x4\n" + "sub x13, x13, #0x4\n" + "ldr s1, [x11], #0x4\n" + "cmp x13, #0x4\n" + "ldr s0, [x10], #0x4\n" + "ldr s31, [x9], #0x4\n" + "ldr s30, [x28], #0x4\n" + "ldr q29, [x15, #0x0]\n" + ".inst 0x4f82e3a8 // sdot v8.4s, v29.16b, v2.4b[0]\n" + "ldr q28, [x15, #0x10]\n" + ".inst 0x4f81e3ac // sdot v12.4s, v29.16b, v1.4b[0]\n" + ".inst 0x4f80e3b0 // sdot v16.4s, v29.16b, v0.4b[0]\n" + ".inst 0x4f9fe3b4 // sdot v20.4s, v29.16b, v31.4b[0]\n" + ".inst 0x4f9ee3b8 // sdot v24.4s, v29.16b, v30.4b[0]\n" + "ldr q29, [x15, #0x20]\n" + ".inst 0x4f82e389 // sdot v9.4s, v28.16b, v2.4b[0]\n" + ".inst 0x4f81e38d // sdot v13.4s, v28.16b, v1.4b[0]\n" + ".inst 0x4f80e391 // sdot v17.4s, v28.16b, v0.4b[0]\n" + ".inst 0x4f9fe395 // sdot v21.4s, v28.16b, v31.4b[0]\n" + ".inst 0x4f9ee399 // sdot v25.4s, v28.16b, v30.4b[0]\n" + "ldr q28, [x15, #0x30]\n" + ".inst 0x4f82e3aa // sdot v10.4s, v29.16b, v2.4b[0]\n" "add x15, x15, #0x40\n" - ".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n" - ".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n" - ".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n" - ".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n" - ".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n" - ".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n" - ".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n" - ".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n" - ".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n" + ".inst 0x4f81e3ae // sdot v14.4s, v29.16b, v1.4b[0]\n" + ".inst 0x4f80e3b2 // sdot v18.4s, v29.16b, v0.4b[0]\n" + ".inst 0x4f9fe3b6 // sdot v22.4s, v29.16b, v31.4b[0]\n" + ".inst 0x4f9ee3ba // sdot v26.4s, v29.16b, v30.4b[0]\n" + ".inst 0x4f82e38b // sdot v11.4s, v28.16b, v2.4b[0]\n" + ".inst 0x4f81e38f // sdot v15.4s, v28.16b, v1.4b[0]\n" + ".inst 0x4f80e393 // sdot v19.4s, v28.16b, v0.4b[0]\n" + ".inst 0x4f9fe397 // sdot v23.4s, v28.16b, v31.4b[0]\n" + ".inst 0x4f9ee39b // sdot v27.4s, v28.16b, v30.4b[0]\n" "bge 118b\n" "119:" // Height 5: Multiply loop: Skip odd blocks "cbz x13, 122f\n" "tbz x13, #1, 120f\n" "ldr h0, [x12], #0x2\n" - "ldr h1, [x9], #0x2\n" - "ldr h2, [x27], #0x2\n" - "ldr h3, [x25], #0x2\n" - "ldr h4, [x23], #0x2\n" + "ldr h1, [x11], #0x2\n" + "ldr h2, [x10], #0x2\n" + "ldr h3, [x9], #0x2\n" + "ldr h4, [x28], #0x2\n" "tbz x13, #0, 121f\n" "ld1 { v0.b }[2], [x12]\n" - "ld1 { v1.b }[2], [x9]\n" - "ld1 { v2.b }[2], [x27]\n" - "ld1 { v3.b }[2], [x25]\n" - "ld1 { v4.b }[2], [x23]\n" + "ld1 { v1.b }[2], [x11]\n" + "ld1 { v2.b }[2], [x10]\n" + "ld1 { v3.b }[2], [x9]\n" + "ld1 { v4.b }[2], [x28]\n" "b 121f\n" "120:" // Height 5: Multiply loop: Ragged operand read: partial_1_0 "ldr b0, [x12, #0x0]\n" - "ldr b1, [x9, #0x0]\n" - "ldr b2, [x27, #0x0]\n" - "ldr 
b3, [x25, #0x0]\n" - "ldr b4, [x23, #0x0]\n" + "ldr b1, [x11, #0x0]\n" + "ldr b2, [x10, #0x0]\n" + "ldr b3, [x9, #0x0]\n" + "ldr b4, [x28, #0x0]\n" "121:" // Height 5: Multiply loop: Ragged operand read: Done - "ldr q6, [x15, #0x0]\n" - ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n" - "ldr q7, [x15, #0x10]\n" - ".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n" - ".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n" - ".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n" - ".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n" - "ldr q6, [x15, #0x20]\n" - ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n" - ".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n" - ".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n" - ".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n" - ".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n" - "ldr q7, [x15, #0x30]\n" - ".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n" + "ldr q29, [x15, #0x0]\n" + ".inst 0x4f80e3a8 // sdot v8.4s, v29.16b, v0.4b[0]\n" + "ldr q28, [x15, #0x10]\n" + ".inst 0x4f81e3ac // sdot v12.4s, v29.16b, v1.4b[0]\n" + ".inst 0x4f82e3b0 // sdot v16.4s, v29.16b, v2.4b[0]\n" + ".inst 0x4f83e3b4 // sdot v20.4s, v29.16b, v3.4b[0]\n" + ".inst 0x4f84e3b8 // sdot v24.4s, v29.16b, v4.4b[0]\n" + "ldr q29, [x15, #0x20]\n" + ".inst 0x4f80e389 // sdot v9.4s, v28.16b, v0.4b[0]\n" + ".inst 0x4f81e38d // sdot v13.4s, v28.16b, v1.4b[0]\n" + ".inst 0x4f82e391 // sdot v17.4s, v28.16b, v2.4b[0]\n" + ".inst 0x4f83e395 // sdot v21.4s, v28.16b, v3.4b[0]\n" + ".inst 0x4f84e399 // sdot v25.4s, v28.16b, v4.4b[0]\n" + "ldr q28, [x15, #0x30]\n" + ".inst 0x4f80e3aa // sdot v10.4s, v29.16b, v0.4b[0]\n" "add x15, x15, #0x40\n" - ".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n" - ".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n" - ".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n" - ".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n" - ".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n" - ".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n" - ".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n" - ".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n" - ".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n" + ".inst 0x4f81e3ae // sdot v14.4s, v29.16b, v1.4b[0]\n" + ".inst 0x4f82e3b2 // sdot v18.4s, v29.16b, v2.4b[0]\n" + ".inst 0x4f83e3b6 // sdot v22.4s, v29.16b, v3.4b[0]\n" + ".inst 0x4f84e3ba // sdot v26.4s, v29.16b, v4.4b[0]\n" + ".inst 0x4f80e38b // sdot v11.4s, v28.16b, v0.4b[0]\n" + ".inst 0x4f81e38f // sdot v15.4s, v28.16b, v1.4b[0]\n" + ".inst 0x4f82e393 // sdot v19.4s, v28.16b, v2.4b[0]\n" + ".inst 0x4f83e397 // sdot v23.4s, v28.16b, v3.4b[0]\n" + ".inst 0x4f84e39b // sdot v27.4s, v28.16b, v4.4b[0]\n" "122:" // Height 5: Multiply loop: No odd multiplies "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n" "add x14, x14, #0x1\n" "cmp x14, x20\n" "bne 112b\n" - "ldr q0, [x6, #0x0]\n" - "add v8.4s, v8.4s, v0.4s\n" - "ldr q1, [x6, #0x10]\n" - "add v9.4s, v9.4s, v1.4s\n" - "ldr q2, [x6, #0x20]\n" - "add v10.4s, v10.4s, v2.4s\n" - "ldr q3, [x6, #0x30]\n" - "add v11.4s, v11.4s, v3.4s\n" + "ldr q31, [x6, #0x0]\n" + "add v8.4s, v8.4s, v31.4s\n" + "ldr q30, [x6, #0x10]\n" + "add v9.4s, v9.4s, v30.4s\n" + "ldr q29, [x6, #0x20]\n" + "add v10.4s, v10.4s, v29.4s\n" + "ldr q28, [x6, #0x30]\n" + "add v11.4s, v11.4s, v28.4s\n" "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n" - "add x24, x17, x20\n" + "add x25, x17, x20\n" + "add x24, x25, x20\n" "add x23, x24, x20\n" "add x22, x23, x20\n" - "add x21, x22, x20\n" "prfm pstl1keep, [x17, #0x0]\n" 
+ "prfm pstl1keep, [x25, #0x0]\n" + "add v12.4s, v12.4s, v31.4s\n" "prfm pstl1keep, [x24, #0x0]\n" - "add v12.4s, v12.4s, v0.4s\n" + "add v13.4s, v13.4s, v30.4s\n" "prfm pstl1keep, [x23, #0x0]\n" - "add v13.4s, v13.4s, v1.4s\n" + "add v14.4s, v14.4s, v29.4s\n" "prfm pstl1keep, [x22, #0x0]\n" - "add v14.4s, v14.4s, v2.4s\n" - "prfm pstl1keep, [x21, #0x0]\n" - "add v15.4s, v15.4s, v3.4s\n" - "add v16.4s, v16.4s, v0.4s\n" - "add v17.4s, v17.4s, v1.4s\n" - "add v18.4s, v18.4s, v2.4s\n" - "add v19.4s, v19.4s, v3.4s\n" - "add v20.4s, v20.4s, v0.4s\n" - "add v21.4s, v21.4s, v1.4s\n" - "add v22.4s, v22.4s, v2.4s\n" - "add v23.4s, v23.4s, v3.4s\n" - "add v24.4s, v24.4s, v0.4s\n" - "add v25.4s, v25.4s, v1.4s\n" - "add v26.4s, v26.4s, v2.4s\n" - "add v27.4s, v27.4s, v3.4s\n" + "add v15.4s, v15.4s, v28.4s\n" + "add v16.4s, v16.4s, v31.4s\n" + "add v17.4s, v17.4s, v30.4s\n" + "add v18.4s, v18.4s, v29.4s\n" + "add v19.4s, v19.4s, v28.4s\n" + "add v20.4s, v20.4s, v31.4s\n" + "add v21.4s, v21.4s, v30.4s\n" + "add v22.4s, v22.4s, v29.4s\n" + "add v23.4s, v23.4s, v28.4s\n" + "add v24.4s, v24.4s, v31.4s\n" + "add v25.4s, v25.4s, v30.4s\n" + "add v26.4s, v26.4s, v29.4s\n" + "add v27.4s, v27.4s, v28.4s\n" "add x6, x6, #0x40\n" "tbz %x[flags], #4, 123f\n" "ldr q0, [x8, #0x0]\n" @@ -2536,10 +2535,10 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( "add x7, x7, #0x40\n" "b 124f\n" "123:" // Height 5: per layer parameters - "add x25, %x[qp], %[per_layer_right_shift]\n" - "ld1r { v0.4s }, [x25]\n" - "add x25, %x[qp], %[per_layer_mul]\n" - "ld1r { v4.4s }, [x25]\n" + "add x20, %x[qp], %[per_layer_right_shift]\n" + "ld1r { v0.4s }, [x20]\n" + "add x20, %x[qp], %[per_layer_mul]\n" + "ld1r { v4.4s }, [x20]\n" "mov v1.16b, v0.16b\n" "mov v5.16b, v4.16b\n" "mov v2.16b, v0.16b\n" @@ -2568,66 +2567,66 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( "sqrdmulh v26.4s, v26.4s, v6.4s\n" "sqrdmulh v27.4s, v27.4s, v7.4s\n" "tbz %x[flags], #5, 125f\n" - "and v4.16b, v8.16b, v0.16b\n" - "and v5.16b, v9.16b, v1.16b\n" - "and v6.16b, v10.16b, v2.16b\n" - "and v7.16b, v11.16b, v3.16b\n" - "sshr v4.4s, v4.4s, #0x1f\n" - "sshr v5.4s, v5.4s, #0x1f\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sshr v7.4s, v7.4s, #0x1f\n" - "sqadd v8.4s, v8.4s, v4.4s\n" - "sqadd v9.4s, v9.4s, v5.4s\n" - "sqadd v10.4s, v10.4s, v6.4s\n" - "sqadd v11.4s, v11.4s, v7.4s\n" - "and v4.16b, v12.16b, v0.16b\n" - "and v5.16b, v13.16b, v1.16b\n" - "and v6.16b, v14.16b, v2.16b\n" - "and v7.16b, v15.16b, v3.16b\n" - "sshr v4.4s, v4.4s, #0x1f\n" - "sshr v5.4s, v5.4s, #0x1f\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sshr v7.4s, v7.4s, #0x1f\n" - "sqadd v12.4s, v12.4s, v4.4s\n" - "sqadd v13.4s, v13.4s, v5.4s\n" - "sqadd v14.4s, v14.4s, v6.4s\n" - "sqadd v15.4s, v15.4s, v7.4s\n" - "and v4.16b, v16.16b, v0.16b\n" - "and v5.16b, v17.16b, v1.16b\n" - "and v6.16b, v18.16b, v2.16b\n" - "and v7.16b, v19.16b, v3.16b\n" - "sshr v4.4s, v4.4s, #0x1f\n" - "sshr v5.4s, v5.4s, #0x1f\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sshr v7.4s, v7.4s, #0x1f\n" - "sqadd v16.4s, v16.4s, v4.4s\n" - "sqadd v17.4s, v17.4s, v5.4s\n" - "sqadd v18.4s, v18.4s, v6.4s\n" - "sqadd v19.4s, v19.4s, v7.4s\n" - "and v4.16b, v20.16b, v0.16b\n" - "and v5.16b, v21.16b, v1.16b\n" - "and v6.16b, v22.16b, v2.16b\n" - "and v7.16b, v23.16b, v3.16b\n" - "sshr v4.4s, v4.4s, #0x1f\n" - "sshr v5.4s, v5.4s, #0x1f\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sshr v7.4s, v7.4s, #0x1f\n" - "sqadd v20.4s, v20.4s, v4.4s\n" - "sqadd v21.4s, v21.4s, v5.4s\n" - "sqadd v22.4s, v22.4s, v6.4s\n" - "sqadd v23.4s, v23.4s, v7.4s\n" - "and v4.16b, v24.16b, v0.16b\n" - "and v5.16b, 
v25.16b, v1.16b\n" - "and v6.16b, v26.16b, v2.16b\n" - "and v7.16b, v27.16b, v3.16b\n" - "sshr v4.4s, v4.4s, #0x1f\n" - "sshr v5.4s, v5.4s, #0x1f\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sshr v7.4s, v7.4s, #0x1f\n" - "sqadd v24.4s, v24.4s, v4.4s\n" - "sqadd v25.4s, v25.4s, v5.4s\n" - "sqadd v26.4s, v26.4s, v6.4s\n" - "sqadd v27.4s, v27.4s, v7.4s\n" + "and v31.16b, v8.16b, v0.16b\n" + "and v30.16b, v9.16b, v1.16b\n" + "and v29.16b, v10.16b, v2.16b\n" + "and v28.16b, v11.16b, v3.16b\n" + "sshr v31.4s, v31.4s, #0x1f\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "sshr v29.4s, v29.4s, #0x1f\n" + "sshr v28.4s, v28.4s, #0x1f\n" + "sqadd v8.4s, v8.4s, v31.4s\n" + "sqadd v9.4s, v9.4s, v30.4s\n" + "sqadd v10.4s, v10.4s, v29.4s\n" + "sqadd v11.4s, v11.4s, v28.4s\n" + "and v31.16b, v12.16b, v0.16b\n" + "and v30.16b, v13.16b, v1.16b\n" + "and v29.16b, v14.16b, v2.16b\n" + "and v28.16b, v15.16b, v3.16b\n" + "sshr v31.4s, v31.4s, #0x1f\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "sshr v29.4s, v29.4s, #0x1f\n" + "sshr v28.4s, v28.4s, #0x1f\n" + "sqadd v12.4s, v12.4s, v31.4s\n" + "sqadd v13.4s, v13.4s, v30.4s\n" + "sqadd v14.4s, v14.4s, v29.4s\n" + "sqadd v15.4s, v15.4s, v28.4s\n" + "and v31.16b, v16.16b, v0.16b\n" + "and v30.16b, v17.16b, v1.16b\n" + "and v29.16b, v18.16b, v2.16b\n" + "and v28.16b, v19.16b, v3.16b\n" + "sshr v31.4s, v31.4s, #0x1f\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "sshr v29.4s, v29.4s, #0x1f\n" + "sshr v28.4s, v28.4s, #0x1f\n" + "sqadd v16.4s, v16.4s, v31.4s\n" + "sqadd v17.4s, v17.4s, v30.4s\n" + "sqadd v18.4s, v18.4s, v29.4s\n" + "sqadd v19.4s, v19.4s, v28.4s\n" + "and v31.16b, v20.16b, v0.16b\n" + "and v30.16b, v21.16b, v1.16b\n" + "and v29.16b, v22.16b, v2.16b\n" + "and v28.16b, v23.16b, v3.16b\n" + "sshr v31.4s, v31.4s, #0x1f\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "sshr v29.4s, v29.4s, #0x1f\n" + "sshr v28.4s, v28.4s, #0x1f\n" + "sqadd v20.4s, v20.4s, v31.4s\n" + "sqadd v21.4s, v21.4s, v30.4s\n" + "sqadd v22.4s, v22.4s, v29.4s\n" + "sqadd v23.4s, v23.4s, v28.4s\n" + "and v31.16b, v24.16b, v0.16b\n" + "and v30.16b, v25.16b, v1.16b\n" + "and v29.16b, v26.16b, v2.16b\n" + "and v28.16b, v27.16b, v3.16b\n" + "sshr v31.4s, v31.4s, #0x1f\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "sshr v29.4s, v29.4s, #0x1f\n" + "sshr v28.4s, v28.4s, #0x1f\n" + "sqadd v24.4s, v24.4s, v31.4s\n" + "sqadd v25.4s, v25.4s, v30.4s\n" + "sqadd v26.4s, v26.4s, v29.4s\n" + "sqadd v27.4s, v27.4s, v28.4s\n" "125:" // Height 5: no shift correction "srshl v8.4s, v8.4s, v0.4s\n" "srshl v9.4s, v9.4s, v1.4s\n" @@ -2649,201 +2648,201 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( "srshl v25.4s, v25.4s, v1.4s\n" "srshl v26.4s, v26.4s, v2.4s\n" "srshl v27.4s, v27.4s, v3.4s\n" - "add x25, %x[qp], %[c_offset]\n" - "ld1r { v4.4s }, [x25]\n" - "add v8.4s, v8.4s, v4.4s\n" - "add v9.4s, v9.4s, v4.4s\n" - "add v10.4s, v10.4s, v4.4s\n" - "add v11.4s, v11.4s, v4.4s\n" - "add v12.4s, v12.4s, v4.4s\n" - "add v13.4s, v13.4s, v4.4s\n" - "add v14.4s, v14.4s, v4.4s\n" - "add v15.4s, v15.4s, v4.4s\n" - "add v16.4s, v16.4s, v4.4s\n" - "add v17.4s, v17.4s, v4.4s\n" - "add v18.4s, v18.4s, v4.4s\n" - "add v19.4s, v19.4s, v4.4s\n" - "add v20.4s, v20.4s, v4.4s\n" - "add v21.4s, v21.4s, v4.4s\n" - "add v22.4s, v22.4s, v4.4s\n" - "add v23.4s, v23.4s, v4.4s\n" - "add v24.4s, v24.4s, v4.4s\n" - "add v25.4s, v25.4s, v4.4s\n" - "add v26.4s, v26.4s, v4.4s\n" - "add v27.4s, v27.4s, v4.4s\n" - "add x25, %x[qp], %[maxval]\n" - "ld1r { v6.4s }, [x25]\n" - "smin v8.4s, v8.4s, v6.4s\n" - "smin v9.4s, v9.4s, v6.4s\n" - "smin v10.4s, v10.4s, v6.4s\n" - "smin v11.4s, v11.4s, v6.4s\n" - "smin 
v12.4s, v12.4s, v6.4s\n" - "smin v13.4s, v13.4s, v6.4s\n" - "smin v14.4s, v14.4s, v6.4s\n" - "smin v15.4s, v15.4s, v6.4s\n" - "smin v16.4s, v16.4s, v6.4s\n" - "smin v17.4s, v17.4s, v6.4s\n" - "smin v18.4s, v18.4s, v6.4s\n" - "smin v19.4s, v19.4s, v6.4s\n" - "smin v20.4s, v20.4s, v6.4s\n" - "smin v21.4s, v21.4s, v6.4s\n" - "smin v22.4s, v22.4s, v6.4s\n" - "smin v23.4s, v23.4s, v6.4s\n" - "smin v24.4s, v24.4s, v6.4s\n" - "smin v25.4s, v25.4s, v6.4s\n" - "smin v26.4s, v26.4s, v6.4s\n" - "smin v27.4s, v27.4s, v6.4s\n" - "add x25, %x[qp], %[minval]\n" - "ld1r { v5.4s }, [x25]\n" - "smax v8.4s, v8.4s, v5.4s\n" - "smax v9.4s, v9.4s, v5.4s\n" - "smax v10.4s, v10.4s, v5.4s\n" - "smax v11.4s, v11.4s, v5.4s\n" - "smax v12.4s, v12.4s, v5.4s\n" - "smax v13.4s, v13.4s, v5.4s\n" - "smax v14.4s, v14.4s, v5.4s\n" - "smax v15.4s, v15.4s, v5.4s\n" - "smax v16.4s, v16.4s, v5.4s\n" - "smax v17.4s, v17.4s, v5.4s\n" - "smax v18.4s, v18.4s, v5.4s\n" - "smax v19.4s, v19.4s, v5.4s\n" - "smax v20.4s, v20.4s, v5.4s\n" - "smax v21.4s, v21.4s, v5.4s\n" - "smax v22.4s, v22.4s, v5.4s\n" - "smax v23.4s, v23.4s, v5.4s\n" - "smax v24.4s, v24.4s, v5.4s\n" - "smax v25.4s, v25.4s, v5.4s\n" - "smax v26.4s, v26.4s, v5.4s\n" - "smax v27.4s, v27.4s, v5.4s\n" + "add x20, %x[qp], %[c_offset]\n" + "ld1r { v28.4s }, [x20]\n" + "add v8.4s, v8.4s, v28.4s\n" + "add v9.4s, v9.4s, v28.4s\n" + "add v10.4s, v10.4s, v28.4s\n" + "add v11.4s, v11.4s, v28.4s\n" + "add v12.4s, v12.4s, v28.4s\n" + "add v13.4s, v13.4s, v28.4s\n" + "add v14.4s, v14.4s, v28.4s\n" + "add v15.4s, v15.4s, v28.4s\n" + "add v16.4s, v16.4s, v28.4s\n" + "add v17.4s, v17.4s, v28.4s\n" + "add v18.4s, v18.4s, v28.4s\n" + "add v19.4s, v19.4s, v28.4s\n" + "add v20.4s, v20.4s, v28.4s\n" + "add v21.4s, v21.4s, v28.4s\n" + "add v22.4s, v22.4s, v28.4s\n" + "add v23.4s, v23.4s, v28.4s\n" + "add v24.4s, v24.4s, v28.4s\n" + "add v25.4s, v25.4s, v28.4s\n" + "add v26.4s, v26.4s, v28.4s\n" + "add v27.4s, v27.4s, v28.4s\n" + "add x20, %x[qp], %[maxval]\n" + "ld1r { v28.4s }, [x20]\n" + "smin v8.4s, v8.4s, v28.4s\n" + "smin v9.4s, v9.4s, v28.4s\n" + "smin v10.4s, v10.4s, v28.4s\n" + "smin v11.4s, v11.4s, v28.4s\n" + "smin v12.4s, v12.4s, v28.4s\n" + "smin v13.4s, v13.4s, v28.4s\n" + "smin v14.4s, v14.4s, v28.4s\n" + "smin v15.4s, v15.4s, v28.4s\n" + "smin v16.4s, v16.4s, v28.4s\n" + "smin v17.4s, v17.4s, v28.4s\n" + "smin v18.4s, v18.4s, v28.4s\n" + "smin v19.4s, v19.4s, v28.4s\n" + "smin v20.4s, v20.4s, v28.4s\n" + "smin v21.4s, v21.4s, v28.4s\n" + "smin v22.4s, v22.4s, v28.4s\n" + "smin v23.4s, v23.4s, v28.4s\n" + "smin v24.4s, v24.4s, v28.4s\n" + "smin v25.4s, v25.4s, v28.4s\n" + "smin v26.4s, v26.4s, v28.4s\n" + "smin v27.4s, v27.4s, v28.4s\n" + "add x20, %x[qp], %[minval]\n" + "ld1r { v28.4s }, [x20]\n" + "smax v8.4s, v8.4s, v28.4s\n" + "smax v9.4s, v9.4s, v28.4s\n" + "smax v10.4s, v10.4s, v28.4s\n" + "smax v11.4s, v11.4s, v28.4s\n" + "smax v12.4s, v12.4s, v28.4s\n" + "smax v13.4s, v13.4s, v28.4s\n" + "smax v14.4s, v14.4s, v28.4s\n" + "smax v15.4s, v15.4s, v28.4s\n" + "smax v16.4s, v16.4s, v28.4s\n" + "smax v17.4s, v17.4s, v28.4s\n" + "smax v18.4s, v18.4s, v28.4s\n" + "smax v19.4s, v19.4s, v28.4s\n" + "smax v20.4s, v20.4s, v28.4s\n" + "smax v21.4s, v21.4s, v28.4s\n" + "smax v22.4s, v22.4s, v28.4s\n" + "smax v23.4s, v23.4s, v28.4s\n" + "smax v24.4s, v24.4s, v28.4s\n" + "smax v25.4s, v25.4s, v28.4s\n" + "smax v26.4s, v26.4s, v28.4s\n" + "smax v27.4s, v27.4s, v28.4s\n" "uzp1 v8.8h, v8.8h, v9.8h\n" - "uzp1 v9.8h, v10.8h, v11.8h\n" + "uzp1 v29.8h, v10.8h, v11.8h\n" "uzp1 v12.8h, v12.8h, 
v13.8h\n" - "uzp1 v13.8h, v14.8h, v15.8h\n" + "uzp1 v28.8h, v14.8h, v15.8h\n" "uzp1 v16.8h, v16.8h, v17.8h\n" - "uzp1 v17.8h, v18.8h, v19.8h\n" + "uzp1 v19.8h, v18.8h, v19.8h\n" "uzp1 v20.8h, v20.8h, v21.8h\n" - "uzp1 v21.8h, v22.8h, v23.8h\n" + "uzp1 v18.8h, v22.8h, v23.8h\n" "uzp1 v24.8h, v24.8h, v25.8h\n" - "uzp1 v25.8h, v26.8h, v27.8h\n" + "uzp1 v17.8h, v26.8h, v27.8h\n" "cmp x16, #0x10\n" - "uzp1 v8.16b, v8.16b, v9.16b\n" - "uzp1 v12.16b, v12.16b, v13.16b\n" - "uzp1 v16.16b, v16.16b, v17.16b\n" - "uzp1 v20.16b, v20.16b, v21.16b\n" - "uzp1 v24.16b, v24.16b, v25.16b\n" + "uzp1 v8.16b, v8.16b, v29.16b\n" + "uzp1 v12.16b, v12.16b, v28.16b\n" + "uzp1 v16.16b, v16.16b, v19.16b\n" + "uzp1 v20.16b, v20.16b, v18.16b\n" + "uzp1 v24.16b, v24.16b, v17.16b\n" "bge 134f\n" "tbz x16, #3, 129f\n" "str d8, [x17], #0x8\n" - "str d12, [x24], #0x8\n" - "str d16, [x23], #0x8\n" - "str d20, [x22], #0x8\n" - "str d24, [x21], #0x8\n" + "str d12, [x25], #0x8\n" + "str d16, [x24], #0x8\n" + "str d20, [x23], #0x8\n" + "str d24, [x22], #0x8\n" "tbz x16, #2, 127f\n" "st1 { v8.s }[2], [x17], #0x4\n" - "st1 { v12.s }[2], [x24], #0x4\n" - "st1 { v16.s }[2], [x23], #0x4\n" - "st1 { v20.s }[2], [x22], #0x4\n" - "st1 { v24.s }[2], [x21], #0x4\n" + "st1 { v12.s }[2], [x25], #0x4\n" + "st1 { v16.s }[2], [x24], #0x4\n" + "st1 { v20.s }[2], [x23], #0x4\n" + "st1 { v24.s }[2], [x22], #0x4\n" "tbz x16, #1, 126f\n" "st1 { v8.h }[6], [x17], #0x2\n" - "st1 { v12.h }[6], [x24], #0x2\n" - "st1 { v16.h }[6], [x23], #0x2\n" - "st1 { v20.h }[6], [x22], #0x2\n" - "st1 { v24.h }[6], [x21], #0x2\n" + "st1 { v12.h }[6], [x25], #0x2\n" + "st1 { v16.h }[6], [x24], #0x2\n" + "st1 { v20.h }[6], [x23], #0x2\n" + "st1 { v24.h }[6], [x22], #0x2\n" "tbz x16, #0, 133f\n" "st1 { v8.b }[14], [x17]\n" - "st1 { v12.b }[14], [x24]\n" - "st1 { v16.b }[14], [x23]\n" - "st1 { v20.b }[14], [x22]\n" - "st1 { v24.b }[14], [x21]\n" + "st1 { v12.b }[14], [x25]\n" + "st1 { v16.b }[14], [x24]\n" + "st1 { v20.b }[14], [x23]\n" + "st1 { v24.b }[14], [x22]\n" "b 133f\n" "126:" // Height 5: Partial direct writeback: partial_1_12 "tbz x16, #0, 133f\n" "st1 { v8.b }[12], [x17]\n" - "st1 { v12.b }[12], [x24]\n" - "st1 { v16.b }[12], [x23]\n" - "st1 { v20.b }[12], [x22]\n" - "st1 { v24.b }[12], [x21]\n" + "st1 { v12.b }[12], [x25]\n" + "st1 { v16.b }[12], [x24]\n" + "st1 { v20.b }[12], [x23]\n" + "st1 { v24.b }[12], [x22]\n" "b 133f\n" "127:" // Height 5: Partial direct writeback: partial_2_8 "tbz x16, #1, 128f\n" "st1 { v8.h }[4], [x17], #0x2\n" - "st1 { v12.h }[4], [x24], #0x2\n" - "st1 { v16.h }[4], [x23], #0x2\n" - "st1 { v20.h }[4], [x22], #0x2\n" - "st1 { v24.h }[4], [x21], #0x2\n" + "st1 { v12.h }[4], [x25], #0x2\n" + "st1 { v16.h }[4], [x24], #0x2\n" + "st1 { v20.h }[4], [x23], #0x2\n" + "st1 { v24.h }[4], [x22], #0x2\n" "tbz x16, #0, 133f\n" "st1 { v8.b }[10], [x17]\n" - "st1 { v12.b }[10], [x24]\n" - "st1 { v16.b }[10], [x23]\n" - "st1 { v20.b }[10], [x22]\n" - "st1 { v24.b }[10], [x21]\n" + "st1 { v12.b }[10], [x25]\n" + "st1 { v16.b }[10], [x24]\n" + "st1 { v20.b }[10], [x23]\n" + "st1 { v24.b }[10], [x22]\n" "b 133f\n" "128:" // Height 5: Partial direct writeback: partial_1_8 "tbz x16, #0, 133f\n" "st1 { v8.b }[8], [x17]\n" - "st1 { v12.b }[8], [x24]\n" - "st1 { v16.b }[8], [x23]\n" - "st1 { v20.b }[8], [x22]\n" - "st1 { v24.b }[8], [x21]\n" + "st1 { v12.b }[8], [x25]\n" + "st1 { v16.b }[8], [x24]\n" + "st1 { v20.b }[8], [x23]\n" + "st1 { v24.b }[8], [x22]\n" "b 133f\n" "129:" // Height 5: Partial direct writeback: partial_4_0 "tbz x16, #2, 131f\n" "str 
s8, [x17], #0x4\n" - "str s12, [x24], #0x4\n" - "str s16, [x23], #0x4\n" - "str s20, [x22], #0x4\n" - "str s24, [x21], #0x4\n" + "str s12, [x25], #0x4\n" + "str s16, [x24], #0x4\n" + "str s20, [x23], #0x4\n" + "str s24, [x22], #0x4\n" "tbz x16, #1, 130f\n" "st1 { v8.h }[2], [x17], #0x2\n" - "st1 { v12.h }[2], [x24], #0x2\n" - "st1 { v16.h }[2], [x23], #0x2\n" - "st1 { v20.h }[2], [x22], #0x2\n" - "st1 { v24.h }[2], [x21], #0x2\n" + "st1 { v12.h }[2], [x25], #0x2\n" + "st1 { v16.h }[2], [x24], #0x2\n" + "st1 { v20.h }[2], [x23], #0x2\n" + "st1 { v24.h }[2], [x22], #0x2\n" "tbz x16, #0, 133f\n" "st1 { v8.b }[6], [x17]\n" - "st1 { v12.b }[6], [x24]\n" - "st1 { v16.b }[6], [x23]\n" - "st1 { v20.b }[6], [x22]\n" - "st1 { v24.b }[6], [x21]\n" + "st1 { v12.b }[6], [x25]\n" + "st1 { v16.b }[6], [x24]\n" + "st1 { v20.b }[6], [x23]\n" + "st1 { v24.b }[6], [x22]\n" "b 133f\n" "130:" // Height 5: Partial direct writeback: partial_1_4 "tbz x16, #0, 133f\n" "st1 { v8.b }[4], [x17]\n" - "st1 { v12.b }[4], [x24]\n" - "st1 { v16.b }[4], [x23]\n" - "st1 { v20.b }[4], [x22]\n" - "st1 { v24.b }[4], [x21]\n" + "st1 { v12.b }[4], [x25]\n" + "st1 { v16.b }[4], [x24]\n" + "st1 { v20.b }[4], [x23]\n" + "st1 { v24.b }[4], [x22]\n" "b 133f\n" "131:" // Height 5: Partial direct writeback: partial_2_0 "tbz x16, #1, 132f\n" "str h8, [x17], #0x2\n" - "str h12, [x24], #0x2\n" - "str h16, [x23], #0x2\n" - "str h20, [x22], #0x2\n" - "str h24, [x21], #0x2\n" + "str h12, [x25], #0x2\n" + "str h16, [x24], #0x2\n" + "str h20, [x23], #0x2\n" + "str h24, [x22], #0x2\n" "tbz x16, #0, 133f\n" "st1 { v8.b }[2], [x17]\n" - "st1 { v12.b }[2], [x24]\n" - "st1 { v16.b }[2], [x23]\n" - "st1 { v20.b }[2], [x22]\n" - "st1 { v24.b }[2], [x21]\n" + "st1 { v12.b }[2], [x25]\n" + "st1 { v16.b }[2], [x24]\n" + "st1 { v20.b }[2], [x23]\n" + "st1 { v24.b }[2], [x22]\n" "b 133f\n" "132:" // Height 5: Partial direct writeback: partial_1_0 "str b8, [x17, #0x0]\n" - "str b12, [x24, #0x0]\n" - "str b16, [x23, #0x0]\n" - "str b20, [x22, #0x0]\n" - "str b24, [x21, #0x0]\n" + "str b12, [x25, #0x0]\n" + "str b16, [x24, #0x0]\n" + "str b20, [x23, #0x0]\n" + "str b24, [x22, #0x0]\n" "133:" // Height 5: Partial direct writeback: Done "b 135f\n" "134:" // Height 5: Full writeback "str q8, [x17, #0x0]\n" "add x17, x17, #0x10\n" - "str q12, [x24, #0x0]\n" - "str q16, [x23, #0x0]\n" - "str q20, [x22, #0x0]\n" - "str q24, [x21, #0x0]\n" + "str q12, [x25, #0x0]\n" + "str q16, [x24, #0x0]\n" + "str q20, [x23, #0x0]\n" + "str q24, [x22, #0x0]\n" "135:" // Height 5: Writeback done "subs x16, x16, #0x10\n" "bgt 110b\n" @@ -2888,191 +2887,191 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( "139:" // Height 6: String loop "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n" "ldr w13, [x20, x14, LSL #0x2]\n" - "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n" + "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n" "tbz %x[flags], #3, 140f\n" - "ldr x21, [%x[input_ptr], x14, LSL #0x3]\n" - "add x21, x21, x20, LSL #3\n" - "ldr x12, [x21, #0x0]\n" - "ldr x9, [x21, #0x8]\n" - "ldr x27, [x21, #0x10]\n" - "ldr x25, [x21, #0x18]\n" - "ldr x23, [x21, #0x20]\n" - "ldr x21, [x21, #0x28]\n" + "ldr x20, [%x[input_ptr], x14, LSL #0x3]\n" + "add x20, x20, x21, LSL #3\n" + "ldr x12, [x20, #0x0]\n" + "ldr x11, [x20, #0x8]\n" + "ldr x10, [x20, #0x10]\n" + "ldr x9, [x20, #0x18]\n" + "ldr x28, [x20, #0x20]\n" + "ldr x27, [x20, #0x28]\n" "cbnz x14, 141f\n" "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n" "add x12, x12, x20\n" + "add x11, x11, x20\n" + "add x10, x10, x20\n" "add x9, 
x9, x20\n" + "add x28, x28, x20\n" "add x27, x27, x20\n" - "add x25, x25, x20\n" - "add x23, x23, x20\n" - "add x21, x21, x20\n" "b 141f\n" "140:" // Height 6: setup direct input "mov x12, %x[input_ptr]\n" - "add x9, x12, x20\n" - "add x27, x9, x20\n" - "add x25, x27, x20\n" - "add x23, x25, x20\n" - "add x21, x23, x20\n" + "add x11, x12, x21\n" + "add x10, x11, x21\n" + "add x9, x10, x21\n" + "add x28, x9, x21\n" + "add x27, x28, x21\n" "141:" // Height 6: input setup done "cmp x13, #0x10\n" "blt 144f\n" "ldr q0, [x12, #0x0]\n" "cmp x13, #0x20\n" - "ldr q1, [x9, #0x0]\n" - "ldr q2, [x27, #0x0]\n" - "ldr q3, [x25, #0x0]\n" - "ldr q4, [x23, #0x0]\n" - "ldr q5, [x21, #0x0]\n" + "ldr q1, [x11, #0x0]\n" + "ldr q2, [x10, #0x0]\n" + "ldr q3, [x9, #0x0]\n" + "ldr q4, [x28, #0x0]\n" + "ldr q5, [x27, #0x0]\n" "ldr q6, [x15, #0x0]\n" "ldr q7, [x15, #0x10]\n" "blt 143f\n" "142:" // Height 6: Multiply loop: Main loop head ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n" - "ldr x20, [x15, #0x28]\n" + "ldr x21, [x15, #0x28]\n" ".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n" - "ldr x11, [x15, #0x38]\n" + "ldr x20, [x15, #0x38]\n" ".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n" "add x12, x12, #0x10\n" ".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n" - "add x9, x9, #0x10\n" + "add x11, x11, #0x10\n" ".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n" - "add x27, x27, #0x10\n" + "add x10, x10, #0x10\n" ".inst 0x4f85e0dc // sdot v28.4s, v6.16b, v5.4b[0]\n" "ldr d6, [x15, #0x20]\n" ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n" - "mov v6.d[1], x20\n" + "mov v6.d[1], x21\n" ".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n" - "ldr x20, [x15, #0x48]\n" + "ldr x21, [x15, #0x48]\n" ".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n" - "add x25, x25, #0x10\n" + "add x9, x9, #0x10\n" ".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n" - "add x23, x23, #0x10\n" + "add x28, x28, #0x10\n" ".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n" - "add x21, x21, #0x10\n" + "add x27, x27, #0x10\n" ".inst 0x4f85e0fd // sdot v29.4s, v7.16b, v5.4b[0]\n" "ldr d7, [x15, #0x30]\n" - "mov v7.d[1], x11\n" + "mov v7.d[1], x20\n" ".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n" ".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n" - "ldr x11, [x15, #0x58]\n" + "ldr x20, [x15, #0x58]\n" ".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n" - "ldr x10, [x12, #0x8]\n" + "ldr x26, [x12, #0x8]\n" ".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n" - "ldr x28, [x9, #0x8]\n" + "ldr x25, [x11, #0x8]\n" ".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n" - "ldr x26, [x27, #0x8]\n" + "ldr x24, [x10, #0x8]\n" ".inst 0x4f85e0de // sdot v30.4s, v6.16b, v5.4b[0]\n" "ldr d6, [x15, #0x40]\n" ".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n" - "mov v6.d[1], x20\n" + "mov v6.d[1], x21\n" ".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n" - "ldr x20, [x15, #0x68]\n" + "ldr x21, [x15, #0x68]\n" ".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n" - "ldr x24, [x25, #0x8]\n" + "ldr x23, [x9, #0x8]\n" ".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n" "sub x13, x13, #0x10\n" ".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n" "cmp x13, #0x20\n" ".inst 0x4f85e0ff // sdot v31.4s, v7.16b, v5.4b[0]\n" "ldr d7, [x15, #0x50]\n" - "mov v7.d[1], x11\n" + "mov v7.d[1], x20\n" ".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n" ".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n" - "ldr x11, [x15, #0x78]\n" + "ldr x20, [x15, #0x78]\n" ".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n" "prfm pldl1keep, 
[x12, #0x80]\n" ".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n" - "prfm pldl1keep, [x9, #0x80]\n" + "prfm pldl1keep, [x11, #0x80]\n" ".inst 0x4fa4e0d8 // sdot v24.4s, v6.16b, v4.4b[1]\n" - "prfm pldl1keep, [x27, #0x80]\n" + "prfm pldl1keep, [x10, #0x80]\n" ".inst 0x4fa5e0dc // sdot v28.4s, v6.16b, v5.4b[1]\n" "ldr d6, [x15, #0x60]\n" ".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n" - "mov v6.d[1], x20\n" + "mov v6.d[1], x21\n" ".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n" - "ldr x20, [x15, #0x88]\n" + "ldr x21, [x15, #0x88]\n" ".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n" - "prfm pldl1keep, [x25, #0x80]\n" + "prfm pldl1keep, [x9, #0x80]\n" ".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n" - "prfm pldl1keep, [x23, #0x80]\n" + "prfm pldl1keep, [x28, #0x80]\n" ".inst 0x4fa4e0f9 // sdot v25.4s, v7.16b, v4.4b[1]\n" - "prfm pldl1keep, [x21, #0x80]\n" + "prfm pldl1keep, [x27, #0x80]\n" ".inst 0x4fa5e0fd // sdot v29.4s, v7.16b, v5.4b[1]\n" "ldr d7, [x15, #0x70]\n" - "mov v7.d[1], x11\n" + "mov v7.d[1], x20\n" ".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n" ".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n" - "ldr x11, [x15, #0x98]\n" + "ldr x20, [x15, #0x98]\n" ".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n" ".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n" ".inst 0x4fa4e0da // sdot v26.4s, v6.16b, v4.4b[1]\n" ".inst 0x4fa5e0de // sdot v30.4s, v6.16b, v5.4b[1]\n" "ldr d6, [x15, #0x80]\n" ".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n" - "mov v6.d[1], x20\n" + "mov v6.d[1], x21\n" ".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n" - "ldr x20, [x15, #0xa8]\n" + "ldr x21, [x15, #0xa8]\n" ".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n" ".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n" ".inst 0x4fa4e0fb // sdot v27.4s, v7.16b, v4.4b[1]\n" ".inst 0x4fa5e0ff // sdot v31.4s, v7.16b, v5.4b[1]\n" "ldr d7, [x15, #0x90]\n" - "mov v7.d[1], x11\n" + "mov v7.d[1], x20\n" ".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n" ".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n" - "ldr x11, [x15, #0xb8]\n" + "ldr x20, [x15, #0xb8]\n" ".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n" ".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n" ".inst 0x4f84e8d8 // sdot v24.4s, v6.16b, v4.4b[2]\n" ".inst 0x4f85e8dc // sdot v28.4s, v6.16b, v5.4b[2]\n" "ldr d6, [x15, #0xa0]\n" ".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n" - "mov v6.d[1], x20\n" + "mov v6.d[1], x21\n" ".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n" - "ldr x20, [x15, #0xc8]\n" + "ldr x21, [x15, #0xc8]\n" ".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n" ".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n" ".inst 0x4f84e8f9 // sdot v25.4s, v7.16b, v4.4b[2]\n" ".inst 0x4f85e8fd // sdot v29.4s, v7.16b, v5.4b[2]\n" "ldr d7, [x15, #0xb0]\n" - "mov v7.d[1], x11\n" + "mov v7.d[1], x20\n" ".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n" ".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n" - "ldr x11, [x15, #0xd8]\n" + "ldr x20, [x15, #0xd8]\n" ".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n" ".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n" ".inst 0x4f84e8da // sdot v26.4s, v6.16b, v4.4b[2]\n" ".inst 0x4f85e8de // sdot v30.4s, v6.16b, v5.4b[2]\n" "ldr d6, [x15, #0xc0]\n" ".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n" - "mov v6.d[1], x20\n" + "mov v6.d[1], x21\n" ".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n" - "ldr x20, [x15, #0xe8]\n" + "ldr x21, [x15, #0xe8]\n" ".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n" ".inst 0x4f83e8f7 // sdot v23.4s, 
v7.16b, v3.4b[2]\n" ".inst 0x4f84e8fb // sdot v27.4s, v7.16b, v4.4b[2]\n" ".inst 0x4f85e8ff // sdot v31.4s, v7.16b, v5.4b[2]\n" "ldr d7, [x15, #0xd0]\n" - "mov v7.d[1], x11\n" + "mov v7.d[1], x20\n" ".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n" ".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n" - "ldr x11, [x15, #0xf8]\n" + "ldr x20, [x15, #0xf8]\n" ".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n" ".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n" ".inst 0x4fa4e8d8 // sdot v24.4s, v6.16b, v4.4b[3]\n" ".inst 0x4fa5e8dc // sdot v28.4s, v6.16b, v5.4b[3]\n" "ldr d6, [x15, #0xe0]\n" ".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n" - "mov v6.d[1], x20\n" + "mov v6.d[1], x21\n" ".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n" - "ldr x22, [x23, #0x8]\n" + "ldr x22, [x28, #0x8]\n" ".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n" ".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n" ".inst 0x4fa4e8f9 // sdot v25.4s, v7.16b, v4.4b[3]\n" ".inst 0x4fa5e8fd // sdot v29.4s, v7.16b, v5.4b[3]\n" "ldr d7, [x15, #0xf0]\n" - "mov v7.d[1], x11\n" + "mov v7.d[1], x20\n" "add x15, x15, #0x100\n" ".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n" "ldr x20, [x15, #0x8]\n" @@ -3085,58 +3084,58 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( ".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n" "ldr d0, [x12, #0x0]\n" ".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n" - "ldr d1, [x9, #0x0]\n" + "ldr d1, [x11, #0x0]\n" ".inst 0x4fa2e8f3 // sdot v19.4s, v7.16b, v2.4b[3]\n" - "ldr d2, [x27, #0x0]\n" + "ldr d2, [x10, #0x0]\n" ".inst 0x4fa3e8f7 // sdot v23.4s, v7.16b, v3.4b[3]\n" - "ldr d3, [x25, #0x0]\n" + "ldr d3, [x9, #0x0]\n" ".inst 0x4fa4e8fb // sdot v27.4s, v7.16b, v4.4b[3]\n" - "ldr d4, [x23, #0x0]\n" + "ldr d4, [x28, #0x0]\n" ".inst 0x4fa5e8ff // sdot v31.4s, v7.16b, v5.4b[3]\n" - "ldr d5, [x21, #0x0]\n" + "ldr d5, [x27, #0x0]\n" "ldr d7, [x15, #0x10]\n" "mov v6.d[1], x20\n" - "ldr x20, [x21, #0x8]\n" - "mov v0.d[1], x10\n" - "ldr x11, [x15, #0x18]\n" - "mov v1.d[1], x28\n" - "mov v2.d[1], x26\n" - "mov v3.d[1], x24\n" + "ldr x21, [x27, #0x8]\n" + "mov v0.d[1], x26\n" + "ldr x20, [x15, #0x18]\n" + "mov v1.d[1], x25\n" + "mov v2.d[1], x24\n" + "mov v3.d[1], x23\n" "mov v4.d[1], x22\n" - "mov v5.d[1], x20\n" - "mov v7.d[1], x11\n" + "mov v5.d[1], x21\n" + "mov v7.d[1], x20\n" "bge 142b\n" "143:" // Height 6: Multiply loop: Single iteration only ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n" "add x12, x12, #0x10\n" ".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n" - "add x9, x9, #0x10\n" + "add x11, x11, #0x10\n" ".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n" - "add x27, x27, #0x10\n" + "add x10, x10, #0x10\n" ".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n" - "add x25, x25, #0x10\n" + "add x9, x9, #0x10\n" ".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n" - "add x23, x23, #0x10\n" + "add x28, x28, #0x10\n" ".inst 0x4f85e0dc // sdot v28.4s, v6.16b, v5.4b[0]\n" "ldr q6, [x15, #0x20]\n" ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n" - "add x21, x21, #0x10\n" + "add x27, x27, #0x10\n" ".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n" "sub x13, x13, #0x10\n" ".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n" "prfm pldl1keep, [x12, #0x80]\n" ".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n" - "prfm pldl1keep, [x9, #0x80]\n" + "prfm pldl1keep, [x11, #0x80]\n" ".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n" - "prfm pldl1keep, [x27, #0x80]\n" + "prfm pldl1keep, [x10, #0x80]\n" ".inst 0x4f85e0fd // sdot v29.4s, v7.16b, v5.4b[0]\n" "ldr q7, [x15, 
#0x30]\n" ".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n" - "prfm pldl1keep, [x25, #0x80]\n" + "prfm pldl1keep, [x9, #0x80]\n" ".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n" - "prfm pldl1keep, [x23, #0x80]\n" + "prfm pldl1keep, [x28, #0x80]\n" ".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n" - "prfm pldl1keep, [x21, #0x80]\n" + "prfm pldl1keep, [x27, #0x80]\n" ".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n" ".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n" ".inst 0x4f85e0de // sdot v30.4s, v6.16b, v5.4b[0]\n" @@ -3236,143 +3235,143 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( "cmp x13, #0x4\n" "blt 146f\n" "145:" // Height 6: Multiply loop: Odd block loop - "ldr s0, [x12], #0x4\n" + "ldr s7, [x12], #0x4\n" "sub x13, x13, #0x4\n" - "ldr s1, [x9], #0x4\n" + "ldr s6, [x11], #0x4\n" "cmp x13, #0x4\n" + "ldr s5, [x10], #0x4\n" + "ldr s4, [x9], #0x4\n" + "ldr s3, [x28], #0x4\n" "ldr s2, [x27], #0x4\n" - "ldr s3, [x25], #0x4\n" - "ldr s4, [x23], #0x4\n" - "ldr s5, [x21], #0x4\n" - "ldr q6, [x15, #0x0]\n" - ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n" - "ldr q7, [x15, #0x10]\n" - ".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n" - ".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n" - ".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n" - ".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n" - ".inst 0x4f85e0dc // sdot v28.4s, v6.16b, v5.4b[0]\n" - "ldr q6, [x15, #0x20]\n" - ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n" - ".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n" - ".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n" - ".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n" - ".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n" - ".inst 0x4f85e0fd // sdot v29.4s, v7.16b, v5.4b[0]\n" - "ldr q7, [x15, #0x30]\n" - ".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n" + "ldr q1, [x15, #0x0]\n" + ".inst 0x4f87e028 // sdot v8.4s, v1.16b, v7.4b[0]\n" + "ldr q0, [x15, #0x10]\n" + ".inst 0x4f86e02c // sdot v12.4s, v1.16b, v6.4b[0]\n" + ".inst 0x4f85e030 // sdot v16.4s, v1.16b, v5.4b[0]\n" + ".inst 0x4f84e034 // sdot v20.4s, v1.16b, v4.4b[0]\n" + ".inst 0x4f83e038 // sdot v24.4s, v1.16b, v3.4b[0]\n" + ".inst 0x4f82e03c // sdot v28.4s, v1.16b, v2.4b[0]\n" + "ldr q1, [x15, #0x20]\n" + ".inst 0x4f87e009 // sdot v9.4s, v0.16b, v7.4b[0]\n" + ".inst 0x4f86e00d // sdot v13.4s, v0.16b, v6.4b[0]\n" + ".inst 0x4f85e011 // sdot v17.4s, v0.16b, v5.4b[0]\n" + ".inst 0x4f84e015 // sdot v21.4s, v0.16b, v4.4b[0]\n" + ".inst 0x4f83e019 // sdot v25.4s, v0.16b, v3.4b[0]\n" + ".inst 0x4f82e01d // sdot v29.4s, v0.16b, v2.4b[0]\n" + "ldr q0, [x15, #0x30]\n" + ".inst 0x4f87e02a // sdot v10.4s, v1.16b, v7.4b[0]\n" "add x15, x15, #0x40\n" - ".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n" - ".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n" - ".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n" - ".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n" - ".inst 0x4f85e0de // sdot v30.4s, v6.16b, v5.4b[0]\n" - ".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n" - ".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n" - ".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n" - ".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n" - ".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n" - ".inst 0x4f85e0ff // sdot v31.4s, v7.16b, v5.4b[0]\n" + ".inst 0x4f86e02e // sdot v14.4s, v1.16b, v6.4b[0]\n" + ".inst 0x4f85e032 // sdot v18.4s, v1.16b, v5.4b[0]\n" + ".inst 0x4f84e036 // sdot v22.4s, v1.16b, v4.4b[0]\n" + ".inst 0x4f83e03a // sdot v26.4s, v1.16b, v3.4b[0]\n" + ".inst 
0x4f82e03e // sdot v30.4s, v1.16b, v2.4b[0]\n" + ".inst 0x4f87e00b // sdot v11.4s, v0.16b, v7.4b[0]\n" + ".inst 0x4f86e00f // sdot v15.4s, v0.16b, v6.4b[0]\n" + ".inst 0x4f85e013 // sdot v19.4s, v0.16b, v5.4b[0]\n" + ".inst 0x4f84e017 // sdot v23.4s, v0.16b, v4.4b[0]\n" + ".inst 0x4f83e01b // sdot v27.4s, v0.16b, v3.4b[0]\n" + ".inst 0x4f82e01f // sdot v31.4s, v0.16b, v2.4b[0]\n" "bge 145b\n" "146:" // Height 6: Multiply loop: Skip odd blocks "cbz x13, 149f\n" "tbz x13, #1, 147f\n" "ldr h0, [x12], #0x2\n" - "ldr h1, [x9], #0x2\n" - "ldr h2, [x27], #0x2\n" - "ldr h3, [x25], #0x2\n" - "ldr h4, [x23], #0x2\n" - "ldr h5, [x21], #0x2\n" + "ldr h1, [x11], #0x2\n" + "ldr h2, [x10], #0x2\n" + "ldr h3, [x9], #0x2\n" + "ldr h4, [x28], #0x2\n" + "ldr h5, [x27], #0x2\n" "tbz x13, #0, 148f\n" "ld1 { v0.b }[2], [x12]\n" - "ld1 { v1.b }[2], [x9]\n" - "ld1 { v2.b }[2], [x27]\n" - "ld1 { v3.b }[2], [x25]\n" - "ld1 { v4.b }[2], [x23]\n" - "ld1 { v5.b }[2], [x21]\n" + "ld1 { v1.b }[2], [x11]\n" + "ld1 { v2.b }[2], [x10]\n" + "ld1 { v3.b }[2], [x9]\n" + "ld1 { v4.b }[2], [x28]\n" + "ld1 { v5.b }[2], [x27]\n" "b 148f\n" "147:" // Height 6: Multiply loop: Ragged operand read: partial_1_0 "ldr b0, [x12, #0x0]\n" - "ldr b1, [x9, #0x0]\n" - "ldr b2, [x27, #0x0]\n" - "ldr b3, [x25, #0x0]\n" - "ldr b4, [x23, #0x0]\n" - "ldr b5, [x21, #0x0]\n" + "ldr b1, [x11, #0x0]\n" + "ldr b2, [x10, #0x0]\n" + "ldr b3, [x9, #0x0]\n" + "ldr b4, [x28, #0x0]\n" + "ldr b5, [x27, #0x0]\n" "148:" // Height 6: Multiply loop: Ragged operand read: Done - "ldr q6, [x15, #0x0]\n" - ".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n" - "ldr q7, [x15, #0x10]\n" - ".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n" - ".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n" - ".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n" - ".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n" - ".inst 0x4f85e0dc // sdot v28.4s, v6.16b, v5.4b[0]\n" - "ldr q6, [x15, #0x20]\n" - ".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n" - ".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n" - ".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n" - ".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n" - ".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n" - ".inst 0x4f85e0fd // sdot v29.4s, v7.16b, v5.4b[0]\n" - "ldr q7, [x15, #0x30]\n" - ".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n" + "ldr q7, [x15, #0x0]\n" + ".inst 0x4f80e0e8 // sdot v8.4s, v7.16b, v0.4b[0]\n" + "ldr q6, [x15, #0x10]\n" + ".inst 0x4f81e0ec // sdot v12.4s, v7.16b, v1.4b[0]\n" + ".inst 0x4f82e0f0 // sdot v16.4s, v7.16b, v2.4b[0]\n" + ".inst 0x4f83e0f4 // sdot v20.4s, v7.16b, v3.4b[0]\n" + ".inst 0x4f84e0f8 // sdot v24.4s, v7.16b, v4.4b[0]\n" + ".inst 0x4f85e0fc // sdot v28.4s, v7.16b, v5.4b[0]\n" + "ldr q7, [x15, #0x20]\n" + ".inst 0x4f80e0c9 // sdot v9.4s, v6.16b, v0.4b[0]\n" + ".inst 0x4f81e0cd // sdot v13.4s, v6.16b, v1.4b[0]\n" + ".inst 0x4f82e0d1 // sdot v17.4s, v6.16b, v2.4b[0]\n" + ".inst 0x4f83e0d5 // sdot v21.4s, v6.16b, v3.4b[0]\n" + ".inst 0x4f84e0d9 // sdot v25.4s, v6.16b, v4.4b[0]\n" + ".inst 0x4f85e0dd // sdot v29.4s, v6.16b, v5.4b[0]\n" + "ldr q6, [x15, #0x30]\n" + ".inst 0x4f80e0ea // sdot v10.4s, v7.16b, v0.4b[0]\n" "add x15, x15, #0x40\n" - ".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n" - ".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n" - ".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n" - ".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n" - ".inst 0x4f85e0de // sdot v30.4s, v6.16b, v5.4b[0]\n" - ".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n" 
- ".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n" - ".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n" - ".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n" - ".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n" - ".inst 0x4f85e0ff // sdot v31.4s, v7.16b, v5.4b[0]\n" + ".inst 0x4f81e0ee // sdot v14.4s, v7.16b, v1.4b[0]\n" + ".inst 0x4f82e0f2 // sdot v18.4s, v7.16b, v2.4b[0]\n" + ".inst 0x4f83e0f6 // sdot v22.4s, v7.16b, v3.4b[0]\n" + ".inst 0x4f84e0fa // sdot v26.4s, v7.16b, v4.4b[0]\n" + ".inst 0x4f85e0fe // sdot v30.4s, v7.16b, v5.4b[0]\n" + ".inst 0x4f80e0cb // sdot v11.4s, v6.16b, v0.4b[0]\n" + ".inst 0x4f81e0cf // sdot v15.4s, v6.16b, v1.4b[0]\n" + ".inst 0x4f82e0d3 // sdot v19.4s, v6.16b, v2.4b[0]\n" + ".inst 0x4f83e0d7 // sdot v23.4s, v6.16b, v3.4b[0]\n" + ".inst 0x4f84e0db // sdot v27.4s, v6.16b, v4.4b[0]\n" + ".inst 0x4f85e0df // sdot v31.4s, v6.16b, v5.4b[0]\n" "149:" // Height 6: Multiply loop: No odd multiplies "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n" "add x14, x14, #0x1\n" "cmp x14, x20\n" "bne 139b\n" - "ldr q0, [x6, #0x0]\n" - "add v8.4s, v8.4s, v0.4s\n" - "ldr q1, [x6, #0x10]\n" - "add v9.4s, v9.4s, v1.4s\n" - "ldr q2, [x6, #0x20]\n" - "add v10.4s, v10.4s, v2.4s\n" - "ldr q3, [x6, #0x30]\n" - "add v11.4s, v11.4s, v3.4s\n" + "ldr q3, [x6, #0x0]\n" + "add v8.4s, v8.4s, v3.4s\n" + "ldr q2, [x6, #0x10]\n" + "add v9.4s, v9.4s, v2.4s\n" + "ldr q1, [x6, #0x20]\n" + "add v10.4s, v10.4s, v1.4s\n" + "ldr q0, [x6, #0x30]\n" + "add v11.4s, v11.4s, v0.4s\n" "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n" - "add x24, x17, x20\n" + "add x25, x17, x20\n" + "add x24, x25, x20\n" "add x23, x24, x20\n" "add x22, x23, x20\n" "add x21, x22, x20\n" - "add x20, x21, x20\n" "prfm pstl1keep, [x17, #0x0]\n" - "add v12.4s, v12.4s, v0.4s\n" + "add v12.4s, v12.4s, v3.4s\n" + "prfm pstl1keep, [x25, #0x0]\n" + "add v13.4s, v13.4s, v2.4s\n" "prfm pstl1keep, [x24, #0x0]\n" - "add v13.4s, v13.4s, v1.4s\n" + "add v14.4s, v14.4s, v1.4s\n" "prfm pstl1keep, [x23, #0x0]\n" - "add v14.4s, v14.4s, v2.4s\n" + "add v15.4s, v15.4s, v0.4s\n" "prfm pstl1keep, [x22, #0x0]\n" - "add v15.4s, v15.4s, v3.4s\n" + "add v16.4s, v16.4s, v3.4s\n" "prfm pstl1keep, [x21, #0x0]\n" - "add v16.4s, v16.4s, v0.4s\n" - "prfm pstl1keep, [x20, #0x0]\n" - "add v17.4s, v17.4s, v1.4s\n" - "add v18.4s, v18.4s, v2.4s\n" - "add v19.4s, v19.4s, v3.4s\n" - "add v20.4s, v20.4s, v0.4s\n" - "add v21.4s, v21.4s, v1.4s\n" - "add v22.4s, v22.4s, v2.4s\n" - "add v23.4s, v23.4s, v3.4s\n" - "add v24.4s, v24.4s, v0.4s\n" - "add v25.4s, v25.4s, v1.4s\n" - "add v26.4s, v26.4s, v2.4s\n" - "add v27.4s, v27.4s, v3.4s\n" - "add v28.4s, v28.4s, v0.4s\n" - "add v29.4s, v29.4s, v1.4s\n" - "add v30.4s, v30.4s, v2.4s\n" - "add v31.4s, v31.4s, v3.4s\n" + "add v17.4s, v17.4s, v2.4s\n" + "add v18.4s, v18.4s, v1.4s\n" + "add v19.4s, v19.4s, v0.4s\n" + "add v20.4s, v20.4s, v3.4s\n" + "add v21.4s, v21.4s, v2.4s\n" + "add v22.4s, v22.4s, v1.4s\n" + "add v23.4s, v23.4s, v0.4s\n" + "add v24.4s, v24.4s, v3.4s\n" + "add v25.4s, v25.4s, v2.4s\n" + "add v26.4s, v26.4s, v1.4s\n" + "add v27.4s, v27.4s, v0.4s\n" + "add v28.4s, v28.4s, v3.4s\n" + "add v29.4s, v29.4s, v2.4s\n" + "add v30.4s, v30.4s, v1.4s\n" + "add v31.4s, v31.4s, v0.4s\n" "add x6, x6, #0x40\n" "tbz %x[flags], #4, 150f\n" "ldr q0, [x8, #0x0]\n" @@ -3387,10 +3386,10 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( "add x7, x7, #0x40\n" "b 151f\n" "150:" // Height 6: per layer parameters - "add x25, %x[qp], %[per_layer_right_shift]\n" - "ld1r { v0.4s }, [x25]\n" - "add x25, %x[qp], 
%[per_layer_mul]\n" - "ld1r { v4.4s }, [x25]\n" + "add x20, %x[qp], %[per_layer_right_shift]\n" + "ld1r { v0.4s }, [x20]\n" + "add x20, %x[qp], %[per_layer_mul]\n" + "ld1r { v4.4s }, [x20]\n" "mov v1.16b, v0.16b\n" "mov v5.16b, v4.16b\n" "mov v2.16b, v0.16b\n" @@ -3423,78 +3422,78 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( "sqrdmulh v30.4s, v30.4s, v6.4s\n" "sqrdmulh v31.4s, v31.4s, v7.4s\n" "tbz %x[flags], #5, 152f\n" - "and v4.16b, v8.16b, v0.16b\n" - "and v5.16b, v9.16b, v1.16b\n" - "and v6.16b, v10.16b, v2.16b\n" - "and v7.16b, v11.16b, v3.16b\n" - "sshr v4.4s, v4.4s, #0x1f\n" - "sshr v5.4s, v5.4s, #0x1f\n" - "sshr v6.4s, v6.4s, #0x1f\n" + "and v7.16b, v8.16b, v0.16b\n" + "and v6.16b, v9.16b, v1.16b\n" + "and v5.16b, v10.16b, v2.16b\n" + "and v4.16b, v11.16b, v3.16b\n" "sshr v7.4s, v7.4s, #0x1f\n" - "sqadd v8.4s, v8.4s, v4.4s\n" - "sqadd v9.4s, v9.4s, v5.4s\n" - "sqadd v10.4s, v10.4s, v6.4s\n" - "sqadd v11.4s, v11.4s, v7.4s\n" - "and v4.16b, v12.16b, v0.16b\n" - "and v5.16b, v13.16b, v1.16b\n" - "and v6.16b, v14.16b, v2.16b\n" - "and v7.16b, v15.16b, v3.16b\n" - "sshr v4.4s, v4.4s, #0x1f\n" - "sshr v5.4s, v5.4s, #0x1f\n" "sshr v6.4s, v6.4s, #0x1f\n" - "sshr v7.4s, v7.4s, #0x1f\n" - "sqadd v12.4s, v12.4s, v4.4s\n" - "sqadd v13.4s, v13.4s, v5.4s\n" - "sqadd v14.4s, v14.4s, v6.4s\n" - "sqadd v15.4s, v15.4s, v7.4s\n" - "and v4.16b, v16.16b, v0.16b\n" - "and v5.16b, v17.16b, v1.16b\n" - "and v6.16b, v18.16b, v2.16b\n" - "and v7.16b, v19.16b, v3.16b\n" - "sshr v4.4s, v4.4s, #0x1f\n" "sshr v5.4s, v5.4s, #0x1f\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sshr v7.4s, v7.4s, #0x1f\n" - "sqadd v16.4s, v16.4s, v4.4s\n" - "sqadd v17.4s, v17.4s, v5.4s\n" - "sqadd v18.4s, v18.4s, v6.4s\n" - "sqadd v19.4s, v19.4s, v7.4s\n" - "and v4.16b, v20.16b, v0.16b\n" - "and v5.16b, v21.16b, v1.16b\n" - "and v6.16b, v22.16b, v2.16b\n" - "and v7.16b, v23.16b, v3.16b\n" "sshr v4.4s, v4.4s, #0x1f\n" - "sshr v5.4s, v5.4s, #0x1f\n" - "sshr v6.4s, v6.4s, #0x1f\n" + "sqadd v8.4s, v8.4s, v7.4s\n" + "sqadd v9.4s, v9.4s, v6.4s\n" + "sqadd v10.4s, v10.4s, v5.4s\n" + "sqadd v11.4s, v11.4s, v4.4s\n" + "and v7.16b, v12.16b, v0.16b\n" + "and v6.16b, v13.16b, v1.16b\n" + "and v5.16b, v14.16b, v2.16b\n" + "and v4.16b, v15.16b, v3.16b\n" "sshr v7.4s, v7.4s, #0x1f\n" - "sqadd v20.4s, v20.4s, v4.4s\n" - "sqadd v21.4s, v21.4s, v5.4s\n" - "sqadd v22.4s, v22.4s, v6.4s\n" - "sqadd v23.4s, v23.4s, v7.4s\n" - "and v4.16b, v24.16b, v0.16b\n" - "and v5.16b, v25.16b, v1.16b\n" - "and v6.16b, v26.16b, v2.16b\n" - "and v7.16b, v27.16b, v3.16b\n" - "sshr v4.4s, v4.4s, #0x1f\n" - "sshr v5.4s, v5.4s, #0x1f\n" "sshr v6.4s, v6.4s, #0x1f\n" + "sshr v5.4s, v5.4s, #0x1f\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "sqadd v12.4s, v12.4s, v7.4s\n" + "sqadd v13.4s, v13.4s, v6.4s\n" + "sqadd v14.4s, v14.4s, v5.4s\n" + "sqadd v15.4s, v15.4s, v4.4s\n" + "and v7.16b, v16.16b, v0.16b\n" + "and v6.16b, v17.16b, v1.16b\n" + "and v5.16b, v18.16b, v2.16b\n" + "and v4.16b, v19.16b, v3.16b\n" "sshr v7.4s, v7.4s, #0x1f\n" - "sqadd v24.4s, v24.4s, v4.4s\n" - "sqadd v25.4s, v25.4s, v5.4s\n" - "sqadd v26.4s, v26.4s, v6.4s\n" - "sqadd v27.4s, v27.4s, v7.4s\n" - "and v4.16b, v28.16b, v0.16b\n" - "and v5.16b, v29.16b, v1.16b\n" - "and v6.16b, v30.16b, v2.16b\n" - "and v7.16b, v31.16b, v3.16b\n" + "sshr v6.4s, v6.4s, #0x1f\n" + "sshr v5.4s, v5.4s, #0x1f\n" "sshr v4.4s, v4.4s, #0x1f\n" + "sqadd v16.4s, v16.4s, v7.4s\n" + "sqadd v17.4s, v17.4s, v6.4s\n" + "sqadd v18.4s, v18.4s, v5.4s\n" + "sqadd v19.4s, v19.4s, v4.4s\n" + "and v7.16b, v20.16b, v0.16b\n" + "and v6.16b, v21.16b, v1.16b\n" + "and 
v5.16b, v22.16b, v2.16b\n" + "and v4.16b, v23.16b, v3.16b\n" + "sshr v7.4s, v7.4s, #0x1f\n" + "sshr v6.4s, v6.4s, #0x1f\n" "sshr v5.4s, v5.4s, #0x1f\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "sqadd v20.4s, v20.4s, v7.4s\n" + "sqadd v21.4s, v21.4s, v6.4s\n" + "sqadd v22.4s, v22.4s, v5.4s\n" + "sqadd v23.4s, v23.4s, v4.4s\n" + "and v7.16b, v24.16b, v0.16b\n" + "and v6.16b, v25.16b, v1.16b\n" + "and v5.16b, v26.16b, v2.16b\n" + "and v4.16b, v27.16b, v3.16b\n" + "sshr v7.4s, v7.4s, #0x1f\n" "sshr v6.4s, v6.4s, #0x1f\n" + "sshr v5.4s, v5.4s, #0x1f\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "sqadd v24.4s, v24.4s, v7.4s\n" + "sqadd v25.4s, v25.4s, v6.4s\n" + "sqadd v26.4s, v26.4s, v5.4s\n" + "sqadd v27.4s, v27.4s, v4.4s\n" + "and v7.16b, v28.16b, v0.16b\n" + "and v6.16b, v29.16b, v1.16b\n" + "and v5.16b, v30.16b, v2.16b\n" + "and v4.16b, v31.16b, v3.16b\n" "sshr v7.4s, v7.4s, #0x1f\n" - "sqadd v28.4s, v28.4s, v4.4s\n" - "sqadd v29.4s, v29.4s, v5.4s\n" - "sqadd v30.4s, v30.4s, v6.4s\n" - "sqadd v31.4s, v31.4s, v7.4s\n" + "sshr v6.4s, v6.4s, #0x1f\n" + "sshr v5.4s, v5.4s, #0x1f\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "sqadd v28.4s, v28.4s, v7.4s\n" + "sqadd v29.4s, v29.4s, v6.4s\n" + "sqadd v30.4s, v30.4s, v5.4s\n" + "sqadd v31.4s, v31.4s, v4.4s\n" "152:" // Height 6: no shift correction "srshl v8.4s, v8.4s, v0.4s\n" "srshl v9.4s, v9.4s, v1.4s\n" @@ -3520,232 +3519,232 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( "srshl v29.4s, v29.4s, v1.4s\n" "srshl v30.4s, v30.4s, v2.4s\n" "srshl v31.4s, v31.4s, v3.4s\n" - "add x25, %x[qp], %[c_offset]\n" - "ld1r { v4.4s }, [x25]\n" - "add v8.4s, v8.4s, v4.4s\n" - "add v9.4s, v9.4s, v4.4s\n" - "add v10.4s, v10.4s, v4.4s\n" - "add v11.4s, v11.4s, v4.4s\n" - "add v12.4s, v12.4s, v4.4s\n" - "add v13.4s, v13.4s, v4.4s\n" - "add v14.4s, v14.4s, v4.4s\n" - "add v15.4s, v15.4s, v4.4s\n" - "add v16.4s, v16.4s, v4.4s\n" - "add v17.4s, v17.4s, v4.4s\n" - "add v18.4s, v18.4s, v4.4s\n" - "add v19.4s, v19.4s, v4.4s\n" - "add v20.4s, v20.4s, v4.4s\n" - "add v21.4s, v21.4s, v4.4s\n" - "add v22.4s, v22.4s, v4.4s\n" - "add v23.4s, v23.4s, v4.4s\n" - "add v24.4s, v24.4s, v4.4s\n" - "add v25.4s, v25.4s, v4.4s\n" - "add v26.4s, v26.4s, v4.4s\n" - "add v27.4s, v27.4s, v4.4s\n" - "add v28.4s, v28.4s, v4.4s\n" - "add v29.4s, v29.4s, v4.4s\n" - "add v30.4s, v30.4s, v4.4s\n" - "add v31.4s, v31.4s, v4.4s\n" - "add x25, %x[qp], %[maxval]\n" - "ld1r { v6.4s }, [x25]\n" - "smin v8.4s, v8.4s, v6.4s\n" - "smin v9.4s, v9.4s, v6.4s\n" - "smin v10.4s, v10.4s, v6.4s\n" - "smin v11.4s, v11.4s, v6.4s\n" - "smin v12.4s, v12.4s, v6.4s\n" - "smin v13.4s, v13.4s, v6.4s\n" - "smin v14.4s, v14.4s, v6.4s\n" - "smin v15.4s, v15.4s, v6.4s\n" - "smin v16.4s, v16.4s, v6.4s\n" - "smin v17.4s, v17.4s, v6.4s\n" - "smin v18.4s, v18.4s, v6.4s\n" - "smin v19.4s, v19.4s, v6.4s\n" - "smin v20.4s, v20.4s, v6.4s\n" - "smin v21.4s, v21.4s, v6.4s\n" - "smin v22.4s, v22.4s, v6.4s\n" - "smin v23.4s, v23.4s, v6.4s\n" - "smin v24.4s, v24.4s, v6.4s\n" - "smin v25.4s, v25.4s, v6.4s\n" - "smin v26.4s, v26.4s, v6.4s\n" - "smin v27.4s, v27.4s, v6.4s\n" - "smin v28.4s, v28.4s, v6.4s\n" - "smin v29.4s, v29.4s, v6.4s\n" - "smin v30.4s, v30.4s, v6.4s\n" - "smin v31.4s, v31.4s, v6.4s\n" - "add x25, %x[qp], %[minval]\n" - "ld1r { v5.4s }, [x25]\n" - "smax v8.4s, v8.4s, v5.4s\n" - "smax v9.4s, v9.4s, v5.4s\n" - "smax v10.4s, v10.4s, v5.4s\n" - "smax v11.4s, v11.4s, v5.4s\n" - "smax v12.4s, v12.4s, v5.4s\n" - "smax v13.4s, v13.4s, v5.4s\n" - "smax v14.4s, v14.4s, v5.4s\n" - "smax v15.4s, v15.4s, v5.4s\n" - "smax v16.4s, v16.4s, v5.4s\n" - "smax v17.4s, v17.4s, 
v5.4s\n" - "smax v18.4s, v18.4s, v5.4s\n" - "smax v19.4s, v19.4s, v5.4s\n" - "smax v20.4s, v20.4s, v5.4s\n" - "smax v21.4s, v21.4s, v5.4s\n" - "smax v22.4s, v22.4s, v5.4s\n" - "smax v23.4s, v23.4s, v5.4s\n" - "smax v24.4s, v24.4s, v5.4s\n" - "smax v25.4s, v25.4s, v5.4s\n" - "smax v26.4s, v26.4s, v5.4s\n" - "smax v27.4s, v27.4s, v5.4s\n" - "smax v28.4s, v28.4s, v5.4s\n" - "smax v29.4s, v29.4s, v5.4s\n" - "smax v30.4s, v30.4s, v5.4s\n" - "smax v31.4s, v31.4s, v5.4s\n" + "add x20, %x[qp], %[c_offset]\n" + "ld1r { v0.4s }, [x20]\n" + "add v8.4s, v8.4s, v0.4s\n" + "add v9.4s, v9.4s, v0.4s\n" + "add v10.4s, v10.4s, v0.4s\n" + "add v11.4s, v11.4s, v0.4s\n" + "add v12.4s, v12.4s, v0.4s\n" + "add v13.4s, v13.4s, v0.4s\n" + "add v14.4s, v14.4s, v0.4s\n" + "add v15.4s, v15.4s, v0.4s\n" + "add v16.4s, v16.4s, v0.4s\n" + "add v17.4s, v17.4s, v0.4s\n" + "add v18.4s, v18.4s, v0.4s\n" + "add v19.4s, v19.4s, v0.4s\n" + "add v20.4s, v20.4s, v0.4s\n" + "add v21.4s, v21.4s, v0.4s\n" + "add v22.4s, v22.4s, v0.4s\n" + "add v23.4s, v23.4s, v0.4s\n" + "add v24.4s, v24.4s, v0.4s\n" + "add v25.4s, v25.4s, v0.4s\n" + "add v26.4s, v26.4s, v0.4s\n" + "add v27.4s, v27.4s, v0.4s\n" + "add v28.4s, v28.4s, v0.4s\n" + "add v29.4s, v29.4s, v0.4s\n" + "add v30.4s, v30.4s, v0.4s\n" + "add v31.4s, v31.4s, v0.4s\n" + "add x20, %x[qp], %[maxval]\n" + "ld1r { v0.4s }, [x20]\n" + "smin v8.4s, v8.4s, v0.4s\n" + "smin v9.4s, v9.4s, v0.4s\n" + "smin v10.4s, v10.4s, v0.4s\n" + "smin v11.4s, v11.4s, v0.4s\n" + "smin v12.4s, v12.4s, v0.4s\n" + "smin v13.4s, v13.4s, v0.4s\n" + "smin v14.4s, v14.4s, v0.4s\n" + "smin v15.4s, v15.4s, v0.4s\n" + "smin v16.4s, v16.4s, v0.4s\n" + "smin v17.4s, v17.4s, v0.4s\n" + "smin v18.4s, v18.4s, v0.4s\n" + "smin v19.4s, v19.4s, v0.4s\n" + "smin v20.4s, v20.4s, v0.4s\n" + "smin v21.4s, v21.4s, v0.4s\n" + "smin v22.4s, v22.4s, v0.4s\n" + "smin v23.4s, v23.4s, v0.4s\n" + "smin v24.4s, v24.4s, v0.4s\n" + "smin v25.4s, v25.4s, v0.4s\n" + "smin v26.4s, v26.4s, v0.4s\n" + "smin v27.4s, v27.4s, v0.4s\n" + "smin v28.4s, v28.4s, v0.4s\n" + "smin v29.4s, v29.4s, v0.4s\n" + "smin v30.4s, v30.4s, v0.4s\n" + "smin v31.4s, v31.4s, v0.4s\n" + "add x20, %x[qp], %[minval]\n" + "ld1r { v0.4s }, [x20]\n" + "smax v8.4s, v8.4s, v0.4s\n" + "smax v9.4s, v9.4s, v0.4s\n" + "smax v10.4s, v10.4s, v0.4s\n" + "smax v11.4s, v11.4s, v0.4s\n" + "smax v12.4s, v12.4s, v0.4s\n" + "smax v13.4s, v13.4s, v0.4s\n" + "smax v14.4s, v14.4s, v0.4s\n" + "smax v15.4s, v15.4s, v0.4s\n" + "smax v16.4s, v16.4s, v0.4s\n" + "smax v17.4s, v17.4s, v0.4s\n" + "smax v18.4s, v18.4s, v0.4s\n" + "smax v19.4s, v19.4s, v0.4s\n" + "smax v20.4s, v20.4s, v0.4s\n" + "smax v21.4s, v21.4s, v0.4s\n" + "smax v22.4s, v22.4s, v0.4s\n" + "smax v23.4s, v23.4s, v0.4s\n" + "smax v24.4s, v24.4s, v0.4s\n" + "smax v25.4s, v25.4s, v0.4s\n" + "smax v26.4s, v26.4s, v0.4s\n" + "smax v27.4s, v27.4s, v0.4s\n" + "smax v28.4s, v28.4s, v0.4s\n" + "smax v29.4s, v29.4s, v0.4s\n" + "smax v30.4s, v30.4s, v0.4s\n" + "smax v31.4s, v31.4s, v0.4s\n" "uzp1 v8.8h, v8.8h, v9.8h\n" - "uzp1 v9.8h, v10.8h, v11.8h\n" + "uzp1 v2.8h, v10.8h, v11.8h\n" "uzp1 v12.8h, v12.8h, v13.8h\n" - "uzp1 v13.8h, v14.8h, v15.8h\n" + "uzp1 v1.8h, v14.8h, v15.8h\n" "uzp1 v16.8h, v16.8h, v17.8h\n" - "uzp1 v17.8h, v18.8h, v19.8h\n" + "uzp1 v0.8h, v18.8h, v19.8h\n" "uzp1 v20.8h, v20.8h, v21.8h\n" - "uzp1 v21.8h, v22.8h, v23.8h\n" + "uzp1 v19.8h, v22.8h, v23.8h\n" "uzp1 v24.8h, v24.8h, v25.8h\n" - "uzp1 v25.8h, v26.8h, v27.8h\n" + "uzp1 v18.8h, v26.8h, v27.8h\n" "uzp1 v28.8h, v28.8h, v29.8h\n" - "uzp1 v29.8h, v30.8h, v31.8h\n" 
+ "uzp1 v17.8h, v30.8h, v31.8h\n" "cmp x16, #0x10\n" - "uzp1 v8.16b, v8.16b, v9.16b\n" - "uzp1 v12.16b, v12.16b, v13.16b\n" - "uzp1 v16.16b, v16.16b, v17.16b\n" - "uzp1 v20.16b, v20.16b, v21.16b\n" - "uzp1 v24.16b, v24.16b, v25.16b\n" - "uzp1 v28.16b, v28.16b, v29.16b\n" + "uzp1 v8.16b, v8.16b, v2.16b\n" + "uzp1 v12.16b, v12.16b, v1.16b\n" + "uzp1 v16.16b, v16.16b, v0.16b\n" + "uzp1 v20.16b, v20.16b, v19.16b\n" + "uzp1 v24.16b, v24.16b, v18.16b\n" + "uzp1 v28.16b, v28.16b, v17.16b\n" "bge 161f\n" "tbz x16, #3, 156f\n" "str d8, [x17], #0x8\n" - "str d12, [x24], #0x8\n" - "str d16, [x23], #0x8\n" - "str d20, [x22], #0x8\n" - "str d24, [x21], #0x8\n" - "str d28, [x20], #0x8\n" + "str d12, [x25], #0x8\n" + "str d16, [x24], #0x8\n" + "str d20, [x23], #0x8\n" + "str d24, [x22], #0x8\n" + "str d28, [x21], #0x8\n" "tbz x16, #2, 154f\n" "st1 { v8.s }[2], [x17], #0x4\n" - "st1 { v12.s }[2], [x24], #0x4\n" - "st1 { v16.s }[2], [x23], #0x4\n" - "st1 { v20.s }[2], [x22], #0x4\n" - "st1 { v24.s }[2], [x21], #0x4\n" - "st1 { v28.s }[2], [x20], #0x4\n" + "st1 { v12.s }[2], [x25], #0x4\n" + "st1 { v16.s }[2], [x24], #0x4\n" + "st1 { v20.s }[2], [x23], #0x4\n" + "st1 { v24.s }[2], [x22], #0x4\n" + "st1 { v28.s }[2], [x21], #0x4\n" "tbz x16, #1, 153f\n" "st1 { v8.h }[6], [x17], #0x2\n" - "st1 { v12.h }[6], [x24], #0x2\n" - "st1 { v16.h }[6], [x23], #0x2\n" - "st1 { v20.h }[6], [x22], #0x2\n" - "st1 { v24.h }[6], [x21], #0x2\n" - "st1 { v28.h }[6], [x20], #0x2\n" + "st1 { v12.h }[6], [x25], #0x2\n" + "st1 { v16.h }[6], [x24], #0x2\n" + "st1 { v20.h }[6], [x23], #0x2\n" + "st1 { v24.h }[6], [x22], #0x2\n" + "st1 { v28.h }[6], [x21], #0x2\n" "tbz x16, #0, 160f\n" "st1 { v8.b }[14], [x17]\n" - "st1 { v12.b }[14], [x24]\n" - "st1 { v16.b }[14], [x23]\n" - "st1 { v20.b }[14], [x22]\n" - "st1 { v24.b }[14], [x21]\n" - "st1 { v28.b }[14], [x20]\n" + "st1 { v12.b }[14], [x25]\n" + "st1 { v16.b }[14], [x24]\n" + "st1 { v20.b }[14], [x23]\n" + "st1 { v24.b }[14], [x22]\n" + "st1 { v28.b }[14], [x21]\n" "b 160f\n" "153:" // Height 6: Partial direct writeback: partial_1_12 "tbz x16, #0, 160f\n" "st1 { v8.b }[12], [x17]\n" - "st1 { v12.b }[12], [x24]\n" - "st1 { v16.b }[12], [x23]\n" - "st1 { v20.b }[12], [x22]\n" - "st1 { v24.b }[12], [x21]\n" - "st1 { v28.b }[12], [x20]\n" + "st1 { v12.b }[12], [x25]\n" + "st1 { v16.b }[12], [x24]\n" + "st1 { v20.b }[12], [x23]\n" + "st1 { v24.b }[12], [x22]\n" + "st1 { v28.b }[12], [x21]\n" "b 160f\n" "154:" // Height 6: Partial direct writeback: partial_2_8 "tbz x16, #1, 155f\n" "st1 { v8.h }[4], [x17], #0x2\n" - "st1 { v12.h }[4], [x24], #0x2\n" - "st1 { v16.h }[4], [x23], #0x2\n" - "st1 { v20.h }[4], [x22], #0x2\n" - "st1 { v24.h }[4], [x21], #0x2\n" - "st1 { v28.h }[4], [x20], #0x2\n" + "st1 { v12.h }[4], [x25], #0x2\n" + "st1 { v16.h }[4], [x24], #0x2\n" + "st1 { v20.h }[4], [x23], #0x2\n" + "st1 { v24.h }[4], [x22], #0x2\n" + "st1 { v28.h }[4], [x21], #0x2\n" "tbz x16, #0, 160f\n" "st1 { v8.b }[10], [x17]\n" - "st1 { v12.b }[10], [x24]\n" - "st1 { v16.b }[10], [x23]\n" - "st1 { v20.b }[10], [x22]\n" - "st1 { v24.b }[10], [x21]\n" - "st1 { v28.b }[10], [x20]\n" + "st1 { v12.b }[10], [x25]\n" + "st1 { v16.b }[10], [x24]\n" + "st1 { v20.b }[10], [x23]\n" + "st1 { v24.b }[10], [x22]\n" + "st1 { v28.b }[10], [x21]\n" "b 160f\n" "155:" // Height 6: Partial direct writeback: partial_1_8 "tbz x16, #0, 160f\n" "st1 { v8.b }[8], [x17]\n" - "st1 { v12.b }[8], [x24]\n" - "st1 { v16.b }[8], [x23]\n" - "st1 { v20.b }[8], [x22]\n" - "st1 { v24.b }[8], [x21]\n" - "st1 { v28.b }[8], [x20]\n" + 
"st1 { v12.b }[8], [x25]\n" + "st1 { v16.b }[8], [x24]\n" + "st1 { v20.b }[8], [x23]\n" + "st1 { v24.b }[8], [x22]\n" + "st1 { v28.b }[8], [x21]\n" "b 160f\n" "156:" // Height 6: Partial direct writeback: partial_4_0 "tbz x16, #2, 158f\n" "str s8, [x17], #0x4\n" - "str s12, [x24], #0x4\n" - "str s16, [x23], #0x4\n" - "str s20, [x22], #0x4\n" - "str s24, [x21], #0x4\n" - "str s28, [x20], #0x4\n" + "str s12, [x25], #0x4\n" + "str s16, [x24], #0x4\n" + "str s20, [x23], #0x4\n" + "str s24, [x22], #0x4\n" + "str s28, [x21], #0x4\n" "tbz x16, #1, 157f\n" "st1 { v8.h }[2], [x17], #0x2\n" - "st1 { v12.h }[2], [x24], #0x2\n" - "st1 { v16.h }[2], [x23], #0x2\n" - "st1 { v20.h }[2], [x22], #0x2\n" - "st1 { v24.h }[2], [x21], #0x2\n" - "st1 { v28.h }[2], [x20], #0x2\n" + "st1 { v12.h }[2], [x25], #0x2\n" + "st1 { v16.h }[2], [x24], #0x2\n" + "st1 { v20.h }[2], [x23], #0x2\n" + "st1 { v24.h }[2], [x22], #0x2\n" + "st1 { v28.h }[2], [x21], #0x2\n" "tbz x16, #0, 160f\n" "st1 { v8.b }[6], [x17]\n" - "st1 { v12.b }[6], [x24]\n" - "st1 { v16.b }[6], [x23]\n" - "st1 { v20.b }[6], [x22]\n" - "st1 { v24.b }[6], [x21]\n" - "st1 { v28.b }[6], [x20]\n" + "st1 { v12.b }[6], [x25]\n" + "st1 { v16.b }[6], [x24]\n" + "st1 { v20.b }[6], [x23]\n" + "st1 { v24.b }[6], [x22]\n" + "st1 { v28.b }[6], [x21]\n" "b 160f\n" "157:" // Height 6: Partial direct writeback: partial_1_4 "tbz x16, #0, 160f\n" "st1 { v8.b }[4], [x17]\n" - "st1 { v12.b }[4], [x24]\n" - "st1 { v16.b }[4], [x23]\n" - "st1 { v20.b }[4], [x22]\n" - "st1 { v24.b }[4], [x21]\n" - "st1 { v28.b }[4], [x20]\n" + "st1 { v12.b }[4], [x25]\n" + "st1 { v16.b }[4], [x24]\n" + "st1 { v20.b }[4], [x23]\n" + "st1 { v24.b }[4], [x22]\n" + "st1 { v28.b }[4], [x21]\n" "b 160f\n" "158:" // Height 6: Partial direct writeback: partial_2_0 "tbz x16, #1, 159f\n" "str h8, [x17], #0x2\n" - "str h12, [x24], #0x2\n" - "str h16, [x23], #0x2\n" - "str h20, [x22], #0x2\n" - "str h24, [x21], #0x2\n" - "str h28, [x20], #0x2\n" + "str h12, [x25], #0x2\n" + "str h16, [x24], #0x2\n" + "str h20, [x23], #0x2\n" + "str h24, [x22], #0x2\n" + "str h28, [x21], #0x2\n" "tbz x16, #0, 160f\n" "st1 { v8.b }[2], [x17]\n" - "st1 { v12.b }[2], [x24]\n" - "st1 { v16.b }[2], [x23]\n" - "st1 { v20.b }[2], [x22]\n" - "st1 { v24.b }[2], [x21]\n" - "st1 { v28.b }[2], [x20]\n" + "st1 { v12.b }[2], [x25]\n" + "st1 { v16.b }[2], [x24]\n" + "st1 { v20.b }[2], [x23]\n" + "st1 { v24.b }[2], [x22]\n" + "st1 { v28.b }[2], [x21]\n" "b 160f\n" "159:" // Height 6: Partial direct writeback: partial_1_0 "str b8, [x17, #0x0]\n" - "str b12, [x24, #0x0]\n" - "str b16, [x23, #0x0]\n" - "str b20, [x22, #0x0]\n" - "str b24, [x21, #0x0]\n" - "str b28, [x20, #0x0]\n" + "str b12, [x25, #0x0]\n" + "str b16, [x24, #0x0]\n" + "str b20, [x23, #0x0]\n" + "str b24, [x22, #0x0]\n" + "str b28, [x21, #0x0]\n" "160:" // Height 6: Partial direct writeback: Done "b 162f\n" "161:" // Height 6: Full writeback "str q8, [x17, #0x0]\n" "add x17, x17, #0x10\n" - "str q12, [x24, #0x0]\n" - "str q16, [x23, #0x0]\n" - "str q20, [x22, #0x0]\n" - "str q24, [x21, #0x0]\n" - "str q28, [x20, #0x0]\n" + "str q12, [x25, #0x0]\n" + "str q16, [x24, #0x0]\n" + "str q20, [x23, #0x0]\n" + "str q24, [x22, #0x0]\n" + "str q28, [x21, #0x0]\n" "162:" // Height 6: Writeback done "subs x16, x16, #0x10\n" "bgt 137b\n" @@ -3761,7 +3760,6 @@ void a64_hybrid_s8qs_dot_6x16_a55 ( "madd %x[input_ptr], x20, x21, %x[input_ptr]\n" "b 1b\n" "164:" // Exit - : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr) : [args_ptr] "r" (&ka), [c_offset] "I" 
(offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [flags] "r" (flags), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_multiplier_ptr] "I" (offsetof(KernelArgs, multiplier_ptr)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_shift_ptr] "I" (offsetof(KernelArgs, shift_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"