author | Michael Tyler <michael.tyler@arm.com> | 2023-04-12 17:43:17 +0100
committer | michael.tyler <michael.tyler@arm.com> | 2023-06-05 15:57:58 +0000
commit | 74921eee924625426429044decefe3673561b174 (patch)
tree | 654da1a95e3d42d6af8ad1ff27bb40d77b1fd8c5 /src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_mmla_4x16
parent | df5d9878008be9b60586df97ebfff197abb5195e (diff)
download | ComputeLibrary-74921eee924625426429044decefe3673561b174.tar.gz
Update CPU kernel implementations and guard directives
Resolves COMPMID-6023
Change-Id: I868975d14c4f98af6716726feda22405a6a4c891
Signed-off-by: Michael Tyler <michael.tyler@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9686
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
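Most of the churn in this patch is a largely mechanical re-allocation of NEON vector and general-purpose registers inside the UMMLA inner loops (for example `ldr q5`/`ldr q6` becoming `ldr q25`/`ldr q24`, and quantization-parameter address arithmetic moving from `x23` to `x20`); the encoded `.inst` words change accordingly while the computation itself is unchanged. At the heart of the kernel, each `ummla vD.4s, vN.16b, vM.16b` multiplies a 2x8 matrix of unsigned 8-bit values (first source) by an 8x2 matrix (second source), accumulating into a 2x2 matrix of 32-bit values in the destination. Below is a minimal sketch of that single step via the ACLE intrinsic; the helper name `ummla_step` is illustrative only and not part of the library.

```c
// A minimal sketch (illustrative, not code from this patch) of what one
// "ummla" step in the kernel computes, using the ACLE intrinsic
// vmmlaq_u32. Requires i8mm support, e.g. -march=armv8.6-a+i8mm
// (__ARM_FEATURE_MATMUL_INT8).
#include <arm_neon.h>

uint32x4_t ummla_step(uint32x4_t acc, uint8x16_t a, uint8x16_t b)
{
    // acc (a 2x2 matrix of u32) += (2x8 u8 matrix in a) * (8x2 u8 matrix
    // in b); this is the operation behind every ".inst ... // ummla"
    // line in the diff below.
    return vmmlaq_u32(acc, a, b);
}
```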
Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_mmla_4x16')
-rw-r--r-- | src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_mmla_4x16/generic.cpp | 1616
1 file changed, 807 insertions, 809 deletions
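For context, the second half of each "Height" block in the diff performs the standard quantized-output fixup: row-sum correction, bias add, fixed-point rescale (`sqrdmulh` plus a rounding shift via `srshl`), output-offset add, clamp, and `uzp1` narrowing to u8. The sketch below shows the rescale-and-clamp tail with intrinsics, assuming per-layer quantization parameters; the helper name `requantize_lane` and its parameter names are illustrative, not taken from the library.

```c
// A minimal sketch (assumption: not code from this patch) of the
// per-layer requantization sequence the kernel emits as
// sqrdmulh / srshl / add / smin / smax before narrowing to bytes.
#include <arm_neon.h>

int32x4_t requantize_lane(int32x4_t acc, int32_t mul, int32_t right_shift,
                          int32_t c_offset, int32_t minval, int32_t maxval)
{
    // Fixed-point multiply by the per-layer multiplier (SQRDMULH).
    int32x4_t v = vqrdmulhq_s32(acc, vdupq_n_s32(mul));
    // Rounding arithmetic shift right: SRSHL with a negative shift count.
    v = vrshlq_s32(v, vdupq_n_s32(-right_shift));
    // Add the output zero point, then clamp to the representable range;
    // the kernel then packs lanes down to u8 with uzp1.
    v = vaddq_s32(v, vdupq_n_s32(c_offset));
    v = vminq_s32(v, vdupq_n_s32(maxval));
    v = vmaxq_s32(v, vdupq_n_s32(minval));
    return v;
}
```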
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_mmla_4x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_mmla_4x16/generic.cpp index f808cb199d..1335b355ef 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_mmla_4x16/generic.cpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_mmla_4x16/generic.cpp @@ -78,7 +78,6 @@ void a64_hybrid_u8qa_mmla_4x16 ( flags |= 0x20; } __asm__ __volatile__( - "1:" // Row loop "cmp %x[M], #0x4\n" "bge 97f\n" @@ -106,11 +105,11 @@ void a64_hybrid_u8qa_mmla_4x16 ( "4:" // Height 1: String loop "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n" "ldr w25, [x20, x26, LSL #0x2]\n" - "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n" + "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n" "tbz %x[flags], #3, 5f\n" - "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n" - "add x21, x21, x20, LSL #3\n" - "ldr x24, [x21, #0x0]\n" + "ldr x20, [%x[input_ptr], x26, LSL #0x3]\n" + "add x20, x20, x21, LSL #3\n" + "ldr x24, [x20, #0x0]\n" "cbnz x26, 6f\n" "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n" "add x24, x24, x20\n" @@ -131,35 +130,35 @@ void a64_hybrid_u8qa_mmla_4x16 ( "ldr q4, [x28, #0x60]\n" "blt 9f\n" "7:" // Height 1: Multiply loop: Main loop head - "trn1 v0.2d, v1.2d, v2.2d\n" + "trn1 v0.2d, v1.2d, v27.2d\n" ".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n" - "ldr q5, [x28, #0x70]\n" - "trn2 v1.2d, v1.2d, v2.2d\n" + "ldr q25, [x28, #0x70]\n" + "trn2 v1.2d, v1.2d, v27.2d\n" ".inst 0x6e86a414 // ummla v20.4s, v0.16b, v6.16b\n" - "ldr q6, [x28, #0x80]\n" + "ldr q24, [x28, #0x80]\n" ".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n" - "ldr q7, [x28, #0x90]\n" + "ldr q30, [x28, #0x90]\n" ".inst 0x6e88a415 // ummla v21.4s, v0.16b, v8.16b\n" - "ldr q8, [x28, #0xa0]\n" + "ldr q29, [x28, #0xa0]\n" ".inst 0x6e89a412 // ummla v18.4s, v0.16b, v9.16b\n" - "ldr q9, [x28, #0xb0]\n" + "ldr q28, [x28, #0xb0]\n" ".inst 0x6e8aa416 // ummla v22.4s, v0.16b, v10.16b\n" - "ldr q10, [x28, #0xc0]\n" + "ldr q27, [x28, #0xc0]\n" ".inst 0x6e84a413 // ummla v19.4s, v0.16b, v4.16b\n" - "ldr q4, [x28, #0xd0]\n" - ".inst 0x6e85a417 // ummla v23.4s, v0.16b, v5.16b\n" - "ldr q5, [x28, #0xe0]\n" - ".inst 0x6e86a430 // ummla v16.4s, v1.16b, v6.16b\n" - "ldr q6, [x28, #0xf0]\n" - ".inst 0x6e87a434 // ummla v20.4s, v1.16b, v7.16b\n" + "ldr q26, [x28, #0xd0]\n" + ".inst 0x6e99a417 // ummla v23.4s, v0.16b, v25.16b\n" + "ldr q25, [x28, #0xe0]\n" + ".inst 0x6e98a430 // ummla v16.4s, v1.16b, v24.16b\n" + "ldr q24, [x28, #0xf0]\n" + ".inst 0x6e9ea434 // ummla v20.4s, v1.16b, v30.16b\n" "add x24, x24, #0x10\n" - ".inst 0x6e88a431 // ummla v17.4s, v1.16b, v8.16b\n" + ".inst 0x6e9da431 // ummla v17.4s, v1.16b, v29.16b\n" "add x28, x28, #0x100\n" - ".inst 0x6e89a435 // ummla v21.4s, v1.16b, v9.16b\n" - ".inst 0x6e8aa432 // ummla v18.4s, v1.16b, v10.16b\n" - ".inst 0x6e84a436 // ummla v22.4s, v1.16b, v4.16b\n" - ".inst 0x6e85a433 // ummla v19.4s, v1.16b, v5.16b\n" - ".inst 0x6e86a437 // ummla v23.4s, v1.16b, v6.16b\n" + ".inst 0x6e9ca435 // ummla v21.4s, v1.16b, v28.16b\n" + ".inst 0x6e9ba432 // ummla v18.4s, v1.16b, v27.16b\n" + ".inst 0x6e9aa436 // ummla v22.4s, v1.16b, v26.16b\n" + ".inst 0x6e99a433 // ummla v19.4s, v1.16b, v25.16b\n" + ".inst 0x6e98a437 // ummla v23.4s, v1.16b, v24.16b\n" "tbnz %x[flags], #31, 8f\n" ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n" ".inst 0x6e8f942b // udot v11.4s, v1.16b, v15.16b\n" @@ -177,36 +176,36 @@ void a64_hybrid_u8qa_mmla_4x16 ( "prfm pldl1keep, [x24, #0x80]\n" "bge 7b\n" "9:" 
// Height 1: Multiply loop: Single iteration only - "trn1 v0.2d, v1.2d, v2.2d\n" + "trn1 v0.2d, v1.2d, v24.2d\n" ".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n" - "ldr q5, [x28, #0x70]\n" - "trn2 v1.2d, v1.2d, v2.2d\n" + "ldr q25, [x28, #0x70]\n" + "trn2 v1.2d, v1.2d, v24.2d\n" ".inst 0x6e86a414 // ummla v20.4s, v0.16b, v6.16b\n" - "ldr q6, [x28, #0x80]\n" + "ldr q24, [x28, #0x80]\n" ".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n" - "ldr q7, [x28, #0x90]\n" + "ldr q30, [x28, #0x90]\n" ".inst 0x6e88a415 // ummla v21.4s, v0.16b, v8.16b\n" - "ldr q8, [x28, #0xa0]\n" + "ldr q29, [x28, #0xa0]\n" ".inst 0x6e89a412 // ummla v18.4s, v0.16b, v9.16b\n" - "ldr q9, [x28, #0xb0]\n" + "ldr q28, [x28, #0xb0]\n" ".inst 0x6e8aa416 // ummla v22.4s, v0.16b, v10.16b\n" - "ldr q10, [x28, #0xc0]\n" + "ldr q27, [x28, #0xc0]\n" ".inst 0x6e84a413 // ummla v19.4s, v0.16b, v4.16b\n" - "ldr q4, [x28, #0xd0]\n" - ".inst 0x6e85a417 // ummla v23.4s, v0.16b, v5.16b\n" - "ldr q5, [x28, #0xe0]\n" - ".inst 0x6e86a430 // ummla v16.4s, v1.16b, v6.16b\n" - "ldr q6, [x28, #0xf0]\n" + "ldr q26, [x28, #0xd0]\n" + ".inst 0x6e99a417 // ummla v23.4s, v0.16b, v25.16b\n" + "ldr q25, [x28, #0xe0]\n" + ".inst 0x6e98a430 // ummla v16.4s, v1.16b, v24.16b\n" + "ldr q24, [x28, #0xf0]\n" "sub x25, x25, #0x10\n" - ".inst 0x6e87a434 // ummla v20.4s, v1.16b, v7.16b\n" - ".inst 0x6e88a431 // ummla v17.4s, v1.16b, v8.16b\n" + ".inst 0x6e9ea434 // ummla v20.4s, v1.16b, v30.16b\n" + ".inst 0x6e9da431 // ummla v17.4s, v1.16b, v29.16b\n" "add x24, x24, #0x10\n" "add x28, x28, #0x100\n" - ".inst 0x6e89a435 // ummla v21.4s, v1.16b, v9.16b\n" - ".inst 0x6e8aa432 // ummla v18.4s, v1.16b, v10.16b\n" - ".inst 0x6e84a436 // ummla v22.4s, v1.16b, v4.16b\n" - ".inst 0x6e85a433 // ummla v19.4s, v1.16b, v5.16b\n" - ".inst 0x6e86a437 // ummla v23.4s, v1.16b, v6.16b\n" + ".inst 0x6e9ca435 // ummla v21.4s, v1.16b, v28.16b\n" + ".inst 0x6e9ba432 // ummla v18.4s, v1.16b, v27.16b\n" + ".inst 0x6e9aa436 // ummla v22.4s, v1.16b, v26.16b\n" + ".inst 0x6e99a433 // ummla v19.4s, v1.16b, v25.16b\n" + ".inst 0x6e98a437 // ummla v23.4s, v1.16b, v24.16b\n" "tbnz %x[flags], #31, 10f\n" ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n" ".inst 0x6e8f942b // udot v11.4s, v1.16b, v15.16b\n" @@ -217,29 +216,29 @@ void a64_hybrid_u8qa_mmla_4x16 ( "cmp x25, #0x8\n" "blt 14f\n" "12:" // Height 1: Multiply loop: Odd block loop - "ldr d1, [x24], #0x8\n" - "trn1 v0.2d, v1.2d, v2.2d\n" + "ldr d25, [x24], #0x8\n" + "trn1 v0.2d, v25.2d, v24.2d\n" "tbnz %x[flags], #31, 13f\n" ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n" "13:" // Height 1: Multiply loop: unique 3: skip row sum - "ldr q8, [x28, #0x0]\n" - "ldr q9, [x28, #0x10]\n" - ".inst 0x6e88a410 // ummla v16.4s, v0.16b, v8.16b\n" + "ldr q24, [x28, #0x0]\n" + "ldr q26, [x28, #0x10]\n" + ".inst 0x6e98a410 // ummla v16.4s, v0.16b, v24.16b\n" "sub x25, x25, #0x8\n" - "ldr q10, [x28, #0x20]\n" - "ldr q4, [x28, #0x30]\n" + "ldr q25, [x28, #0x20]\n" + "ldr q24, [x28, #0x30]\n" "cmp x25, #0x8\n" - ".inst 0x6e89a414 // ummla v20.4s, v0.16b, v9.16b\n" - "ldr q5, [x28, #0x40]\n" - "ldr q6, [x28, #0x50]\n" - ".inst 0x6e8aa411 // ummla v17.4s, v0.16b, v10.16b\n" - ".inst 0x6e84a415 // ummla v21.4s, v0.16b, v4.16b\n" - "ldr q7, [x28, #0x60]\n" - "ldr q8, [x28, #0x70]\n" - ".inst 0x6e85a412 // ummla v18.4s, v0.16b, v5.16b\n" - ".inst 0x6e86a416 // ummla v22.4s, v0.16b, v6.16b\n" - ".inst 0x6e87a413 // ummla v19.4s, v0.16b, v7.16b\n" - ".inst 0x6e88a417 // ummla v23.4s, v0.16b, v8.16b\n" + ".inst 0x6e9aa414 // ummla v20.4s, v0.16b, 
v26.16b\n" + "ldr q27, [x28, #0x40]\n" + "ldr q26, [x28, #0x50]\n" + ".inst 0x6e99a411 // ummla v17.4s, v0.16b, v25.16b\n" + ".inst 0x6e98a415 // ummla v21.4s, v0.16b, v24.16b\n" + "ldr q25, [x28, #0x60]\n" + "ldr q24, [x28, #0x70]\n" + ".inst 0x6e9ba412 // ummla v18.4s, v0.16b, v27.16b\n" + ".inst 0x6e9aa416 // ummla v22.4s, v0.16b, v26.16b\n" + ".inst 0x6e99a413 // ummla v19.4s, v0.16b, v25.16b\n" + ".inst 0x6e98a417 // ummla v23.4s, v0.16b, v24.16b\n" "add x28, x28, #0x80\n" "bge 12b\n" "14:" // Height 1: Multiply loop: Skip odd blocks @@ -264,26 +263,26 @@ void a64_hybrid_u8qa_mmla_4x16 ( "17:" // Height 1: Multiply loop: Ragged operand read: partial_1_0 "ldr b1, [x24, #0x0]\n" "18:" // Height 1: Multiply loop: Ragged operand read: Done - "trn1 v0.2d, v1.2d, v2.2d\n" + "trn1 v0.2d, v1.2d, v24.2d\n" "tbnz %x[flags], #31, 19f\n" ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n" "19:" // Height 1: Multiply loop: unique 4: skip row sum - "ldr q10, [x28, #0x0]\n" - "ldr q4, [x28, #0x10]\n" - ".inst 0x6e8aa410 // ummla v16.4s, v0.16b, v10.16b\n" - ".inst 0x6e84a414 // ummla v20.4s, v0.16b, v4.16b\n" - "ldr q5, [x28, #0x20]\n" - "ldr q6, [x28, #0x30]\n" - ".inst 0x6e85a411 // ummla v17.4s, v0.16b, v5.16b\n" - ".inst 0x6e86a415 // ummla v21.4s, v0.16b, v6.16b\n" - "ldr q7, [x28, #0x40]\n" - "ldr q8, [x28, #0x50]\n" - ".inst 0x6e87a412 // ummla v18.4s, v0.16b, v7.16b\n" - ".inst 0x6e88a416 // ummla v22.4s, v0.16b, v8.16b\n" - "ldr q9, [x28, #0x60]\n" - "ldr q10, [x28, #0x70]\n" - ".inst 0x6e89a413 // ummla v19.4s, v0.16b, v9.16b\n" - ".inst 0x6e8aa417 // ummla v23.4s, v0.16b, v10.16b\n" + "ldr q25, [x28, #0x0]\n" + "ldr q24, [x28, #0x10]\n" + ".inst 0x6e99a410 // ummla v16.4s, v0.16b, v25.16b\n" + ".inst 0x6e98a414 // ummla v20.4s, v0.16b, v24.16b\n" + "ldr q25, [x28, #0x20]\n" + "ldr q24, [x28, #0x30]\n" + ".inst 0x6e99a411 // ummla v17.4s, v0.16b, v25.16b\n" + ".inst 0x6e98a415 // ummla v21.4s, v0.16b, v24.16b\n" + "ldr q25, [x28, #0x40]\n" + "ldr q24, [x28, #0x50]\n" + ".inst 0x6e99a412 // ummla v18.4s, v0.16b, v25.16b\n" + ".inst 0x6e98a416 // ummla v22.4s, v0.16b, v24.16b\n" + "ldr q25, [x28, #0x60]\n" + "ldr q24, [x28, #0x70]\n" + ".inst 0x6e99a413 // ummla v19.4s, v0.16b, v25.16b\n" + ".inst 0x6e98a417 // ummla v23.4s, v0.16b, v24.16b\n" "add x28, x28, #0x80\n" "20:" // Height 1: Multiply loop: No odd multiplies "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n" @@ -297,75 +296,75 @@ void a64_hybrid_u8qa_mmla_4x16 ( "uzp1 v19.2d, v19.2d, v23.2d\n" "mov v23.16b, v16.16b\n" "tbnz %x[flags], #31, 21f\n" - "add x23, %x[qp], %[b_offset]\n" - "ld1r { v1.4s }, [x23]\n" + "add x20, %x[qp], %[b_offset]\n" + "ld1r { v16.4s }, [x20]\n" "addp v11.4s, v11.4s, v11.4s\n" - "neg v1.4s, v1.4s\n" + "neg v16.4s, v16.4s\n" "dup v11.4s, v11.s[0]\n" - "mul v11.4s, v11.4s, v1.4s\n" + "mul v11.4s, v11.4s, v16.4s\n" "21:" // Height 1: skip row sum fixup - "ldr q0, [x10, #0x0]\n" - "ldr q1, [x10, #0x10]\n" + "ldr q24, [x10, #0x0]\n" + "ldr q22, [x10, #0x10]\n" "add v23.4s, v23.4s, v11.4s\n" "add v17.4s, v17.4s, v11.4s\n" - "ldr q2, [x10, #0x20]\n" - "ldr q3, [x10, #0x30]\n" + "ldr q21, [x10, #0x20]\n" + "ldr q20, [x10, #0x30]\n" "add v18.4s, v18.4s, v11.4s\n" "add v19.4s, v19.4s, v11.4s\n" - "add x23, %x[qp], %[per_layer_mul]\n" - "ld1r { v4.4s }, [x23]\n" + "add x20, %x[qp], %[per_layer_mul]\n" + "ld1r { v16.4s }, [x20]\n" "orr %x[flags], %x[flags], #0x80000000\n" - "add v23.4s, v23.4s, v0.4s\n" - "add v17.4s, v17.4s, v1.4s\n" - "add v18.4s, v18.4s, v2.4s\n" - "add x23, %x[qp], %[per_layer_right_shift]\n" - 
"ld1r { v0.4s }, [x23]\n" - "add v19.4s, v19.4s, v3.4s\n" - "sqrdmulh v23.4s, v23.4s, v4.4s\n" + "add v23.4s, v23.4s, v24.4s\n" + "add v17.4s, v17.4s, v22.4s\n" + "add v18.4s, v18.4s, v21.4s\n" + "add x20, %x[qp], %[per_layer_right_shift]\n" + "ld1r { v0.4s }, [x20]\n" + "add v19.4s, v19.4s, v20.4s\n" + "sqrdmulh v23.4s, v23.4s, v16.4s\n" "add x10, x10, #0x40\n" - "sqrdmulh v17.4s, v17.4s, v4.4s\n" - "sqrdmulh v18.4s, v18.4s, v4.4s\n" - "sqrdmulh v19.4s, v19.4s, v4.4s\n" + "sqrdmulh v17.4s, v17.4s, v16.4s\n" + "sqrdmulh v18.4s, v18.4s, v16.4s\n" + "sqrdmulh v19.4s, v19.4s, v16.4s\n" "tbz %x[flags], #5, 22f\n" - "and v4.16b, v23.16b, v0.16b\n" - "and v5.16b, v17.16b, v0.16b\n" - "and v6.16b, v18.16b, v0.16b\n" - "and v7.16b, v19.16b, v0.16b\n" - "sshr v4.4s, v4.4s, #0x1f\n" - "sshr v5.4s, v5.4s, #0x1f\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sshr v7.4s, v7.4s, #0x1f\n" - "sqadd v23.4s, v23.4s, v4.4s\n" - "sqadd v17.4s, v17.4s, v5.4s\n" - "sqadd v18.4s, v18.4s, v6.4s\n" - "sqadd v19.4s, v19.4s, v7.4s\n" + "and v22.16b, v23.16b, v0.16b\n" + "and v21.16b, v17.16b, v0.16b\n" + "and v20.16b, v18.16b, v0.16b\n" + "and v16.16b, v19.16b, v0.16b\n" + "sshr v22.4s, v22.4s, #0x1f\n" + "sshr v21.4s, v21.4s, #0x1f\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "sshr v16.4s, v16.4s, #0x1f\n" + "sqadd v23.4s, v23.4s, v22.4s\n" + "sqadd v17.4s, v17.4s, v21.4s\n" + "sqadd v18.4s, v18.4s, v20.4s\n" + "sqadd v19.4s, v19.4s, v16.4s\n" "22:" // Height 1: no shift correction - "add x23, %x[qp], %[c_offset]\n" - "ld1r { v4.4s }, [x23]\n" + "add x20, %x[qp], %[c_offset]\n" + "ld1r { v21.4s }, [x20]\n" "srshl v23.4s, v23.4s, v0.4s\n" "srshl v17.4s, v17.4s, v0.4s\n" "srshl v18.4s, v18.4s, v0.4s\n" "srshl v19.4s, v19.4s, v0.4s\n" - "add x23, %x[qp], %[maxval]\n" - "ld1r { v6.4s }, [x23]\n" - "add v23.4s, v23.4s, v4.4s\n" - "add v17.4s, v17.4s, v4.4s\n" - "add x23, %x[qp], %[minval]\n" - "ld1r { v5.4s }, [x23]\n" - "add v18.4s, v18.4s, v4.4s\n" - "add v19.4s, v19.4s, v4.4s\n" + "add x20, %x[qp], %[maxval]\n" + "ld1r { v20.4s }, [x20]\n" + "add v23.4s, v23.4s, v21.4s\n" + "add v17.4s, v17.4s, v21.4s\n" + "add x20, %x[qp], %[minval]\n" + "ld1r { v16.4s }, [x20]\n" + "add v18.4s, v18.4s, v21.4s\n" + "add v19.4s, v19.4s, v21.4s\n" "cmp x9, #0x10\n" - "smin v23.4s, v23.4s, v6.4s\n" - "smin v17.4s, v17.4s, v6.4s\n" - "smin v18.4s, v18.4s, v6.4s\n" - "smin v19.4s, v19.4s, v6.4s\n" - "smax v23.4s, v23.4s, v5.4s\n" - "smax v17.4s, v17.4s, v5.4s\n" - "smax v18.4s, v18.4s, v5.4s\n" - "smax v19.4s, v19.4s, v5.4s\n" + "smin v23.4s, v23.4s, v20.4s\n" + "smin v17.4s, v17.4s, v20.4s\n" + "smin v18.4s, v18.4s, v20.4s\n" + "smin v19.4s, v19.4s, v20.4s\n" + "smax v23.4s, v23.4s, v16.4s\n" + "smax v17.4s, v17.4s, v16.4s\n" + "smax v18.4s, v18.4s, v16.4s\n" + "smax v19.4s, v19.4s, v16.4s\n" "uzp1 v23.8h, v23.8h, v17.8h\n" - "uzp1 v17.8h, v18.8h, v19.8h\n" - "uzp1 v23.16b, v23.16b, v17.16b\n" + "uzp1 v16.8h, v18.8h, v19.8h\n" + "uzp1 v23.16b, v23.16b, v16.16b\n" "bge 31f\n" "tbz x9, #3, 26f\n" "str d23, [x27], #0x8\n" @@ -442,12 +441,12 @@ void a64_hybrid_u8qa_mmla_4x16 ( "36:" // Height 2: String loop "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n" "ldr w25, [x20, x26, LSL #0x2]\n" - "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n" + "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n" "tbz %x[flags], #3, 37f\n" - "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n" - "add x21, x21, x20, LSL #3\n" - "ldr x24, [x21, #0x0]\n" - "ldr x23, [x21, #0x8]\n" + "ldr x20, [%x[input_ptr], x26, LSL #0x3]\n" + "add x20, x20, x21, LSL #3\n" + "ldr x24, [x20, 
#0x0]\n" + "ldr x23, [x20, #0x8]\n" "cbnz x26, 38f\n" "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n" "add x24, x24, x20\n" @@ -455,7 +454,7 @@ void a64_hybrid_u8qa_mmla_4x16 ( "b 38f\n" "37:" // Height 2: setup direct input "mov x24, %x[input_ptr]\n" - "add x23, x24, x20\n" + "add x23, x24, x21\n" "38:" // Height 2: input setup done "cmp x25, #0x10\n" "blt 43f\n" @@ -473,34 +472,34 @@ void a64_hybrid_u8qa_mmla_4x16 ( "39:" // Height 2: Multiply loop: Main loop head "trn1 v0.2d, v1.2d, v2.2d\n" ".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n" - "ldr q5, [x28, #0x70]\n" + "ldr q25, [x28, #0x70]\n" ".inst 0x6e86a414 // ummla v20.4s, v0.16b, v6.16b\n" - "ldr q6, [x28, #0x80]\n" + "ldr q24, [x28, #0x80]\n" "trn2 v1.2d, v1.2d, v2.2d\n" ".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n" - "ldr q7, [x28, #0x90]\n" + "ldr q30, [x28, #0x90]\n" ".inst 0x6e88a415 // ummla v21.4s, v0.16b, v8.16b\n" - "ldr q8, [x28, #0xa0]\n" + "ldr q29, [x28, #0xa0]\n" ".inst 0x6e89a412 // ummla v18.4s, v0.16b, v9.16b\n" - "ldr q9, [x28, #0xb0]\n" + "ldr q28, [x28, #0xb0]\n" ".inst 0x6e8aa416 // ummla v22.4s, v0.16b, v10.16b\n" - "ldr q10, [x28, #0xc0]\n" + "ldr q27, [x28, #0xc0]\n" ".inst 0x6e84a413 // ummla v19.4s, v0.16b, v4.16b\n" - "ldr q4, [x28, #0xd0]\n" - ".inst 0x6e85a417 // ummla v23.4s, v0.16b, v5.16b\n" - "ldr q5, [x28, #0xe0]\n" - ".inst 0x6e86a430 // ummla v16.4s, v1.16b, v6.16b\n" - "ldr q6, [x28, #0xf0]\n" - ".inst 0x6e87a434 // ummla v20.4s, v1.16b, v7.16b\n" + "ldr q26, [x28, #0xd0]\n" + ".inst 0x6e99a417 // ummla v23.4s, v0.16b, v25.16b\n" + "ldr q25, [x28, #0xe0]\n" + ".inst 0x6e98a430 // ummla v16.4s, v1.16b, v24.16b\n" + "ldr q24, [x28, #0xf0]\n" + ".inst 0x6e9ea434 // ummla v20.4s, v1.16b, v30.16b\n" "add x24, x24, #0x10\n" - ".inst 0x6e88a431 // ummla v17.4s, v1.16b, v8.16b\n" + ".inst 0x6e9da431 // ummla v17.4s, v1.16b, v29.16b\n" "add x23, x23, #0x10\n" - ".inst 0x6e89a435 // ummla v21.4s, v1.16b, v9.16b\n" + ".inst 0x6e9ca435 // ummla v21.4s, v1.16b, v28.16b\n" "add x28, x28, #0x100\n" - ".inst 0x6e8aa432 // ummla v18.4s, v1.16b, v10.16b\n" - ".inst 0x6e84a436 // ummla v22.4s, v1.16b, v4.16b\n" - ".inst 0x6e85a433 // ummla v19.4s, v1.16b, v5.16b\n" - ".inst 0x6e86a437 // ummla v23.4s, v1.16b, v6.16b\n" + ".inst 0x6e9ba432 // ummla v18.4s, v1.16b, v27.16b\n" + ".inst 0x6e9aa436 // ummla v22.4s, v1.16b, v26.16b\n" + ".inst 0x6e99a433 // ummla v19.4s, v1.16b, v25.16b\n" + ".inst 0x6e98a437 // ummla v23.4s, v1.16b, v24.16b\n" "tbnz %x[flags], #31, 40f\n" ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n" ".inst 0x6e8f942b // udot v11.4s, v1.16b, v15.16b\n" @@ -522,35 +521,35 @@ void a64_hybrid_u8qa_mmla_4x16 ( "41:" // Height 2: Multiply loop: Single iteration only "trn1 v0.2d, v1.2d, v2.2d\n" ".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n" - "ldr q5, [x28, #0x70]\n" + "ldr q25, [x28, #0x70]\n" ".inst 0x6e86a414 // ummla v20.4s, v0.16b, v6.16b\n" - "ldr q6, [x28, #0x80]\n" + "ldr q24, [x28, #0x80]\n" "trn2 v1.2d, v1.2d, v2.2d\n" ".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n" - "ldr q7, [x28, #0x90]\n" + "ldr q30, [x28, #0x90]\n" ".inst 0x6e88a415 // ummla v21.4s, v0.16b, v8.16b\n" - "ldr q8, [x28, #0xa0]\n" + "ldr q29, [x28, #0xa0]\n" ".inst 0x6e89a412 // ummla v18.4s, v0.16b, v9.16b\n" - "ldr q9, [x28, #0xb0]\n" + "ldr q28, [x28, #0xb0]\n" ".inst 0x6e8aa416 // ummla v22.4s, v0.16b, v10.16b\n" - "ldr q10, [x28, #0xc0]\n" + "ldr q27, [x28, #0xc0]\n" ".inst 0x6e84a413 // ummla v19.4s, v0.16b, v4.16b\n" - "ldr q4, [x28, #0xd0]\n" - ".inst 0x6e85a417 // ummla v23.4s, 
v0.16b, v5.16b\n" - "ldr q5, [x28, #0xe0]\n" - ".inst 0x6e86a430 // ummla v16.4s, v1.16b, v6.16b\n" - "ldr q6, [x28, #0xf0]\n" + "ldr q26, [x28, #0xd0]\n" + ".inst 0x6e99a417 // ummla v23.4s, v0.16b, v25.16b\n" + "ldr q25, [x28, #0xe0]\n" + ".inst 0x6e98a430 // ummla v16.4s, v1.16b, v24.16b\n" + "ldr q24, [x28, #0xf0]\n" "sub x25, x25, #0x10\n" - ".inst 0x6e87a434 // ummla v20.4s, v1.16b, v7.16b\n" - ".inst 0x6e88a431 // ummla v17.4s, v1.16b, v8.16b\n" + ".inst 0x6e9ea434 // ummla v20.4s, v1.16b, v30.16b\n" + ".inst 0x6e9da431 // ummla v17.4s, v1.16b, v29.16b\n" "add x24, x24, #0x10\n" "add x23, x23, #0x10\n" - ".inst 0x6e89a435 // ummla v21.4s, v1.16b, v9.16b\n" - ".inst 0x6e8aa432 // ummla v18.4s, v1.16b, v10.16b\n" + ".inst 0x6e9ca435 // ummla v21.4s, v1.16b, v28.16b\n" + ".inst 0x6e9ba432 // ummla v18.4s, v1.16b, v27.16b\n" "add x28, x28, #0x100\n" - ".inst 0x6e84a436 // ummla v22.4s, v1.16b, v4.16b\n" - ".inst 0x6e85a433 // ummla v19.4s, v1.16b, v5.16b\n" - ".inst 0x6e86a437 // ummla v23.4s, v1.16b, v6.16b\n" + ".inst 0x6e9aa436 // ummla v22.4s, v1.16b, v26.16b\n" + ".inst 0x6e99a433 // ummla v19.4s, v1.16b, v25.16b\n" + ".inst 0x6e98a437 // ummla v23.4s, v1.16b, v24.16b\n" "tbnz %x[flags], #31, 42f\n" ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n" ".inst 0x6e8f942b // udot v11.4s, v1.16b, v15.16b\n" @@ -562,30 +561,30 @@ void a64_hybrid_u8qa_mmla_4x16 ( "cmp x25, #0x8\n" "blt 46f\n" "44:" // Height 2: Multiply loop: Odd block loop - "ldr d1, [x24], #0x8\n" - "ldr d2, [x23], #0x8\n" - "trn1 v0.2d, v1.2d, v2.2d\n" + "ldr d25, [x24], #0x8\n" + "ldr d24, [x23], #0x8\n" + "trn1 v0.2d, v25.2d, v24.2d\n" "tbnz %x[flags], #31, 45f\n" ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n" "45:" // Height 2: Multiply loop: unique 7: skip row sum - "ldr q8, [x28, #0x0]\n" - "ldr q9, [x28, #0x10]\n" - ".inst 0x6e88a410 // ummla v16.4s, v0.16b, v8.16b\n" + "ldr q24, [x28, #0x0]\n" + "ldr q26, [x28, #0x10]\n" + ".inst 0x6e98a410 // ummla v16.4s, v0.16b, v24.16b\n" "sub x25, x25, #0x8\n" - "ldr q10, [x28, #0x20]\n" - "ldr q4, [x28, #0x30]\n" + "ldr q25, [x28, #0x20]\n" + "ldr q24, [x28, #0x30]\n" "cmp x25, #0x8\n" - ".inst 0x6e89a414 // ummla v20.4s, v0.16b, v9.16b\n" - "ldr q5, [x28, #0x40]\n" - "ldr q6, [x28, #0x50]\n" - ".inst 0x6e8aa411 // ummla v17.4s, v0.16b, v10.16b\n" - ".inst 0x6e84a415 // ummla v21.4s, v0.16b, v4.16b\n" - "ldr q7, [x28, #0x60]\n" - "ldr q8, [x28, #0x70]\n" - ".inst 0x6e85a412 // ummla v18.4s, v0.16b, v5.16b\n" - ".inst 0x6e86a416 // ummla v22.4s, v0.16b, v6.16b\n" - ".inst 0x6e87a413 // ummla v19.4s, v0.16b, v7.16b\n" - ".inst 0x6e88a417 // ummla v23.4s, v0.16b, v8.16b\n" + ".inst 0x6e9aa414 // ummla v20.4s, v0.16b, v26.16b\n" + "ldr q27, [x28, #0x40]\n" + "ldr q26, [x28, #0x50]\n" + ".inst 0x6e99a411 // ummla v17.4s, v0.16b, v25.16b\n" + ".inst 0x6e98a415 // ummla v21.4s, v0.16b, v24.16b\n" + "ldr q25, [x28, #0x60]\n" + "ldr q24, [x28, #0x70]\n" + ".inst 0x6e9ba412 // ummla v18.4s, v0.16b, v27.16b\n" + ".inst 0x6e9aa416 // ummla v22.4s, v0.16b, v26.16b\n" + ".inst 0x6e99a413 // ummla v19.4s, v0.16b, v25.16b\n" + ".inst 0x6e98a417 // ummla v23.4s, v0.16b, v24.16b\n" "add x28, x28, #0x80\n" "bge 44b\n" "46:" // Height 2: Multiply loop: Skip odd blocks @@ -621,22 +620,22 @@ void a64_hybrid_u8qa_mmla_4x16 ( "tbnz %x[flags], #31, 51f\n" ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n" "51:" // Height 2: Multiply loop: unique 8: skip row sum - "ldr q10, [x28, #0x0]\n" - "ldr q4, [x28, #0x10]\n" - ".inst 0x6e8aa410 // ummla v16.4s, v0.16b, v10.16b\n" - ".inst 0x6e84a414 
// ummla v20.4s, v0.16b, v4.16b\n" - "ldr q5, [x28, #0x20]\n" - "ldr q6, [x28, #0x30]\n" - ".inst 0x6e85a411 // ummla v17.4s, v0.16b, v5.16b\n" - ".inst 0x6e86a415 // ummla v21.4s, v0.16b, v6.16b\n" - "ldr q7, [x28, #0x40]\n" - "ldr q8, [x28, #0x50]\n" - ".inst 0x6e87a412 // ummla v18.4s, v0.16b, v7.16b\n" - ".inst 0x6e88a416 // ummla v22.4s, v0.16b, v8.16b\n" - "ldr q9, [x28, #0x60]\n" - "ldr q10, [x28, #0x70]\n" - ".inst 0x6e89a413 // ummla v19.4s, v0.16b, v9.16b\n" - ".inst 0x6e8aa417 // ummla v23.4s, v0.16b, v10.16b\n" + "ldr q25, [x28, #0x0]\n" + "ldr q24, [x28, #0x10]\n" + ".inst 0x6e99a410 // ummla v16.4s, v0.16b, v25.16b\n" + ".inst 0x6e98a414 // ummla v20.4s, v0.16b, v24.16b\n" + "ldr q25, [x28, #0x20]\n" + "ldr q24, [x28, #0x30]\n" + ".inst 0x6e99a411 // ummla v17.4s, v0.16b, v25.16b\n" + ".inst 0x6e98a415 // ummla v21.4s, v0.16b, v24.16b\n" + "ldr q25, [x28, #0x40]\n" + "ldr q24, [x28, #0x50]\n" + ".inst 0x6e99a412 // ummla v18.4s, v0.16b, v25.16b\n" + ".inst 0x6e98a416 // ummla v22.4s, v0.16b, v24.16b\n" + "ldr q25, [x28, #0x60]\n" + "ldr q24, [x28, #0x70]\n" + ".inst 0x6e99a413 // ummla v19.4s, v0.16b, v25.16b\n" + ".inst 0x6e98a417 // ummla v23.4s, v0.16b, v24.16b\n" "add x28, x28, #0x80\n" "52:" // Height 2: Multiply loop: No odd multiplies "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n" @@ -644,127 +643,127 @@ void a64_hybrid_u8qa_mmla_4x16 ( "cmp x26, x20\n" "bne 36b\n" "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n" - "uzp1 v4.2d, v16.2d, v20.2d\n" - "add x22, x27, x20\n" + "uzp1 v24.2d, v16.2d, v20.2d\n" + "add x23, x27, x20\n" "uzp2 v16.2d, v16.2d, v20.2d\n" "uzp1 v20.2d, v17.2d, v21.2d\n" "uzp2 v17.2d, v17.2d, v21.2d\n" "prfm pstl1keep, [x27, #0x0]\n" - "prfm pstl1keep, [x22, #0x0]\n" + "prfm pstl1keep, [x23, #0x0]\n" "uzp1 v21.2d, v18.2d, v22.2d\n" "uzp2 v18.2d, v18.2d, v22.2d\n" "uzp1 v22.2d, v19.2d, v23.2d\n" "uzp2 v19.2d, v19.2d, v23.2d\n" - "mov v23.16b, v4.16b\n" + "mov v23.16b, v24.16b\n" "tbnz %x[flags], #31, 53f\n" - "add x23, %x[qp], %[b_offset]\n" - "ld1r { v2.4s }, [x23]\n" + "add x20, %x[qp], %[b_offset]\n" + "ld1r { v24.4s }, [x20]\n" "addp v11.4s, v11.4s, v11.4s\n" - "neg v2.4s, v2.4s\n" + "neg v24.4s, v24.4s\n" "dup v12.4s, v11.s[3]\n" "dup v11.4s, v11.s[0]\n" - "mul v11.4s, v11.4s, v2.4s\n" - "mul v12.4s, v12.4s, v2.4s\n" + "mul v11.4s, v11.4s, v24.4s\n" + "mul v12.4s, v12.4s, v24.4s\n" "53:" // Height 2: skip row sum fixup - "ldr q0, [x10, #0x0]\n" - "ldr q1, [x10, #0x10]\n" + "ldr q28, [x10, #0x0]\n" + "ldr q27, [x10, #0x10]\n" "add v23.4s, v23.4s, v11.4s\n" "add v20.4s, v20.4s, v11.4s\n" - "ldr q2, [x10, #0x20]\n" - "ldr q3, [x10, #0x30]\n" + "ldr q26, [x10, #0x20]\n" + "ldr q25, [x10, #0x30]\n" "add v21.4s, v21.4s, v11.4s\n" "add v22.4s, v22.4s, v11.4s\n" "add v16.4s, v16.4s, v12.4s\n" "add v17.4s, v17.4s, v12.4s\n" - "add x23, %x[qp], %[per_layer_mul]\n" - "ld1r { v4.4s }, [x23]\n" + "add x20, %x[qp], %[per_layer_mul]\n" + "ld1r { v24.4s }, [x20]\n" "add v18.4s, v18.4s, v12.4s\n" "add v19.4s, v19.4s, v12.4s\n" "orr %x[flags], %x[flags], #0x80000000\n" - "add x23, %x[qp], %[per_layer_right_shift]\n" - "add v23.4s, v23.4s, v0.4s\n" - "add v20.4s, v20.4s, v1.4s\n" + "add x20, %x[qp], %[per_layer_right_shift]\n" + "add v23.4s, v23.4s, v28.4s\n" + "add v20.4s, v20.4s, v27.4s\n" "add x10, x10, #0x40\n" - "add v21.4s, v21.4s, v2.4s\n" - "add v22.4s, v22.4s, v3.4s\n" - "add v16.4s, v16.4s, v0.4s\n" - "ld1r { v0.4s }, [x23]\n" - "add v17.4s, v17.4s, v1.4s\n" - "add v18.4s, v18.4s, v2.4s\n" - "add v19.4s, v19.4s, v3.4s\n" - "sqrdmulh v23.4s, 
v23.4s, v4.4s\n" - "sqrdmulh v20.4s, v20.4s, v4.4s\n" - "sqrdmulh v21.4s, v21.4s, v4.4s\n" - "sqrdmulh v22.4s, v22.4s, v4.4s\n" - "sqrdmulh v16.4s, v16.4s, v4.4s\n" - "sqrdmulh v17.4s, v17.4s, v4.4s\n" - "sqrdmulh v18.4s, v18.4s, v4.4s\n" - "sqrdmulh v19.4s, v19.4s, v4.4s\n" + "add v21.4s, v21.4s, v26.4s\n" + "add v22.4s, v22.4s, v25.4s\n" + "add v16.4s, v16.4s, v28.4s\n" + "ld1r { v0.4s }, [x20]\n" + "add v17.4s, v17.4s, v27.4s\n" + "add v18.4s, v18.4s, v26.4s\n" + "add v19.4s, v19.4s, v25.4s\n" + "sqrdmulh v23.4s, v23.4s, v24.4s\n" + "sqrdmulh v20.4s, v20.4s, v24.4s\n" + "sqrdmulh v21.4s, v21.4s, v24.4s\n" + "sqrdmulh v22.4s, v22.4s, v24.4s\n" + "sqrdmulh v16.4s, v16.4s, v24.4s\n" + "sqrdmulh v17.4s, v17.4s, v24.4s\n" + "sqrdmulh v18.4s, v18.4s, v24.4s\n" + "sqrdmulh v19.4s, v19.4s, v24.4s\n" "tbz %x[flags], #5, 54f\n" - "and v4.16b, v23.16b, v0.16b\n" - "sshr v4.4s, v4.4s, #0x1f\n" - "sqadd v23.4s, v23.4s, v4.4s\n" - "and v5.16b, v20.16b, v0.16b\n" - "and v6.16b, v21.16b, v0.16b\n" - "and v7.16b, v22.16b, v0.16b\n" - "and v8.16b, v16.16b, v0.16b\n" - "and v9.16b, v17.16b, v0.16b\n" - "and v10.16b, v18.16b, v0.16b\n" - "and v4.16b, v19.16b, v0.16b\n" - "sshr v5.4s, v5.4s, #0x1f\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sshr v7.4s, v7.4s, #0x1f\n" - "sshr v8.4s, v8.4s, #0x1f\n" - "sshr v9.4s, v9.4s, #0x1f\n" - "sshr v10.4s, v10.4s, #0x1f\n" - "sshr v4.4s, v4.4s, #0x1f\n" - "sqadd v20.4s, v20.4s, v5.4s\n" - "sqadd v21.4s, v21.4s, v6.4s\n" - "sqadd v22.4s, v22.4s, v7.4s\n" - "sqadd v16.4s, v16.4s, v8.4s\n" - "sqadd v17.4s, v17.4s, v9.4s\n" - "sqadd v18.4s, v18.4s, v10.4s\n" - "sqadd v19.4s, v19.4s, v4.4s\n" + "and v24.16b, v23.16b, v0.16b\n" + "sshr v24.4s, v24.4s, #0x1f\n" + "sqadd v23.4s, v23.4s, v24.4s\n" + "and v30.16b, v20.16b, v0.16b\n" + "and v29.16b, v21.16b, v0.16b\n" + "and v28.16b, v22.16b, v0.16b\n" + "and v27.16b, v16.16b, v0.16b\n" + "and v26.16b, v17.16b, v0.16b\n" + "and v25.16b, v18.16b, v0.16b\n" + "and v24.16b, v19.16b, v0.16b\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "sshr v29.4s, v29.4s, #0x1f\n" + "sshr v28.4s, v28.4s, #0x1f\n" + "sshr v27.4s, v27.4s, #0x1f\n" + "sshr v26.4s, v26.4s, #0x1f\n" + "sshr v25.4s, v25.4s, #0x1f\n" + "sshr v24.4s, v24.4s, #0x1f\n" + "sqadd v20.4s, v20.4s, v30.4s\n" + "sqadd v21.4s, v21.4s, v29.4s\n" + "sqadd v22.4s, v22.4s, v28.4s\n" + "sqadd v16.4s, v16.4s, v27.4s\n" + "sqadd v17.4s, v17.4s, v26.4s\n" + "sqadd v18.4s, v18.4s, v25.4s\n" + "sqadd v19.4s, v19.4s, v24.4s\n" "54:" // Height 2: no shift correction - "add x23, %x[qp], %[c_offset]\n" - "ld1r { v4.4s }, [x23]\n" + "add x20, %x[qp], %[c_offset]\n" + "ld1r { v26.4s }, [x20]\n" "srshl v23.4s, v23.4s, v0.4s\n" "srshl v20.4s, v20.4s, v0.4s\n" "srshl v21.4s, v21.4s, v0.4s\n" "srshl v22.4s, v22.4s, v0.4s\n" - "add x23, %x[qp], %[maxval]\n" - "ld1r { v6.4s }, [x23]\n" + "add x20, %x[qp], %[maxval]\n" + "ld1r { v25.4s }, [x20]\n" "srshl v16.4s, v16.4s, v0.4s\n" "srshl v17.4s, v17.4s, v0.4s\n" - "add x23, %x[qp], %[minval]\n" - "ld1r { v5.4s }, [x23]\n" + "add x20, %x[qp], %[minval]\n" + "ld1r { v24.4s }, [x20]\n" "srshl v18.4s, v18.4s, v0.4s\n" "srshl v19.4s, v19.4s, v0.4s\n" "cmp x9, #0x10\n" - "add v23.4s, v23.4s, v4.4s\n" - "add v20.4s, v20.4s, v4.4s\n" - "add v21.4s, v21.4s, v4.4s\n" - "add v22.4s, v22.4s, v4.4s\n" - "add v16.4s, v16.4s, v4.4s\n" - "add v17.4s, v17.4s, v4.4s\n" - "add v18.4s, v18.4s, v4.4s\n" - "add v19.4s, v19.4s, v4.4s\n" - "smin v23.4s, v23.4s, v6.4s\n" - "smin v20.4s, v20.4s, v6.4s\n" - "smin v21.4s, v21.4s, v6.4s\n" - "smin v22.4s, v22.4s, v6.4s\n" - "smin v16.4s, v16.4s, 
v6.4s\n" - "smin v17.4s, v17.4s, v6.4s\n" - "smin v18.4s, v18.4s, v6.4s\n" - "smin v19.4s, v19.4s, v6.4s\n" - "smax v23.4s, v23.4s, v5.4s\n" - "smax v20.4s, v20.4s, v5.4s\n" - "smax v21.4s, v21.4s, v5.4s\n" - "smax v22.4s, v22.4s, v5.4s\n" - "smax v16.4s, v16.4s, v5.4s\n" - "smax v17.4s, v17.4s, v5.4s\n" - "smax v18.4s, v18.4s, v5.4s\n" - "smax v19.4s, v19.4s, v5.4s\n" + "add v23.4s, v23.4s, v26.4s\n" + "add v20.4s, v20.4s, v26.4s\n" + "add v21.4s, v21.4s, v26.4s\n" + "add v22.4s, v22.4s, v26.4s\n" + "add v16.4s, v16.4s, v26.4s\n" + "add v17.4s, v17.4s, v26.4s\n" + "add v18.4s, v18.4s, v26.4s\n" + "add v19.4s, v19.4s, v26.4s\n" + "smin v23.4s, v23.4s, v25.4s\n" + "smin v20.4s, v20.4s, v25.4s\n" + "smin v21.4s, v21.4s, v25.4s\n" + "smin v22.4s, v22.4s, v25.4s\n" + "smin v16.4s, v16.4s, v25.4s\n" + "smin v17.4s, v17.4s, v25.4s\n" + "smin v18.4s, v18.4s, v25.4s\n" + "smin v19.4s, v19.4s, v25.4s\n" + "smax v23.4s, v23.4s, v24.4s\n" + "smax v20.4s, v20.4s, v24.4s\n" + "smax v21.4s, v21.4s, v24.4s\n" + "smax v22.4s, v22.4s, v24.4s\n" + "smax v16.4s, v16.4s, v24.4s\n" + "smax v17.4s, v17.4s, v24.4s\n" + "smax v18.4s, v18.4s, v24.4s\n" + "smax v19.4s, v19.4s, v24.4s\n" "uzp1 v23.8h, v23.8h, v20.8h\n" "uzp1 v20.8h, v21.8h, v22.8h\n" "uzp1 v16.8h, v16.8h, v17.8h\n" @@ -774,68 +773,68 @@ void a64_hybrid_u8qa_mmla_4x16 ( "bge 63f\n" "tbz x9, #3, 58f\n" "str d23, [x27], #0x8\n" - "str d16, [x22], #0x8\n" + "str d16, [x23], #0x8\n" "tbz x9, #2, 56f\n" "st1 { v23.s }[2], [x27], #0x4\n" - "st1 { v16.s }[2], [x22], #0x4\n" + "st1 { v16.s }[2], [x23], #0x4\n" "tbz x9, #1, 55f\n" "st1 { v23.h }[6], [x27], #0x2\n" - "st1 { v16.h }[6], [x22], #0x2\n" + "st1 { v16.h }[6], [x23], #0x2\n" "tbz x9, #0, 62f\n" "st1 { v23.b }[14], [x27]\n" - "st1 { v16.b }[14], [x22]\n" + "st1 { v16.b }[14], [x23]\n" "b 62f\n" "55:" // Height 2: Partial direct writeback: partial_1_12 "tbz x9, #0, 62f\n" "st1 { v23.b }[12], [x27]\n" - "st1 { v16.b }[12], [x22]\n" + "st1 { v16.b }[12], [x23]\n" "b 62f\n" "56:" // Height 2: Partial direct writeback: partial_2_8 "tbz x9, #1, 57f\n" "st1 { v23.h }[4], [x27], #0x2\n" - "st1 { v16.h }[4], [x22], #0x2\n" + "st1 { v16.h }[4], [x23], #0x2\n" "tbz x9, #0, 62f\n" "st1 { v23.b }[10], [x27]\n" - "st1 { v16.b }[10], [x22]\n" + "st1 { v16.b }[10], [x23]\n" "b 62f\n" "57:" // Height 2: Partial direct writeback: partial_1_8 "tbz x9, #0, 62f\n" "st1 { v23.b }[8], [x27]\n" - "st1 { v16.b }[8], [x22]\n" + "st1 { v16.b }[8], [x23]\n" "b 62f\n" "58:" // Height 2: Partial direct writeback: partial_4_0 "tbz x9, #2, 60f\n" "str s23, [x27], #0x4\n" - "str s16, [x22], #0x4\n" + "str s16, [x23], #0x4\n" "tbz x9, #1, 59f\n" "st1 { v23.h }[2], [x27], #0x2\n" - "st1 { v16.h }[2], [x22], #0x2\n" + "st1 { v16.h }[2], [x23], #0x2\n" "tbz x9, #0, 62f\n" "st1 { v23.b }[6], [x27]\n" - "st1 { v16.b }[6], [x22]\n" + "st1 { v16.b }[6], [x23]\n" "b 62f\n" "59:" // Height 2: Partial direct writeback: partial_1_4 "tbz x9, #0, 62f\n" "st1 { v23.b }[4], [x27]\n" - "st1 { v16.b }[4], [x22]\n" + "st1 { v16.b }[4], [x23]\n" "b 62f\n" "60:" // Height 2: Partial direct writeback: partial_2_0 "tbz x9, #1, 61f\n" "str h23, [x27], #0x2\n" - "str h16, [x22], #0x2\n" + "str h16, [x23], #0x2\n" "tbz x9, #0, 62f\n" "st1 { v23.b }[2], [x27]\n" - "st1 { v16.b }[2], [x22]\n" + "st1 { v16.b }[2], [x23]\n" "b 62f\n" "61:" // Height 2: Partial direct writeback: partial_1_0 "str b23, [x27, #0x0]\n" - "str b16, [x22, #0x0]\n" + "str b16, [x23, #0x0]\n" "62:" // Height 2: Partial direct writeback: Done "b 64f\n" "63:" // Height 2: Full writeback 
"str q23, [x27, #0x0]\n" "add x27, x27, #0x10\n" - "str q16, [x22, #0x0]\n" + "str q16, [x23, #0x0]\n" "64:" // Height 2: Writeback done "subs x9, x9, #0x10\n" "bgt 34b\n" @@ -872,13 +871,13 @@ void a64_hybrid_u8qa_mmla_4x16 ( "68:" // Height 3: String loop "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n" "ldr w25, [x20, x26, LSL #0x2]\n" - "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n" + "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n" "tbz %x[flags], #3, 69f\n" - "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n" - "add x21, x21, x20, LSL #3\n" - "ldr x24, [x21, #0x0]\n" - "ldr x23, [x21, #0x8]\n" - "ldr x22, [x21, #0x10]\n" + "ldr x20, [%x[input_ptr], x26, LSL #0x3]\n" + "add x20, x20, x21, LSL #3\n" + "ldr x24, [x20, #0x0]\n" + "ldr x23, [x20, #0x8]\n" + "ldr x22, [x20, #0x10]\n" "cbnz x26, 70f\n" "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n" "add x24, x24, x20\n" @@ -887,8 +886,8 @@ void a64_hybrid_u8qa_mmla_4x16 ( "b 70f\n" "69:" // Height 3: setup direct input "mov x24, %x[input_ptr]\n" - "add x23, x24, x20\n" - "add x22, x23, x20\n" + "add x23, x24, x21\n" + "add x22, x23, x21\n" "70:" // Height 3: input setup done "cmp x25, #0x10\n" "blt 75f\n" @@ -909,12 +908,12 @@ void a64_hybrid_u8qa_mmla_4x16 ( ".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n" "trn1 v2.2d, v3.2d, v4.2d\n" ".inst 0x6e85a458 // ummla v24.4s, v2.16b, v5.16b\n" - "ldr q5, [x28, #0x70]\n" + "ldr q14, [x28, #0x70]\n" ".inst 0x6e86a414 // ummla v20.4s, v0.16b, v6.16b\n" "trn2 v3.2d, v3.2d, v4.2d\n" - "ldr q4, [x28, #0x60]\n" + "ldr q5, [x28, #0x60]\n" ".inst 0x6e86a45c // ummla v28.4s, v2.16b, v6.16b\n" - "ldr q6, [x28, #0x80]\n" + "ldr q4, [x28, #0x80]\n" ".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n" ".inst 0x6e87a459 // ummla v25.4s, v2.16b, v7.16b\n" "ldr q7, [x28, #0x90]\n" @@ -930,15 +929,15 @@ void a64_hybrid_u8qa_mmla_4x16 ( ".inst 0x6e8aa416 // ummla v22.4s, v0.16b, v10.16b\n" ".inst 0x6e8aa45e // ummla v30.4s, v2.16b, v10.16b\n" "ldr q10, [x28, #0xc0]\n" - ".inst 0x6e84a413 // ummla v19.4s, v0.16b, v4.16b\n" - ".inst 0x6e84a45b // ummla v27.4s, v2.16b, v4.16b\n" - "ldr q4, [x28, #0xd0]\n" - ".inst 0x6e85a417 // ummla v23.4s, v0.16b, v5.16b\n" - ".inst 0x6e85a45f // ummla v31.4s, v2.16b, v5.16b\n" + ".inst 0x6e85a413 // ummla v19.4s, v0.16b, v5.16b\n" + ".inst 0x6e85a45b // ummla v27.4s, v2.16b, v5.16b\n" + "ldr q6, [x28, #0xd0]\n" + ".inst 0x6e8ea417 // ummla v23.4s, v0.16b, v14.16b\n" + ".inst 0x6e8ea45f // ummla v31.4s, v2.16b, v14.16b\n" "ldr q5, [x28, #0xe0]\n" - ".inst 0x6e86a430 // ummla v16.4s, v1.16b, v6.16b\n" - ".inst 0x6e86a478 // ummla v24.4s, v3.16b, v6.16b\n" - "ldr q6, [x28, #0xf0]\n" + ".inst 0x6e84a430 // ummla v16.4s, v1.16b, v4.16b\n" + ".inst 0x6e84a478 // ummla v24.4s, v3.16b, v4.16b\n" + "ldr q4, [x28, #0xf0]\n" "add x28, x28, #0x100\n" ".inst 0x6e87a434 // ummla v20.4s, v1.16b, v7.16b\n" ".inst 0x6e87a47c // ummla v28.4s, v3.16b, v7.16b\n" @@ -948,12 +947,12 @@ void a64_hybrid_u8qa_mmla_4x16 ( ".inst 0x6e89a47d // ummla v29.4s, v3.16b, v9.16b\n" ".inst 0x6e8aa432 // ummla v18.4s, v1.16b, v10.16b\n" ".inst 0x6e8aa47a // ummla v26.4s, v3.16b, v10.16b\n" - ".inst 0x6e84a436 // ummla v22.4s, v1.16b, v4.16b\n" - ".inst 0x6e84a47e // ummla v30.4s, v3.16b, v4.16b\n" + ".inst 0x6e86a436 // ummla v22.4s, v1.16b, v6.16b\n" + ".inst 0x6e86a47e // ummla v30.4s, v3.16b, v6.16b\n" ".inst 0x6e85a433 // ummla v19.4s, v1.16b, v5.16b\n" ".inst 0x6e85a47b // ummla v27.4s, v3.16b, v5.16b\n" - ".inst 0x6e86a437 // ummla v23.4s, v1.16b, v6.16b\n" - ".inst 0x6e86a47f // 
ummla v31.4s, v3.16b, v6.16b\n" + ".inst 0x6e84a437 // ummla v23.4s, v1.16b, v4.16b\n" + ".inst 0x6e84a47f // ummla v31.4s, v3.16b, v4.16b\n" "tbnz %x[flags], #31, 72f\n" ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n" ".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n" @@ -981,12 +980,12 @@ void a64_hybrid_u8qa_mmla_4x16 ( ".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n" "trn1 v2.2d, v3.2d, v4.2d\n" ".inst 0x6e85a458 // ummla v24.4s, v2.16b, v5.16b\n" - "ldr q5, [x28, #0x70]\n" + "ldr q14, [x28, #0x70]\n" ".inst 0x6e86a414 // ummla v20.4s, v0.16b, v6.16b\n" "trn2 v3.2d, v3.2d, v4.2d\n" - "ldr q4, [x28, #0x60]\n" + "ldr q5, [x28, #0x60]\n" ".inst 0x6e86a45c // ummla v28.4s, v2.16b, v6.16b\n" - "ldr q6, [x28, #0x80]\n" + "ldr q4, [x28, #0x80]\n" ".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n" ".inst 0x6e87a459 // ummla v25.4s, v2.16b, v7.16b\n" "ldr q7, [x28, #0x90]\n" @@ -1003,15 +1002,15 @@ void a64_hybrid_u8qa_mmla_4x16 ( ".inst 0x6e8aa45e // ummla v30.4s, v2.16b, v10.16b\n" "ldr q10, [x28, #0xc0]\n" "add x22, x22, #0x10\n" - ".inst 0x6e84a413 // ummla v19.4s, v0.16b, v4.16b\n" - ".inst 0x6e84a45b // ummla v27.4s, v2.16b, v4.16b\n" - "ldr q4, [x28, #0xd0]\n" - ".inst 0x6e85a417 // ummla v23.4s, v0.16b, v5.16b\n" - ".inst 0x6e85a45f // ummla v31.4s, v2.16b, v5.16b\n" + ".inst 0x6e85a413 // ummla v19.4s, v0.16b, v5.16b\n" + ".inst 0x6e85a45b // ummla v27.4s, v2.16b, v5.16b\n" + "ldr q6, [x28, #0xd0]\n" + ".inst 0x6e8ea417 // ummla v23.4s, v0.16b, v14.16b\n" + ".inst 0x6e8ea45f // ummla v31.4s, v2.16b, v14.16b\n" "ldr q5, [x28, #0xe0]\n" - ".inst 0x6e86a430 // ummla v16.4s, v1.16b, v6.16b\n" - ".inst 0x6e86a478 // ummla v24.4s, v3.16b, v6.16b\n" - "ldr q6, [x28, #0xf0]\n" + ".inst 0x6e84a430 // ummla v16.4s, v1.16b, v4.16b\n" + ".inst 0x6e84a478 // ummla v24.4s, v3.16b, v4.16b\n" + "ldr q4, [x28, #0xf0]\n" "add x28, x28, #0x100\n" ".inst 0x6e87a434 // ummla v20.4s, v1.16b, v7.16b\n" ".inst 0x6e87a47c // ummla v28.4s, v3.16b, v7.16b\n" @@ -1021,12 +1020,12 @@ void a64_hybrid_u8qa_mmla_4x16 ( ".inst 0x6e89a47d // ummla v29.4s, v3.16b, v9.16b\n" ".inst 0x6e8aa432 // ummla v18.4s, v1.16b, v10.16b\n" ".inst 0x6e8aa47a // ummla v26.4s, v3.16b, v10.16b\n" - ".inst 0x6e84a436 // ummla v22.4s, v1.16b, v4.16b\n" - ".inst 0x6e84a47e // ummla v30.4s, v3.16b, v4.16b\n" + ".inst 0x6e86a436 // ummla v22.4s, v1.16b, v6.16b\n" + ".inst 0x6e86a47e // ummla v30.4s, v3.16b, v6.16b\n" ".inst 0x6e85a433 // ummla v19.4s, v1.16b, v5.16b\n" ".inst 0x6e85a47b // ummla v27.4s, v3.16b, v5.16b\n" - ".inst 0x6e86a437 // ummla v23.4s, v1.16b, v6.16b\n" - ".inst 0x6e86a47f // ummla v31.4s, v3.16b, v6.16b\n" + ".inst 0x6e84a437 // ummla v23.4s, v1.16b, v4.16b\n" + ".inst 0x6e84a47f // ummla v31.4s, v3.16b, v4.16b\n" "tbnz %x[flags], #31, 74f\n" ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n" ".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n" @@ -1042,41 +1041,41 @@ void a64_hybrid_u8qa_mmla_4x16 ( "blt 78f\n" "76:" // Height 3: Multiply loop: Odd block loop "ldr d1, [x24], #0x8\n" - "ldr d2, [x23], #0x8\n" - "trn1 v0.2d, v1.2d, v2.2d\n" - "ldr d3, [x22], #0x8\n" - "trn1 v2.2d, v3.2d, v7.2d\n" + "ldr d0, [x23], #0x8\n" + "trn1 v0.2d, v1.2d, v0.2d\n" + "ldr d1, [x22], #0x8\n" + "trn1 v2.2d, v1.2d, v2.2d\n" "tbnz %x[flags], #31, 77f\n" ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n" ".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n" "77:" // Height 3: Multiply loop: unique 11: skip row sum - "ldr q8, [x28, #0x0]\n" - "ldr q9, [x28, #0x10]\n" - ".inst 0x6e88a410 // ummla v16.4s, v0.16b, v8.16b\n" - 
".inst 0x6e88a458 // ummla v24.4s, v2.16b, v8.16b\n" - "ldr q10, [x28, #0x20]\n" - "ldr q4, [x28, #0x30]\n" + "ldr q3, [x28, #0x0]\n" + "ldr q1, [x28, #0x10]\n" + ".inst 0x6e83a410 // ummla v16.4s, v0.16b, v3.16b\n" + ".inst 0x6e83a458 // ummla v24.4s, v2.16b, v3.16b\n" + "ldr q7, [x28, #0x20]\n" + "ldr q6, [x28, #0x30]\n" "sub x25, x25, #0x8\n" "cmp x25, #0x8\n" "ldr q5, [x28, #0x40]\n" - "ldr q6, [x28, #0x50]\n" - ".inst 0x6e89a414 // ummla v20.4s, v0.16b, v9.16b\n" - ".inst 0x6e89a45c // ummla v28.4s, v2.16b, v9.16b\n" - "ldr q7, [x28, #0x60]\n" - "ldr q8, [x28, #0x70]\n" - ".inst 0x6e8aa411 // ummla v17.4s, v0.16b, v10.16b\n" - ".inst 0x6e8aa459 // ummla v25.4s, v2.16b, v10.16b\n" - ".inst 0x6e84a415 // ummla v21.4s, v0.16b, v4.16b\n" - ".inst 0x6e84a45d // ummla v29.4s, v2.16b, v4.16b\n" + "ldr q4, [x28, #0x50]\n" + ".inst 0x6e81a414 // ummla v20.4s, v0.16b, v1.16b\n" + ".inst 0x6e81a45c // ummla v28.4s, v2.16b, v1.16b\n" + "ldr q3, [x28, #0x60]\n" + "ldr q1, [x28, #0x70]\n" + ".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n" + ".inst 0x6e87a459 // ummla v25.4s, v2.16b, v7.16b\n" + ".inst 0x6e86a415 // ummla v21.4s, v0.16b, v6.16b\n" + ".inst 0x6e86a45d // ummla v29.4s, v2.16b, v6.16b\n" "add x28, x28, #0x80\n" ".inst 0x6e85a412 // ummla v18.4s, v0.16b, v5.16b\n" ".inst 0x6e85a45a // ummla v26.4s, v2.16b, v5.16b\n" - ".inst 0x6e86a416 // ummla v22.4s, v0.16b, v6.16b\n" - ".inst 0x6e86a45e // ummla v30.4s, v2.16b, v6.16b\n" - ".inst 0x6e87a413 // ummla v19.4s, v0.16b, v7.16b\n" - ".inst 0x6e87a45b // ummla v27.4s, v2.16b, v7.16b\n" - ".inst 0x6e88a417 // ummla v23.4s, v0.16b, v8.16b\n" - ".inst 0x6e88a45f // ummla v31.4s, v2.16b, v8.16b\n" + ".inst 0x6e84a416 // ummla v22.4s, v0.16b, v4.16b\n" + ".inst 0x6e84a45e // ummla v30.4s, v2.16b, v4.16b\n" + ".inst 0x6e83a413 // ummla v19.4s, v0.16b, v3.16b\n" + ".inst 0x6e83a45b // ummla v27.4s, v2.16b, v3.16b\n" + ".inst 0x6e81a417 // ummla v23.4s, v0.16b, v1.16b\n" + ".inst 0x6e81a45f // ummla v31.4s, v2.16b, v1.16b\n" "bge 76b\n" "78:" // Height 3: Multiply loop: Skip odd blocks "cbz x25, 84f\n" @@ -1115,52 +1114,52 @@ void a64_hybrid_u8qa_mmla_4x16 ( "ldr b3, [x22, #0x0]\n" "82:" // Height 3: Multiply loop: Ragged operand read: Done "trn1 v0.2d, v1.2d, v2.2d\n" - "trn1 v2.2d, v3.2d, v9.2d\n" + "trn1 v2.2d, v3.2d, v4.2d\n" "tbnz %x[flags], #31, 83f\n" ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n" ".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n" "83:" // Height 3: Multiply loop: unique 12: skip row sum - "ldr q10, [x28, #0x0]\n" - "ldr q4, [x28, #0x10]\n" - ".inst 0x6e8aa410 // ummla v16.4s, v0.16b, v10.16b\n" - ".inst 0x6e8aa458 // ummla v24.4s, v2.16b, v10.16b\n" - "ldr q5, [x28, #0x20]\n" + "ldr q1, [x28, #0x0]\n" + "ldr q3, [x28, #0x10]\n" + ".inst 0x6e81a410 // ummla v16.4s, v0.16b, v1.16b\n" + ".inst 0x6e81a458 // ummla v24.4s, v2.16b, v1.16b\n" + "ldr q1, [x28, #0x20]\n" "ldr q6, [x28, #0x30]\n" - ".inst 0x6e84a414 // ummla v20.4s, v0.16b, v4.16b\n" - ".inst 0x6e84a45c // ummla v28.4s, v2.16b, v4.16b\n" - "ldr q7, [x28, #0x40]\n" - "ldr q8, [x28, #0x50]\n" - ".inst 0x6e85a411 // ummla v17.4s, v0.16b, v5.16b\n" - ".inst 0x6e85a459 // ummla v25.4s, v2.16b, v5.16b\n" - "ldr q9, [x28, #0x60]\n" - "ldr q10, [x28, #0x70]\n" + ".inst 0x6e83a414 // ummla v20.4s, v0.16b, v3.16b\n" + ".inst 0x6e83a45c // ummla v28.4s, v2.16b, v3.16b\n" + "ldr q5, [x28, #0x40]\n" + "ldr q4, [x28, #0x50]\n" + ".inst 0x6e81a411 // ummla v17.4s, v0.16b, v1.16b\n" + ".inst 0x6e81a459 // ummla v25.4s, v2.16b, v1.16b\n" + "ldr q3, [x28, #0x60]\n" + "ldr 
q1, [x28, #0x70]\n" ".inst 0x6e86a415 // ummla v21.4s, v0.16b, v6.16b\n" ".inst 0x6e86a45d // ummla v29.4s, v2.16b, v6.16b\n" - ".inst 0x6e87a412 // ummla v18.4s, v0.16b, v7.16b\n" - ".inst 0x6e87a45a // ummla v26.4s, v2.16b, v7.16b\n" + ".inst 0x6e85a412 // ummla v18.4s, v0.16b, v5.16b\n" + ".inst 0x6e85a45a // ummla v26.4s, v2.16b, v5.16b\n" "add x28, x28, #0x80\n" - ".inst 0x6e88a416 // ummla v22.4s, v0.16b, v8.16b\n" - ".inst 0x6e88a45e // ummla v30.4s, v2.16b, v8.16b\n" - ".inst 0x6e89a413 // ummla v19.4s, v0.16b, v9.16b\n" - ".inst 0x6e89a45b // ummla v27.4s, v2.16b, v9.16b\n" - ".inst 0x6e8aa417 // ummla v23.4s, v0.16b, v10.16b\n" - ".inst 0x6e8aa45f // ummla v31.4s, v2.16b, v10.16b\n" + ".inst 0x6e84a416 // ummla v22.4s, v0.16b, v4.16b\n" + ".inst 0x6e84a45e // ummla v30.4s, v2.16b, v4.16b\n" + ".inst 0x6e83a413 // ummla v19.4s, v0.16b, v3.16b\n" + ".inst 0x6e83a45b // ummla v27.4s, v2.16b, v3.16b\n" + ".inst 0x6e81a417 // ummla v23.4s, v0.16b, v1.16b\n" + ".inst 0x6e81a45f // ummla v31.4s, v2.16b, v1.16b\n" "84:" // Height 3: Multiply loop: No odd multiplies "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n" "add x26, x26, #0x1\n" "cmp x26, x20\n" "bne 68b\n" "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n" - "uzp1 v4.2d, v16.2d, v20.2d\n" - "add x22, x27, x20\n" - "add x21, x22, x20\n" + "uzp1 v0.2d, v16.2d, v20.2d\n" + "add x23, x27, x20\n" + "add x22, x23, x20\n" "uzp2 v16.2d, v16.2d, v20.2d\n" "uzp1 v20.2d, v17.2d, v21.2d\n" "prfm pstl1keep, [x27, #0x0]\n" - "prfm pstl1keep, [x22, #0x0]\n" + "prfm pstl1keep, [x23, #0x0]\n" "uzp2 v17.2d, v17.2d, v21.2d\n" "uzp1 v21.2d, v18.2d, v22.2d\n" - "prfm pstl1keep, [x21, #0x0]\n" + "prfm pstl1keep, [x22, #0x0]\n" "uzp2 v18.2d, v18.2d, v22.2d\n" "uzp1 v22.2d, v19.2d, v23.2d\n" "uzp2 v19.2d, v19.2d, v23.2d\n" @@ -1168,116 +1167,116 @@ void a64_hybrid_u8qa_mmla_4x16 ( "uzp1 v25.2d, v25.2d, v29.2d\n" "uzp1 v26.2d, v26.2d, v30.2d\n" "uzp1 v27.2d, v27.2d, v31.2d\n" - "mov v31.16b, v4.16b\n" + "mov v31.16b, v0.16b\n" "tbnz %x[flags], #31, 85f\n" - "add x23, %x[qp], %[b_offset]\n" - "ld1r { v3.4s }, [x23]\n" + "add x20, %x[qp], %[b_offset]\n" + "ld1r { v23.4s }, [x20]\n" "addp v11.4s, v11.4s, v11.4s\n" "addp v13.4s, v13.4s, v13.4s\n" - "neg v3.4s, v3.4s\n" + "neg v23.4s, v23.4s\n" "dup v12.4s, v11.s[3]\n" "dup v11.4s, v11.s[0]\n" "dup v13.4s, v13.s[0]\n" - "mul v11.4s, v11.4s, v3.4s\n" - "mul v12.4s, v12.4s, v3.4s\n" - "mul v13.4s, v13.4s, v3.4s\n" + "mul v11.4s, v11.4s, v23.4s\n" + "mul v12.4s, v12.4s, v23.4s\n" + "mul v13.4s, v13.4s, v23.4s\n" "85:" // Height 3: skip row sum fixup "ldr q0, [x10, #0x0]\n" - "ldr q1, [x10, #0x10]\n" + "ldr q30, [x10, #0x10]\n" "add v31.4s, v31.4s, v11.4s\n" "add v20.4s, v20.4s, v11.4s\n" - "ldr q2, [x10, #0x20]\n" - "ldr q3, [x10, #0x30]\n" + "ldr q29, [x10, #0x20]\n" + "ldr q28, [x10, #0x30]\n" "add v21.4s, v21.4s, v11.4s\n" "add v22.4s, v22.4s, v11.4s\n" "add v16.4s, v16.4s, v12.4s\n" "add v17.4s, v17.4s, v12.4s\n" - "add x23, %x[qp], %[per_layer_mul]\n" - "ld1r { v4.4s }, [x23]\n" + "add x20, %x[qp], %[per_layer_mul]\n" + "ld1r { v23.4s }, [x20]\n" "add v18.4s, v18.4s, v12.4s\n" "add v19.4s, v19.4s, v12.4s\n" "orr %x[flags], %x[flags], #0x80000000\n" - "add x23, %x[qp], %[per_layer_right_shift]\n" + "add x20, %x[qp], %[per_layer_right_shift]\n" "add v24.4s, v24.4s, v13.4s\n" "add v25.4s, v25.4s, v13.4s\n" "add x10, x10, #0x40\n" "add v26.4s, v26.4s, v13.4s\n" "add v27.4s, v27.4s, v13.4s\n" "add v31.4s, v31.4s, v0.4s\n" - "add v20.4s, v20.4s, v1.4s\n" - "add v21.4s, v21.4s, v2.4s\n" - "add v22.4s, 
v22.4s, v3.4s\n" + "add v20.4s, v20.4s, v30.4s\n" + "add v21.4s, v21.4s, v29.4s\n" + "add v22.4s, v22.4s, v28.4s\n" "add v16.4s, v16.4s, v0.4s\n" - "add v17.4s, v17.4s, v1.4s\n" - "add v18.4s, v18.4s, v2.4s\n" - "add v19.4s, v19.4s, v3.4s\n" + "add v17.4s, v17.4s, v30.4s\n" + "add v18.4s, v18.4s, v29.4s\n" + "add v19.4s, v19.4s, v28.4s\n" "add v24.4s, v24.4s, v0.4s\n" - "ld1r { v0.4s }, [x23]\n" - "add v25.4s, v25.4s, v1.4s\n" - "add v26.4s, v26.4s, v2.4s\n" - "add v27.4s, v27.4s, v3.4s\n" - "sqrdmulh v31.4s, v31.4s, v4.4s\n" - "sqrdmulh v20.4s, v20.4s, v4.4s\n" - "sqrdmulh v21.4s, v21.4s, v4.4s\n" - "sqrdmulh v22.4s, v22.4s, v4.4s\n" - "sqrdmulh v16.4s, v16.4s, v4.4s\n" - "sqrdmulh v17.4s, v17.4s, v4.4s\n" - "sqrdmulh v18.4s, v18.4s, v4.4s\n" - "sqrdmulh v19.4s, v19.4s, v4.4s\n" - "sqrdmulh v24.4s, v24.4s, v4.4s\n" - "sqrdmulh v25.4s, v25.4s, v4.4s\n" - "sqrdmulh v26.4s, v26.4s, v4.4s\n" - "sqrdmulh v27.4s, v27.4s, v4.4s\n" + "ld1r { v0.4s }, [x20]\n" + "add v25.4s, v25.4s, v30.4s\n" + "add v26.4s, v26.4s, v29.4s\n" + "add v27.4s, v27.4s, v28.4s\n" + "sqrdmulh v31.4s, v31.4s, v23.4s\n" + "sqrdmulh v20.4s, v20.4s, v23.4s\n" + "sqrdmulh v21.4s, v21.4s, v23.4s\n" + "sqrdmulh v22.4s, v22.4s, v23.4s\n" + "sqrdmulh v16.4s, v16.4s, v23.4s\n" + "sqrdmulh v17.4s, v17.4s, v23.4s\n" + "sqrdmulh v18.4s, v18.4s, v23.4s\n" + "sqrdmulh v19.4s, v19.4s, v23.4s\n" + "sqrdmulh v24.4s, v24.4s, v23.4s\n" + "sqrdmulh v25.4s, v25.4s, v23.4s\n" + "sqrdmulh v26.4s, v26.4s, v23.4s\n" + "sqrdmulh v27.4s, v27.4s, v23.4s\n" "tbz %x[flags], #5, 86f\n" - "and v4.16b, v31.16b, v0.16b\n" - "and v5.16b, v20.16b, v0.16b\n" - "and v6.16b, v21.16b, v0.16b\n" - "and v7.16b, v22.16b, v0.16b\n" - "and v8.16b, v16.16b, v0.16b\n" - "sshr v4.4s, v4.4s, #0x1f\n" - "sshr v5.4s, v5.4s, #0x1f\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sshr v7.4s, v7.4s, #0x1f\n" - "sshr v8.4s, v8.4s, #0x1f\n" - "sqadd v31.4s, v31.4s, v4.4s\n" - "sqadd v20.4s, v20.4s, v5.4s\n" - "sqadd v21.4s, v21.4s, v6.4s\n" - "sqadd v22.4s, v22.4s, v7.4s\n" - "sqadd v16.4s, v16.4s, v8.4s\n" - "and v9.16b, v17.16b, v0.16b\n" - "and v10.16b, v18.16b, v0.16b\n" - "and v4.16b, v19.16b, v0.16b\n" - "and v5.16b, v24.16b, v0.16b\n" - "and v6.16b, v25.16b, v0.16b\n" - "and v7.16b, v26.16b, v0.16b\n" - "and v8.16b, v27.16b, v0.16b\n" - "sshr v9.4s, v9.4s, #0x1f\n" - "sshr v10.4s, v10.4s, #0x1f\n" - "sshr v4.4s, v4.4s, #0x1f\n" - "sshr v5.4s, v5.4s, #0x1f\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sshr v7.4s, v7.4s, #0x1f\n" - "sshr v8.4s, v8.4s, #0x1f\n" - "sqadd v17.4s, v17.4s, v9.4s\n" - "sqadd v18.4s, v18.4s, v10.4s\n" - "sqadd v19.4s, v19.4s, v4.4s\n" - "sqadd v24.4s, v24.4s, v5.4s\n" - "sqadd v25.4s, v25.4s, v6.4s\n" - "sqadd v26.4s, v26.4s, v7.4s\n" - "sqadd v27.4s, v27.4s, v8.4s\n" + "and v1.16b, v31.16b, v0.16b\n" + "and v30.16b, v20.16b, v0.16b\n" + "and v29.16b, v21.16b, v0.16b\n" + "and v28.16b, v22.16b, v0.16b\n" + "and v23.16b, v16.16b, v0.16b\n" + "sshr v1.4s, v1.4s, #0x1f\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "sshr v29.4s, v29.4s, #0x1f\n" + "sshr v28.4s, v28.4s, #0x1f\n" + "sshr v23.4s, v23.4s, #0x1f\n" + "sqadd v31.4s, v31.4s, v1.4s\n" + "sqadd v20.4s, v20.4s, v30.4s\n" + "sqadd v21.4s, v21.4s, v29.4s\n" + "sqadd v22.4s, v22.4s, v28.4s\n" + "sqadd v16.4s, v16.4s, v23.4s\n" + "and v3.16b, v17.16b, v0.16b\n" + "and v2.16b, v18.16b, v0.16b\n" + "and v1.16b, v19.16b, v0.16b\n" + "and v30.16b, v24.16b, v0.16b\n" + "and v29.16b, v25.16b, v0.16b\n" + "and v28.16b, v26.16b, v0.16b\n" + "and v23.16b, v27.16b, v0.16b\n" + "sshr v3.4s, v3.4s, #0x1f\n" + "sshr v2.4s, v2.4s, 
#0x1f\n" + "sshr v1.4s, v1.4s, #0x1f\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "sshr v29.4s, v29.4s, #0x1f\n" + "sshr v28.4s, v28.4s, #0x1f\n" + "sshr v23.4s, v23.4s, #0x1f\n" + "sqadd v17.4s, v17.4s, v3.4s\n" + "sqadd v18.4s, v18.4s, v2.4s\n" + "sqadd v19.4s, v19.4s, v1.4s\n" + "sqadd v24.4s, v24.4s, v30.4s\n" + "sqadd v25.4s, v25.4s, v29.4s\n" + "sqadd v26.4s, v26.4s, v28.4s\n" + "sqadd v27.4s, v27.4s, v23.4s\n" "86:" // Height 3: no shift correction - "add x23, %x[qp], %[c_offset]\n" - "ld1r { v4.4s }, [x23]\n" + "add x20, %x[qp], %[c_offset]\n" + "ld1r { v29.4s }, [x20]\n" "srshl v31.4s, v31.4s, v0.4s\n" "srshl v20.4s, v20.4s, v0.4s\n" "srshl v21.4s, v21.4s, v0.4s\n" "srshl v22.4s, v22.4s, v0.4s\n" - "add x23, %x[qp], %[maxval]\n" - "ld1r { v6.4s }, [x23]\n" + "add x20, %x[qp], %[maxval]\n" + "ld1r { v28.4s }, [x20]\n" "srshl v16.4s, v16.4s, v0.4s\n" "srshl v17.4s, v17.4s, v0.4s\n" - "add x23, %x[qp], %[minval]\n" - "ld1r { v5.4s }, [x23]\n" + "add x20, %x[qp], %[minval]\n" + "ld1r { v23.4s }, [x20]\n" "srshl v18.4s, v18.4s, v0.4s\n" "srshl v19.4s, v19.4s, v0.4s\n" "cmp x9, #0x10\n" @@ -1285,132 +1284,132 @@ void a64_hybrid_u8qa_mmla_4x16 ( "srshl v25.4s, v25.4s, v0.4s\n" "srshl v26.4s, v26.4s, v0.4s\n" "srshl v27.4s, v27.4s, v0.4s\n" - "add v31.4s, v31.4s, v4.4s\n" - "add v20.4s, v20.4s, v4.4s\n" - "add v21.4s, v21.4s, v4.4s\n" - "add v22.4s, v22.4s, v4.4s\n" - "add v16.4s, v16.4s, v4.4s\n" - "add v17.4s, v17.4s, v4.4s\n" - "add v18.4s, v18.4s, v4.4s\n" - "add v19.4s, v19.4s, v4.4s\n" - "add v24.4s, v24.4s, v4.4s\n" - "add v25.4s, v25.4s, v4.4s\n" - "add v26.4s, v26.4s, v4.4s\n" - "add v27.4s, v27.4s, v4.4s\n" - "smin v31.4s, v31.4s, v6.4s\n" - "smin v20.4s, v20.4s, v6.4s\n" - "smin v21.4s, v21.4s, v6.4s\n" - "smin v22.4s, v22.4s, v6.4s\n" - "smin v16.4s, v16.4s, v6.4s\n" - "smin v17.4s, v17.4s, v6.4s\n" - "smin v18.4s, v18.4s, v6.4s\n" - "smin v19.4s, v19.4s, v6.4s\n" - "smin v24.4s, v24.4s, v6.4s\n" - "smin v25.4s, v25.4s, v6.4s\n" - "smin v26.4s, v26.4s, v6.4s\n" - "smin v27.4s, v27.4s, v6.4s\n" - "smax v31.4s, v31.4s, v5.4s\n" - "smax v20.4s, v20.4s, v5.4s\n" - "smax v21.4s, v21.4s, v5.4s\n" - "smax v22.4s, v22.4s, v5.4s\n" - "smax v16.4s, v16.4s, v5.4s\n" - "smax v17.4s, v17.4s, v5.4s\n" - "smax v18.4s, v18.4s, v5.4s\n" - "smax v19.4s, v19.4s, v5.4s\n" - "smax v24.4s, v24.4s, v5.4s\n" - "smax v25.4s, v25.4s, v5.4s\n" - "smax v26.4s, v26.4s, v5.4s\n" - "smax v27.4s, v27.4s, v5.4s\n" + "add v31.4s, v31.4s, v29.4s\n" + "add v20.4s, v20.4s, v29.4s\n" + "add v21.4s, v21.4s, v29.4s\n" + "add v22.4s, v22.4s, v29.4s\n" + "add v16.4s, v16.4s, v29.4s\n" + "add v17.4s, v17.4s, v29.4s\n" + "add v18.4s, v18.4s, v29.4s\n" + "add v19.4s, v19.4s, v29.4s\n" + "add v24.4s, v24.4s, v29.4s\n" + "add v25.4s, v25.4s, v29.4s\n" + "add v26.4s, v26.4s, v29.4s\n" + "add v27.4s, v27.4s, v29.4s\n" + "smin v31.4s, v31.4s, v28.4s\n" + "smin v20.4s, v20.4s, v28.4s\n" + "smin v21.4s, v21.4s, v28.4s\n" + "smin v22.4s, v22.4s, v28.4s\n" + "smin v16.4s, v16.4s, v28.4s\n" + "smin v17.4s, v17.4s, v28.4s\n" + "smin v18.4s, v18.4s, v28.4s\n" + "smin v19.4s, v19.4s, v28.4s\n" + "smin v24.4s, v24.4s, v28.4s\n" + "smin v25.4s, v25.4s, v28.4s\n" + "smin v26.4s, v26.4s, v28.4s\n" + "smin v27.4s, v27.4s, v28.4s\n" + "smax v31.4s, v31.4s, v23.4s\n" + "smax v20.4s, v20.4s, v23.4s\n" + "smax v21.4s, v21.4s, v23.4s\n" + "smax v22.4s, v22.4s, v23.4s\n" + "smax v16.4s, v16.4s, v23.4s\n" + "smax v17.4s, v17.4s, v23.4s\n" + "smax v18.4s, v18.4s, v23.4s\n" + "smax v19.4s, v19.4s, v23.4s\n" + "smax v24.4s, v24.4s, v23.4s\n" + "smax 
v25.4s, v25.4s, v23.4s\n" + "smax v26.4s, v26.4s, v23.4s\n" + "smax v27.4s, v27.4s, v23.4s\n" "uzp1 v31.8h, v31.8h, v20.8h\n" "uzp1 v20.8h, v21.8h, v22.8h\n" "uzp1 v16.8h, v16.8h, v17.8h\n" - "uzp1 v17.8h, v18.8h, v19.8h\n" + "uzp1 v18.8h, v18.8h, v19.8h\n" "uzp1 v24.8h, v24.8h, v25.8h\n" - "uzp1 v25.8h, v26.8h, v27.8h\n" + "uzp1 v17.8h, v26.8h, v27.8h\n" "uzp1 v31.16b, v31.16b, v20.16b\n" - "uzp1 v16.16b, v16.16b, v17.16b\n" - "uzp1 v24.16b, v24.16b, v25.16b\n" + "uzp1 v16.16b, v16.16b, v18.16b\n" + "uzp1 v24.16b, v24.16b, v17.16b\n" "bge 95f\n" "tbz x9, #3, 90f\n" "str d31, [x27], #0x8\n" - "str d16, [x22], #0x8\n" - "str d24, [x21], #0x8\n" + "str d16, [x23], #0x8\n" + "str d24, [x22], #0x8\n" "tbz x9, #2, 88f\n" "st1 { v31.s }[2], [x27], #0x4\n" - "st1 { v16.s }[2], [x22], #0x4\n" - "st1 { v24.s }[2], [x21], #0x4\n" + "st1 { v16.s }[2], [x23], #0x4\n" + "st1 { v24.s }[2], [x22], #0x4\n" "tbz x9, #1, 87f\n" "st1 { v31.h }[6], [x27], #0x2\n" - "st1 { v16.h }[6], [x22], #0x2\n" - "st1 { v24.h }[6], [x21], #0x2\n" + "st1 { v16.h }[6], [x23], #0x2\n" + "st1 { v24.h }[6], [x22], #0x2\n" "tbz x9, #0, 94f\n" "st1 { v31.b }[14], [x27]\n" - "st1 { v16.b }[14], [x22]\n" - "st1 { v24.b }[14], [x21]\n" + "st1 { v16.b }[14], [x23]\n" + "st1 { v24.b }[14], [x22]\n" "b 94f\n" "87:" // Height 3: Partial direct writeback: partial_1_12 "tbz x9, #0, 94f\n" "st1 { v31.b }[12], [x27]\n" - "st1 { v16.b }[12], [x22]\n" - "st1 { v24.b }[12], [x21]\n" + "st1 { v16.b }[12], [x23]\n" + "st1 { v24.b }[12], [x22]\n" "b 94f\n" "88:" // Height 3: Partial direct writeback: partial_2_8 "tbz x9, #1, 89f\n" "st1 { v31.h }[4], [x27], #0x2\n" - "st1 { v16.h }[4], [x22], #0x2\n" - "st1 { v24.h }[4], [x21], #0x2\n" + "st1 { v16.h }[4], [x23], #0x2\n" + "st1 { v24.h }[4], [x22], #0x2\n" "tbz x9, #0, 94f\n" "st1 { v31.b }[10], [x27]\n" - "st1 { v16.b }[10], [x22]\n" - "st1 { v24.b }[10], [x21]\n" + "st1 { v16.b }[10], [x23]\n" + "st1 { v24.b }[10], [x22]\n" "b 94f\n" "89:" // Height 3: Partial direct writeback: partial_1_8 "tbz x9, #0, 94f\n" "st1 { v31.b }[8], [x27]\n" - "st1 { v16.b }[8], [x22]\n" - "st1 { v24.b }[8], [x21]\n" + "st1 { v16.b }[8], [x23]\n" + "st1 { v24.b }[8], [x22]\n" "b 94f\n" "90:" // Height 3: Partial direct writeback: partial_4_0 "tbz x9, #2, 92f\n" "str s31, [x27], #0x4\n" - "str s16, [x22], #0x4\n" - "str s24, [x21], #0x4\n" + "str s16, [x23], #0x4\n" + "str s24, [x22], #0x4\n" "tbz x9, #1, 91f\n" "st1 { v31.h }[2], [x27], #0x2\n" - "st1 { v16.h }[2], [x22], #0x2\n" - "st1 { v24.h }[2], [x21], #0x2\n" + "st1 { v16.h }[2], [x23], #0x2\n" + "st1 { v24.h }[2], [x22], #0x2\n" "tbz x9, #0, 94f\n" "st1 { v31.b }[6], [x27]\n" - "st1 { v16.b }[6], [x22]\n" - "st1 { v24.b }[6], [x21]\n" + "st1 { v16.b }[6], [x23]\n" + "st1 { v24.b }[6], [x22]\n" "b 94f\n" "91:" // Height 3: Partial direct writeback: partial_1_4 "tbz x9, #0, 94f\n" "st1 { v31.b }[4], [x27]\n" - "st1 { v16.b }[4], [x22]\n" - "st1 { v24.b }[4], [x21]\n" + "st1 { v16.b }[4], [x23]\n" + "st1 { v24.b }[4], [x22]\n" "b 94f\n" "92:" // Height 3: Partial direct writeback: partial_2_0 "tbz x9, #1, 93f\n" "str h31, [x27], #0x2\n" - "str h16, [x22], #0x2\n" - "str h24, [x21], #0x2\n" + "str h16, [x23], #0x2\n" + "str h24, [x22], #0x2\n" "tbz x9, #0, 94f\n" "st1 { v31.b }[2], [x27]\n" - "st1 { v16.b }[2], [x22]\n" - "st1 { v24.b }[2], [x21]\n" + "st1 { v16.b }[2], [x23]\n" + "st1 { v24.b }[2], [x22]\n" "b 94f\n" "93:" // Height 3: Partial direct writeback: partial_1_0 "str b31, [x27, #0x0]\n" - "str b16, [x22, #0x0]\n" - "str b24, [x21, #0x0]\n" + "str 
b16, [x23, #0x0]\n" + "str b24, [x22, #0x0]\n" "94:" // Height 3: Partial direct writeback: Done "b 96f\n" "95:" // Height 3: Full writeback "str q31, [x27, #0x0]\n" "add x27, x27, #0x10\n" - "str q16, [x22, #0x0]\n" - "str q24, [x21, #0x0]\n" + "str q16, [x23, #0x0]\n" + "str q24, [x22, #0x0]\n" "96:" // Height 3: Writeback done "subs x9, x9, #0x10\n" "bgt 66b\n" @@ -1451,14 +1450,14 @@ void a64_hybrid_u8qa_mmla_4x16 ( "100:" // Height 4: String loop "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n" "ldr w25, [x20, x26, LSL #0x2]\n" - "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n" + "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n" "tbz %x[flags], #3, 101f\n" - "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n" - "add x21, x21, x20, LSL #3\n" - "ldr x24, [x21, #0x0]\n" - "ldr x23, [x21, #0x8]\n" - "ldr x22, [x21, #0x10]\n" - "ldr x21, [x21, #0x18]\n" + "ldr x20, [%x[input_ptr], x26, LSL #0x3]\n" + "add x20, x20, x21, LSL #3\n" + "ldr x24, [x20, #0x0]\n" + "ldr x23, [x20, #0x8]\n" + "ldr x22, [x20, #0x10]\n" + "ldr x21, [x20, #0x18]\n" "cbnz x26, 102f\n" "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n" "add x24, x24, x20\n" @@ -1468,9 +1467,9 @@ void a64_hybrid_u8qa_mmla_4x16 ( "b 102f\n" "101:" // Height 4: setup direct input "mov x24, %x[input_ptr]\n" - "add x23, x24, x20\n" - "add x22, x23, x20\n" - "add x21, x22, x20\n" + "add x23, x24, x21\n" + "add x22, x23, x21\n" + "add x21, x22, x21\n" "102:" // Height 4: input setup done "cmp x25, #0x10\n" "blt 107f\n" @@ -1630,42 +1629,42 @@ void a64_hybrid_u8qa_mmla_4x16 ( "blt 110f\n" "108:" // Height 4: Multiply loop: Odd block loop "ldr d1, [x24], #0x8\n" - "ldr d2, [x23], #0x8\n" - "trn1 v0.2d, v1.2d, v2.2d\n" - "ldr d3, [x22], #0x8\n" - "ldr d7, [x21], #0x8\n" - "trn1 v2.2d, v3.2d, v7.2d\n" + "ldr d0, [x23], #0x8\n" + "trn1 v0.2d, v1.2d, v0.2d\n" + "ldr d2, [x22], #0x8\n" + "ldr d1, [x21], #0x8\n" + "trn1 v2.2d, v2.2d, v1.2d\n" "tbnz %x[flags], #31, 109f\n" ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n" ".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n" "109:" // Height 4: Multiply loop: unique 15: skip row sum - "ldr q8, [x28, #0x0]\n" - "ldr q9, [x28, #0x10]\n" - ".inst 0x6e88a410 // ummla v16.4s, v0.16b, v8.16b\n" - ".inst 0x6e88a458 // ummla v24.4s, v2.16b, v8.16b\n" - "ldr q10, [x28, #0x20]\n" - "ldr q4, [x28, #0x30]\n" + "ldr q3, [x28, #0x0]\n" + "ldr q1, [x28, #0x10]\n" + ".inst 0x6e83a410 // ummla v16.4s, v0.16b, v3.16b\n" + ".inst 0x6e83a458 // ummla v24.4s, v2.16b, v3.16b\n" + "ldr q7, [x28, #0x20]\n" + "ldr q6, [x28, #0x30]\n" "sub x25, x25, #0x8\n" "cmp x25, #0x8\n" "ldr q5, [x28, #0x40]\n" - "ldr q6, [x28, #0x50]\n" - ".inst 0x6e89a414 // ummla v20.4s, v0.16b, v9.16b\n" - ".inst 0x6e89a45c // ummla v28.4s, v2.16b, v9.16b\n" - "ldr q7, [x28, #0x60]\n" - "ldr q8, [x28, #0x70]\n" - ".inst 0x6e8aa411 // ummla v17.4s, v0.16b, v10.16b\n" - ".inst 0x6e8aa459 // ummla v25.4s, v2.16b, v10.16b\n" - ".inst 0x6e84a415 // ummla v21.4s, v0.16b, v4.16b\n" - ".inst 0x6e84a45d // ummla v29.4s, v2.16b, v4.16b\n" + "ldr q4, [x28, #0x50]\n" + ".inst 0x6e81a414 // ummla v20.4s, v0.16b, v1.16b\n" + ".inst 0x6e81a45c // ummla v28.4s, v2.16b, v1.16b\n" + "ldr q3, [x28, #0x60]\n" + "ldr q1, [x28, #0x70]\n" + ".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n" + ".inst 0x6e87a459 // ummla v25.4s, v2.16b, v7.16b\n" + ".inst 0x6e86a415 // ummla v21.4s, v0.16b, v6.16b\n" + ".inst 0x6e86a45d // ummla v29.4s, v2.16b, v6.16b\n" "add x28, x28, #0x80\n" ".inst 0x6e85a412 // ummla v18.4s, v0.16b, v5.16b\n" ".inst 0x6e85a45a // ummla 
v26.4s, v2.16b, v5.16b\n" - ".inst 0x6e86a416 // ummla v22.4s, v0.16b, v6.16b\n" - ".inst 0x6e86a45e // ummla v30.4s, v2.16b, v6.16b\n" - ".inst 0x6e87a413 // ummla v19.4s, v0.16b, v7.16b\n" - ".inst 0x6e87a45b // ummla v27.4s, v2.16b, v7.16b\n" - ".inst 0x6e88a417 // ummla v23.4s, v0.16b, v8.16b\n" - ".inst 0x6e88a45f // ummla v31.4s, v2.16b, v8.16b\n" + ".inst 0x6e84a416 // ummla v22.4s, v0.16b, v4.16b\n" + ".inst 0x6e84a45e // ummla v30.4s, v2.16b, v4.16b\n" + ".inst 0x6e83a413 // ummla v19.4s, v0.16b, v3.16b\n" + ".inst 0x6e83a45b // ummla v27.4s, v2.16b, v3.16b\n" + ".inst 0x6e81a417 // ummla v23.4s, v0.16b, v1.16b\n" + ".inst 0x6e81a45f // ummla v31.4s, v2.16b, v1.16b\n" "bge 108b\n" "110:" // Height 4: Multiply loop: Skip odd blocks "cbz x25, 116f\n" @@ -1716,51 +1715,51 @@ void a64_hybrid_u8qa_mmla_4x16 ( ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n" ".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n" "115:" // Height 4: Multiply loop: unique 16: skip row sum - "ldr q10, [x28, #0x0]\n" - "ldr q4, [x28, #0x10]\n" - ".inst 0x6e8aa410 // ummla v16.4s, v0.16b, v10.16b\n" - ".inst 0x6e8aa458 // ummla v24.4s, v2.16b, v10.16b\n" - "ldr q5, [x28, #0x20]\n" + "ldr q1, [x28, #0x0]\n" + "ldr q3, [x28, #0x10]\n" + ".inst 0x6e81a410 // ummla v16.4s, v0.16b, v1.16b\n" + ".inst 0x6e81a458 // ummla v24.4s, v2.16b, v1.16b\n" + "ldr q1, [x28, #0x20]\n" "ldr q6, [x28, #0x30]\n" - ".inst 0x6e84a414 // ummla v20.4s, v0.16b, v4.16b\n" - ".inst 0x6e84a45c // ummla v28.4s, v2.16b, v4.16b\n" - "ldr q7, [x28, #0x40]\n" - "ldr q8, [x28, #0x50]\n" - ".inst 0x6e85a411 // ummla v17.4s, v0.16b, v5.16b\n" - ".inst 0x6e85a459 // ummla v25.4s, v2.16b, v5.16b\n" - "ldr q9, [x28, #0x60]\n" - "ldr q10, [x28, #0x70]\n" + ".inst 0x6e83a414 // ummla v20.4s, v0.16b, v3.16b\n" + ".inst 0x6e83a45c // ummla v28.4s, v2.16b, v3.16b\n" + "ldr q5, [x28, #0x40]\n" + "ldr q4, [x28, #0x50]\n" + ".inst 0x6e81a411 // ummla v17.4s, v0.16b, v1.16b\n" + ".inst 0x6e81a459 // ummla v25.4s, v2.16b, v1.16b\n" + "ldr q3, [x28, #0x60]\n" + "ldr q1, [x28, #0x70]\n" ".inst 0x6e86a415 // ummla v21.4s, v0.16b, v6.16b\n" ".inst 0x6e86a45d // ummla v29.4s, v2.16b, v6.16b\n" - ".inst 0x6e87a412 // ummla v18.4s, v0.16b, v7.16b\n" - ".inst 0x6e87a45a // ummla v26.4s, v2.16b, v7.16b\n" + ".inst 0x6e85a412 // ummla v18.4s, v0.16b, v5.16b\n" + ".inst 0x6e85a45a // ummla v26.4s, v2.16b, v5.16b\n" "add x28, x28, #0x80\n" - ".inst 0x6e88a416 // ummla v22.4s, v0.16b, v8.16b\n" - ".inst 0x6e88a45e // ummla v30.4s, v2.16b, v8.16b\n" - ".inst 0x6e89a413 // ummla v19.4s, v0.16b, v9.16b\n" - ".inst 0x6e89a45b // ummla v27.4s, v2.16b, v9.16b\n" - ".inst 0x6e8aa417 // ummla v23.4s, v0.16b, v10.16b\n" - ".inst 0x6e8aa45f // ummla v31.4s, v2.16b, v10.16b\n" + ".inst 0x6e84a416 // ummla v22.4s, v0.16b, v4.16b\n" + ".inst 0x6e84a45e // ummla v30.4s, v2.16b, v4.16b\n" + ".inst 0x6e83a413 // ummla v19.4s, v0.16b, v3.16b\n" + ".inst 0x6e83a45b // ummla v27.4s, v2.16b, v3.16b\n" + ".inst 0x6e81a417 // ummla v23.4s, v0.16b, v1.16b\n" + ".inst 0x6e81a45f // ummla v31.4s, v2.16b, v1.16b\n" "116:" // Height 4: Multiply loop: No odd multiplies "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n" "add x26, x26, #0x1\n" "cmp x26, x20\n" "bne 100b\n" "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n" - "uzp1 v4.2d, v16.2d, v20.2d\n" - "add x22, x27, x20\n" + "uzp1 v0.2d, v16.2d, v20.2d\n" + "add x23, x27, x20\n" + "add x22, x23, x20\n" "add x21, x22, x20\n" - "add x20, x21, x20\n" "uzp2 v16.2d, v16.2d, v20.2d\n" "uzp1 v20.2d, v17.2d, v21.2d\n" "prfm pstl1keep, [x27, 
#0x0]\n" "uzp2 v17.2d, v17.2d, v21.2d\n" "uzp1 v21.2d, v18.2d, v22.2d\n" + "prfm pstl1keep, [x23, #0x0]\n" "prfm pstl1keep, [x22, #0x0]\n" - "prfm pstl1keep, [x21, #0x0]\n" "uzp2 v18.2d, v18.2d, v22.2d\n" "uzp1 v22.2d, v19.2d, v23.2d\n" - "prfm pstl1keep, [x20, #0x0]\n" + "prfm pstl1keep, [x21, #0x0]\n" "uzp2 v19.2d, v19.2d, v23.2d\n" "uzp1 v23.2d, v24.2d, v28.2d\n" "uzp2 v24.2d, v24.2d, v28.2d\n" @@ -1770,38 +1769,38 @@ void a64_hybrid_u8qa_mmla_4x16 ( "uzp2 v26.2d, v26.2d, v30.2d\n" "uzp1 v30.2d, v27.2d, v31.2d\n" "uzp2 v27.2d, v27.2d, v31.2d\n" - "mov v31.16b, v4.16b\n" + "mov v31.16b, v0.16b\n" "tbnz %x[flags], #31, 117f\n" - "add x23, %x[qp], %[b_offset]\n" - "ld1r { v4.4s }, [x23]\n" + "add x20, %x[qp], %[b_offset]\n" + "ld1r { v0.4s }, [x20]\n" "addp v11.4s, v11.4s, v11.4s\n" "addp v13.4s, v13.4s, v13.4s\n" - "neg v4.4s, v4.4s\n" + "neg v0.4s, v0.4s\n" "dup v12.4s, v11.s[3]\n" "dup v11.4s, v11.s[0]\n" "dup v14.4s, v13.s[3]\n" "dup v13.4s, v13.s[0]\n" - "mul v11.4s, v11.4s, v4.4s\n" - "mul v12.4s, v12.4s, v4.4s\n" - "mul v13.4s, v13.4s, v4.4s\n" - "mul v14.4s, v14.4s, v4.4s\n" + "mul v11.4s, v11.4s, v0.4s\n" + "mul v12.4s, v12.4s, v0.4s\n" + "mul v13.4s, v13.4s, v0.4s\n" + "mul v14.4s, v14.4s, v0.4s\n" "117:" // Height 4: skip row sum fixup "ldr q0, [x10, #0x0]\n" - "ldr q1, [x10, #0x10]\n" + "ldr q4, [x10, #0x10]\n" "add v31.4s, v31.4s, v11.4s\n" "add v20.4s, v20.4s, v11.4s\n" - "ldr q2, [x10, #0x20]\n" - "ldr q3, [x10, #0x30]\n" + "ldr q3, [x10, #0x20]\n" + "ldr q2, [x10, #0x30]\n" "add v21.4s, v21.4s, v11.4s\n" "add v22.4s, v22.4s, v11.4s\n" "add v16.4s, v16.4s, v12.4s\n" "add v17.4s, v17.4s, v12.4s\n" - "add x23, %x[qp], %[per_layer_mul]\n" - "ld1r { v4.4s }, [x23]\n" + "add x20, %x[qp], %[per_layer_mul]\n" + "ld1r { v1.4s }, [x20]\n" "add v18.4s, v18.4s, v12.4s\n" "add v19.4s, v19.4s, v12.4s\n" "orr %x[flags], %x[flags], #0x80000000\n" - "add x23, %x[qp], %[per_layer_right_shift]\n" + "add x20, %x[qp], %[per_layer_right_shift]\n" "add v23.4s, v23.4s, v13.4s\n" "add v28.4s, v28.4s, v13.4s\n" "add x10, x10, #0x40\n" @@ -1812,100 +1811,100 @@ void a64_hybrid_u8qa_mmla_4x16 ( "add v26.4s, v26.4s, v14.4s\n" "add v27.4s, v27.4s, v14.4s\n" "add v31.4s, v31.4s, v0.4s\n" - "add v20.4s, v20.4s, v1.4s\n" - "add v21.4s, v21.4s, v2.4s\n" - "add v22.4s, v22.4s, v3.4s\n" + "add v20.4s, v20.4s, v4.4s\n" + "add v21.4s, v21.4s, v3.4s\n" + "add v22.4s, v22.4s, v2.4s\n" "add v16.4s, v16.4s, v0.4s\n" - "add v17.4s, v17.4s, v1.4s\n" - "add v18.4s, v18.4s, v2.4s\n" - "add v19.4s, v19.4s, v3.4s\n" + "add v17.4s, v17.4s, v4.4s\n" + "add v18.4s, v18.4s, v3.4s\n" + "add v19.4s, v19.4s, v2.4s\n" "add v23.4s, v23.4s, v0.4s\n" - "add v28.4s, v28.4s, v1.4s\n" - "add v29.4s, v29.4s, v2.4s\n" - "add v30.4s, v30.4s, v3.4s\n" + "add v28.4s, v28.4s, v4.4s\n" + "add v29.4s, v29.4s, v3.4s\n" + "add v30.4s, v30.4s, v2.4s\n" "add v24.4s, v24.4s, v0.4s\n" - "ld1r { v0.4s }, [x23]\n" - "add v25.4s, v25.4s, v1.4s\n" - "add v26.4s, v26.4s, v2.4s\n" - "add v27.4s, v27.4s, v3.4s\n" - "sqrdmulh v31.4s, v31.4s, v4.4s\n" - "sqrdmulh v20.4s, v20.4s, v4.4s\n" - "sqrdmulh v21.4s, v21.4s, v4.4s\n" - "sqrdmulh v22.4s, v22.4s, v4.4s\n" - "sqrdmulh v16.4s, v16.4s, v4.4s\n" - "sqrdmulh v17.4s, v17.4s, v4.4s\n" - "sqrdmulh v18.4s, v18.4s, v4.4s\n" - "sqrdmulh v19.4s, v19.4s, v4.4s\n" - "sqrdmulh v23.4s, v23.4s, v4.4s\n" - "sqrdmulh v28.4s, v28.4s, v4.4s\n" - "sqrdmulh v29.4s, v29.4s, v4.4s\n" - "sqrdmulh v30.4s, v30.4s, v4.4s\n" - "sqrdmulh v24.4s, v24.4s, v4.4s\n" - "sqrdmulh v25.4s, v25.4s, v4.4s\n" - "sqrdmulh v26.4s, v26.4s, 
v4.4s\n" - "sqrdmulh v27.4s, v27.4s, v4.4s\n" + "ld1r { v0.4s }, [x20]\n" + "add v25.4s, v25.4s, v4.4s\n" + "add v26.4s, v26.4s, v3.4s\n" + "add v27.4s, v27.4s, v2.4s\n" + "sqrdmulh v31.4s, v31.4s, v1.4s\n" + "sqrdmulh v20.4s, v20.4s, v1.4s\n" + "sqrdmulh v21.4s, v21.4s, v1.4s\n" + "sqrdmulh v22.4s, v22.4s, v1.4s\n" + "sqrdmulh v16.4s, v16.4s, v1.4s\n" + "sqrdmulh v17.4s, v17.4s, v1.4s\n" + "sqrdmulh v18.4s, v18.4s, v1.4s\n" + "sqrdmulh v19.4s, v19.4s, v1.4s\n" + "sqrdmulh v23.4s, v23.4s, v1.4s\n" + "sqrdmulh v28.4s, v28.4s, v1.4s\n" + "sqrdmulh v29.4s, v29.4s, v1.4s\n" + "sqrdmulh v30.4s, v30.4s, v1.4s\n" + "sqrdmulh v24.4s, v24.4s, v1.4s\n" + "sqrdmulh v25.4s, v25.4s, v1.4s\n" + "sqrdmulh v26.4s, v26.4s, v1.4s\n" + "sqrdmulh v27.4s, v27.4s, v1.4s\n" "tbz %x[flags], #5, 118f\n" - "and v4.16b, v31.16b, v0.16b\n" - "and v5.16b, v20.16b, v0.16b\n" - "sshr v4.4s, v4.4s, #0x1f\n" - "sshr v5.4s, v5.4s, #0x1f\n" - "sqadd v31.4s, v31.4s, v4.4s\n" - "sqadd v20.4s, v20.4s, v5.4s\n" - "and v6.16b, v21.16b, v0.16b\n" - "and v7.16b, v22.16b, v0.16b\n" - "and v8.16b, v16.16b, v0.16b\n" - "and v9.16b, v17.16b, v0.16b\n" - "and v10.16b, v18.16b, v0.16b\n" - "and v4.16b, v19.16b, v0.16b\n" - "and v5.16b, v23.16b, v0.16b\n" - "sshr v6.4s, v6.4s, #0x1f\n" + "and v2.16b, v31.16b, v0.16b\n" + "and v1.16b, v20.16b, v0.16b\n" + "sshr v2.4s, v2.4s, #0x1f\n" + "sshr v1.4s, v1.4s, #0x1f\n" + "sqadd v31.4s, v31.4s, v2.4s\n" + "sqadd v20.4s, v20.4s, v1.4s\n" + "and v7.16b, v21.16b, v0.16b\n" + "and v6.16b, v22.16b, v0.16b\n" + "and v5.16b, v16.16b, v0.16b\n" + "and v4.16b, v17.16b, v0.16b\n" + "and v3.16b, v18.16b, v0.16b\n" + "and v2.16b, v19.16b, v0.16b\n" + "and v1.16b, v23.16b, v0.16b\n" "sshr v7.4s, v7.4s, #0x1f\n" - "sshr v8.4s, v8.4s, #0x1f\n" - "sshr v9.4s, v9.4s, #0x1f\n" - "sshr v10.4s, v10.4s, #0x1f\n" - "sshr v4.4s, v4.4s, #0x1f\n" - "sshr v5.4s, v5.4s, #0x1f\n" - "sqadd v21.4s, v21.4s, v6.4s\n" - "sqadd v22.4s, v22.4s, v7.4s\n" - "sqadd v16.4s, v16.4s, v8.4s\n" - "sqadd v17.4s, v17.4s, v9.4s\n" - "sqadd v18.4s, v18.4s, v10.4s\n" - "sqadd v19.4s, v19.4s, v4.4s\n" - "sqadd v23.4s, v23.4s, v5.4s\n" - "and v6.16b, v28.16b, v0.16b\n" - "and v7.16b, v29.16b, v0.16b\n" - "and v8.16b, v30.16b, v0.16b\n" - "and v9.16b, v24.16b, v0.16b\n" - "and v10.16b, v25.16b, v0.16b\n" - "and v4.16b, v26.16b, v0.16b\n" - "and v5.16b, v27.16b, v0.16b\n" "sshr v6.4s, v6.4s, #0x1f\n" - "sshr v7.4s, v7.4s, #0x1f\n" - "sshr v8.4s, v8.4s, #0x1f\n" - "sshr v9.4s, v9.4s, #0x1f\n" - "sshr v10.4s, v10.4s, #0x1f\n" + "sshr v5.4s, v5.4s, #0x1f\n" "sshr v4.4s, v4.4s, #0x1f\n" + "sshr v3.4s, v3.4s, #0x1f\n" + "sshr v2.4s, v2.4s, #0x1f\n" + "sshr v1.4s, v1.4s, #0x1f\n" + "sqadd v21.4s, v21.4s, v7.4s\n" + "sqadd v22.4s, v22.4s, v6.4s\n" + "sqadd v16.4s, v16.4s, v5.4s\n" + "sqadd v17.4s, v17.4s, v4.4s\n" + "sqadd v18.4s, v18.4s, v3.4s\n" + "sqadd v19.4s, v19.4s, v2.4s\n" + "sqadd v23.4s, v23.4s, v1.4s\n" + "and v7.16b, v28.16b, v0.16b\n" + "and v6.16b, v29.16b, v0.16b\n" + "and v5.16b, v30.16b, v0.16b\n" + "and v4.16b, v24.16b, v0.16b\n" + "and v3.16b, v25.16b, v0.16b\n" + "and v2.16b, v26.16b, v0.16b\n" + "and v1.16b, v27.16b, v0.16b\n" + "sshr v7.4s, v7.4s, #0x1f\n" + "sshr v6.4s, v6.4s, #0x1f\n" "sshr v5.4s, v5.4s, #0x1f\n" - "sqadd v28.4s, v28.4s, v6.4s\n" - "sqadd v29.4s, v29.4s, v7.4s\n" - "sqadd v30.4s, v30.4s, v8.4s\n" - "sqadd v24.4s, v24.4s, v9.4s\n" - "sqadd v25.4s, v25.4s, v10.4s\n" - "sqadd v26.4s, v26.4s, v4.4s\n" - "sqadd v27.4s, v27.4s, v5.4s\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "sshr v3.4s, v3.4s, #0x1f\n" + "sshr v2.4s, 
v2.4s, #0x1f\n" + "sshr v1.4s, v1.4s, #0x1f\n" + "sqadd v28.4s, v28.4s, v7.4s\n" + "sqadd v29.4s, v29.4s, v6.4s\n" + "sqadd v30.4s, v30.4s, v5.4s\n" + "sqadd v24.4s, v24.4s, v4.4s\n" + "sqadd v25.4s, v25.4s, v3.4s\n" + "sqadd v26.4s, v26.4s, v2.4s\n" + "sqadd v27.4s, v27.4s, v1.4s\n" "118:" // Height 4: no shift correction - "add x23, %x[qp], %[c_offset]\n" - "ld1r { v4.4s }, [x23]\n" + "add x20, %x[qp], %[c_offset]\n" + "ld1r { v3.4s }, [x20]\n" "srshl v31.4s, v31.4s, v0.4s\n" "srshl v20.4s, v20.4s, v0.4s\n" "srshl v21.4s, v21.4s, v0.4s\n" "srshl v22.4s, v22.4s, v0.4s\n" - "add x23, %x[qp], %[maxval]\n" - "ld1r { v6.4s }, [x23]\n" + "add x20, %x[qp], %[maxval]\n" + "ld1r { v2.4s }, [x20]\n" "srshl v16.4s, v16.4s, v0.4s\n" "srshl v17.4s, v17.4s, v0.4s\n" - "add x23, %x[qp], %[minval]\n" - "ld1r { v5.4s }, [x23]\n" + "add x20, %x[qp], %[minval]\n" + "ld1r { v1.4s }, [x20]\n" "srshl v18.4s, v18.4s, v0.4s\n" "srshl v19.4s, v19.4s, v0.4s\n" "cmp x9, #0x10\n" @@ -1917,163 +1916,163 @@ void a64_hybrid_u8qa_mmla_4x16 ( "srshl v25.4s, v25.4s, v0.4s\n" "srshl v26.4s, v26.4s, v0.4s\n" "srshl v27.4s, v27.4s, v0.4s\n" - "add v31.4s, v31.4s, v4.4s\n" - "add v20.4s, v20.4s, v4.4s\n" - "add v21.4s, v21.4s, v4.4s\n" - "add v22.4s, v22.4s, v4.4s\n" - "add v16.4s, v16.4s, v4.4s\n" - "add v17.4s, v17.4s, v4.4s\n" - "add v18.4s, v18.4s, v4.4s\n" - "add v19.4s, v19.4s, v4.4s\n" - "add v23.4s, v23.4s, v4.4s\n" - "add v28.4s, v28.4s, v4.4s\n" - "add v29.4s, v29.4s, v4.4s\n" - "add v30.4s, v30.4s, v4.4s\n" - "add v24.4s, v24.4s, v4.4s\n" - "add v25.4s, v25.4s, v4.4s\n" - "add v26.4s, v26.4s, v4.4s\n" - "add v27.4s, v27.4s, v4.4s\n" - "smin v31.4s, v31.4s, v6.4s\n" - "smin v20.4s, v20.4s, v6.4s\n" - "smin v21.4s, v21.4s, v6.4s\n" - "smin v22.4s, v22.4s, v6.4s\n" - "smin v16.4s, v16.4s, v6.4s\n" - "smin v17.4s, v17.4s, v6.4s\n" - "smin v18.4s, v18.4s, v6.4s\n" - "smin v19.4s, v19.4s, v6.4s\n" - "smin v23.4s, v23.4s, v6.4s\n" - "smin v28.4s, v28.4s, v6.4s\n" - "smin v29.4s, v29.4s, v6.4s\n" - "smin v30.4s, v30.4s, v6.4s\n" - "smin v24.4s, v24.4s, v6.4s\n" - "smin v25.4s, v25.4s, v6.4s\n" - "smin v26.4s, v26.4s, v6.4s\n" - "smin v27.4s, v27.4s, v6.4s\n" - "smax v31.4s, v31.4s, v5.4s\n" - "smax v20.4s, v20.4s, v5.4s\n" - "smax v21.4s, v21.4s, v5.4s\n" - "smax v22.4s, v22.4s, v5.4s\n" - "smax v16.4s, v16.4s, v5.4s\n" - "smax v17.4s, v17.4s, v5.4s\n" - "smax v18.4s, v18.4s, v5.4s\n" - "smax v19.4s, v19.4s, v5.4s\n" - "smax v23.4s, v23.4s, v5.4s\n" - "smax v28.4s, v28.4s, v5.4s\n" - "smax v29.4s, v29.4s, v5.4s\n" - "smax v30.4s, v30.4s, v5.4s\n" - "smax v24.4s, v24.4s, v5.4s\n" - "smax v25.4s, v25.4s, v5.4s\n" - "smax v26.4s, v26.4s, v5.4s\n" - "smax v27.4s, v27.4s, v5.4s\n" + "add v31.4s, v31.4s, v3.4s\n" + "add v20.4s, v20.4s, v3.4s\n" + "add v21.4s, v21.4s, v3.4s\n" + "add v22.4s, v22.4s, v3.4s\n" + "add v16.4s, v16.4s, v3.4s\n" + "add v17.4s, v17.4s, v3.4s\n" + "add v18.4s, v18.4s, v3.4s\n" + "add v19.4s, v19.4s, v3.4s\n" + "add v23.4s, v23.4s, v3.4s\n" + "add v28.4s, v28.4s, v3.4s\n" + "add v29.4s, v29.4s, v3.4s\n" + "add v30.4s, v30.4s, v3.4s\n" + "add v24.4s, v24.4s, v3.4s\n" + "add v25.4s, v25.4s, v3.4s\n" + "add v26.4s, v26.4s, v3.4s\n" + "add v27.4s, v27.4s, v3.4s\n" + "smin v31.4s, v31.4s, v2.4s\n" + "smin v20.4s, v20.4s, v2.4s\n" + "smin v21.4s, v21.4s, v2.4s\n" + "smin v22.4s, v22.4s, v2.4s\n" + "smin v16.4s, v16.4s, v2.4s\n" + "smin v17.4s, v17.4s, v2.4s\n" + "smin v18.4s, v18.4s, v2.4s\n" + "smin v19.4s, v19.4s, v2.4s\n" + "smin v23.4s, v23.4s, v2.4s\n" + "smin v28.4s, v28.4s, v2.4s\n" + "smin v29.4s, 
v29.4s, v2.4s\n" + "smin v30.4s, v30.4s, v2.4s\n" + "smin v24.4s, v24.4s, v2.4s\n" + "smin v25.4s, v25.4s, v2.4s\n" + "smin v26.4s, v26.4s, v2.4s\n" + "smin v27.4s, v27.4s, v2.4s\n" + "smax v31.4s, v31.4s, v1.4s\n" + "smax v20.4s, v20.4s, v1.4s\n" + "smax v21.4s, v21.4s, v1.4s\n" + "smax v22.4s, v22.4s, v1.4s\n" + "smax v16.4s, v16.4s, v1.4s\n" + "smax v17.4s, v17.4s, v1.4s\n" + "smax v18.4s, v18.4s, v1.4s\n" + "smax v19.4s, v19.4s, v1.4s\n" + "smax v23.4s, v23.4s, v1.4s\n" + "smax v28.4s, v28.4s, v1.4s\n" + "smax v29.4s, v29.4s, v1.4s\n" + "smax v30.4s, v30.4s, v1.4s\n" + "smax v24.4s, v24.4s, v1.4s\n" + "smax v25.4s, v25.4s, v1.4s\n" + "smax v26.4s, v26.4s, v1.4s\n" + "smax v27.4s, v27.4s, v1.4s\n" "uzp1 v31.8h, v31.8h, v20.8h\n" "uzp1 v20.8h, v21.8h, v22.8h\n" "uzp1 v16.8h, v16.8h, v17.8h\n" - "uzp1 v17.8h, v18.8h, v19.8h\n" + "uzp1 v19.8h, v18.8h, v19.8h\n" "uzp1 v23.8h, v23.8h, v28.8h\n" - "uzp1 v28.8h, v29.8h, v30.8h\n" + "uzp1 v18.8h, v29.8h, v30.8h\n" "uzp1 v24.8h, v24.8h, v25.8h\n" - "uzp1 v25.8h, v26.8h, v27.8h\n" + "uzp1 v17.8h, v26.8h, v27.8h\n" "uzp1 v31.16b, v31.16b, v20.16b\n" - "uzp1 v16.16b, v16.16b, v17.16b\n" - "uzp1 v23.16b, v23.16b, v28.16b\n" - "uzp1 v24.16b, v24.16b, v25.16b\n" + "uzp1 v16.16b, v16.16b, v19.16b\n" + "uzp1 v23.16b, v23.16b, v18.16b\n" + "uzp1 v24.16b, v24.16b, v17.16b\n" "bge 127f\n" "tbz x9, #3, 122f\n" "str d31, [x27], #0x8\n" - "str d16, [x22], #0x8\n" - "str d23, [x21], #0x8\n" - "str d24, [x20], #0x8\n" + "str d16, [x23], #0x8\n" + "str d23, [x22], #0x8\n" + "str d24, [x21], #0x8\n" "tbz x9, #2, 120f\n" "st1 { v31.s }[2], [x27], #0x4\n" - "st1 { v16.s }[2], [x22], #0x4\n" - "st1 { v23.s }[2], [x21], #0x4\n" - "st1 { v24.s }[2], [x20], #0x4\n" + "st1 { v16.s }[2], [x23], #0x4\n" + "st1 { v23.s }[2], [x22], #0x4\n" + "st1 { v24.s }[2], [x21], #0x4\n" "tbz x9, #1, 119f\n" "st1 { v31.h }[6], [x27], #0x2\n" - "st1 { v16.h }[6], [x22], #0x2\n" - "st1 { v23.h }[6], [x21], #0x2\n" - "st1 { v24.h }[6], [x20], #0x2\n" + "st1 { v16.h }[6], [x23], #0x2\n" + "st1 { v23.h }[6], [x22], #0x2\n" + "st1 { v24.h }[6], [x21], #0x2\n" "tbz x9, #0, 126f\n" "st1 { v31.b }[14], [x27]\n" - "st1 { v16.b }[14], [x22]\n" - "st1 { v23.b }[14], [x21]\n" - "st1 { v24.b }[14], [x20]\n" + "st1 { v16.b }[14], [x23]\n" + "st1 { v23.b }[14], [x22]\n" + "st1 { v24.b }[14], [x21]\n" "b 126f\n" "119:" // Height 4: Partial direct writeback: partial_1_12 "tbz x9, #0, 126f\n" "st1 { v31.b }[12], [x27]\n" - "st1 { v16.b }[12], [x22]\n" - "st1 { v23.b }[12], [x21]\n" - "st1 { v24.b }[12], [x20]\n" + "st1 { v16.b }[12], [x23]\n" + "st1 { v23.b }[12], [x22]\n" + "st1 { v24.b }[12], [x21]\n" "b 126f\n" "120:" // Height 4: Partial direct writeback: partial_2_8 "tbz x9, #1, 121f\n" "st1 { v31.h }[4], [x27], #0x2\n" - "st1 { v16.h }[4], [x22], #0x2\n" - "st1 { v23.h }[4], [x21], #0x2\n" - "st1 { v24.h }[4], [x20], #0x2\n" + "st1 { v16.h }[4], [x23], #0x2\n" + "st1 { v23.h }[4], [x22], #0x2\n" + "st1 { v24.h }[4], [x21], #0x2\n" "tbz x9, #0, 126f\n" "st1 { v31.b }[10], [x27]\n" - "st1 { v16.b }[10], [x22]\n" - "st1 { v23.b }[10], [x21]\n" - "st1 { v24.b }[10], [x20]\n" + "st1 { v16.b }[10], [x23]\n" + "st1 { v23.b }[10], [x22]\n" + "st1 { v24.b }[10], [x21]\n" "b 126f\n" "121:" // Height 4: Partial direct writeback: partial_1_8 "tbz x9, #0, 126f\n" "st1 { v31.b }[8], [x27]\n" - "st1 { v16.b }[8], [x22]\n" - "st1 { v23.b }[8], [x21]\n" - "st1 { v24.b }[8], [x20]\n" + "st1 { v16.b }[8], [x23]\n" + "st1 { v23.b }[8], [x22]\n" + "st1 { v24.b }[8], [x21]\n" "b 126f\n" "122:" // Height 4: Partial 
direct writeback: partial_4_0
"tbz x9, #2, 124f\n"
"str s31, [x27], #0x4\n"
- "str s16, [x22], #0x4\n"
- "str s23, [x21], #0x4\n"
- "str s24, [x20], #0x4\n"
+ "str s16, [x23], #0x4\n"
+ "str s23, [x22], #0x4\n"
+ "str s24, [x21], #0x4\n"
"tbz x9, #1, 123f\n"
"st1 { v31.h }[2], [x27], #0x2\n"
- "st1 { v16.h }[2], [x22], #0x2\n"
- "st1 { v23.h }[2], [x21], #0x2\n"
- "st1 { v24.h }[2], [x20], #0x2\n"
+ "st1 { v16.h }[2], [x23], #0x2\n"
+ "st1 { v23.h }[2], [x22], #0x2\n"
+ "st1 { v24.h }[2], [x21], #0x2\n"
"tbz x9, #0, 126f\n"
"st1 { v31.b }[6], [x27]\n"
- "st1 { v16.b }[6], [x22]\n"
- "st1 { v23.b }[6], [x21]\n"
- "st1 { v24.b }[6], [x20]\n"
+ "st1 { v16.b }[6], [x23]\n"
+ "st1 { v23.b }[6], [x22]\n"
+ "st1 { v24.b }[6], [x21]\n"
"b 126f\n"
"123:" // Height 4: Partial direct writeback: partial_1_4
"tbz x9, #0, 126f\n"
"st1 { v31.b }[4], [x27]\n"
- "st1 { v16.b }[4], [x22]\n"
- "st1 { v23.b }[4], [x21]\n"
- "st1 { v24.b }[4], [x20]\n"
+ "st1 { v16.b }[4], [x23]\n"
+ "st1 { v23.b }[4], [x22]\n"
+ "st1 { v24.b }[4], [x21]\n"
"b 126f\n"
"124:" // Height 4: Partial direct writeback: partial_2_0
"tbz x9, #1, 125f\n"
"str h31, [x27], #0x2\n"
- "str h16, [x22], #0x2\n"
- "str h23, [x21], #0x2\n"
- "str h24, [x20], #0x2\n"
+ "str h16, [x23], #0x2\n"
+ "str h23, [x22], #0x2\n"
+ "str h24, [x21], #0x2\n"
"tbz x9, #0, 126f\n"
"st1 { v31.b }[2], [x27]\n"
- "st1 { v16.b }[2], [x22]\n"
- "st1 { v23.b }[2], [x21]\n"
- "st1 { v24.b }[2], [x20]\n"
+ "st1 { v16.b }[2], [x23]\n"
+ "st1 { v23.b }[2], [x22]\n"
+ "st1 { v24.b }[2], [x21]\n"
"b 126f\n"
"125:" // Height 4: Partial direct writeback: partial_1_0
"str b31, [x27, #0x0]\n"
- "str b16, [x22, #0x0]\n"
- "str b23, [x21, #0x0]\n"
- "str b24, [x20, #0x0]\n"
+ "str b16, [x23, #0x0]\n"
+ "str b23, [x22, #0x0]\n"
+ "str b24, [x21, #0x0]\n"
"126:" // Height 4: Partial direct writeback: Done
"b 128f\n"
"127:" // Height 4: Full writeback
"str q31, [x27, #0x0]\n"
"add x27, x27, #0x10\n"
- "str q16, [x22, #0x0]\n"
- "str q23, [x21, #0x0]\n"
- "str q24, [x20, #0x0]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q23, [x22, #0x0]\n"
+ "str q24, [x21, #0x0]\n"
"128:" // Height 4: Writeback done
"subs x9, x9, #0x10\n"
"bgt 98b\n"
@@ -2089,7 +2088,6 @@ void a64_hybrid_u8qa_mmla_4x16 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"130:" // Exit
-
: [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
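
Note on the core instruction: the "ummla" encodings that dominate the hunks above each multiply a 2x8 block of uint8 values by an 8x2 block and accumulate a 2x2 int32 tile, which is why the kernel builds its operands with trn1/trn2 on 64-bit lanes and unpacks results with uzp1/uzp2. A minimal sketch, assuming FEAT_I8MM is available (compile with e.g. -march=armv8.6-a+i8mm); the function names here are illustrative, not library API:

#include <arm_neon.h>

#if defined(__ARM_FEATURE_MATMUL_INT8)
// 'a' holds two rows of 8 uint8 values (row 0 in the low 64 bits, row 1 in
// the high 64 bits); 'b' holds two 8-byte columns the same way. vmmlaq_u32
// accumulates the 2x2 int32 block {r0c0, r0c1, r1c0, r1c1} into 'acc'.
static inline uint32x4_t ummla_2x2(uint32x4_t acc, uint8x16_t a, uint8x16_t b)
{
    return vmmlaq_u32(acc, a, b);
}

// The trn1 instructions in the diff build that two-row operand from two
// separate input rows by interleaving their low 64-bit halves (trn2 takes
// the high halves for the second pass):
static inline uint8x16_t pack_rows_lo(uint8x16_t row0, uint8x16_t row1)
{
    return vreinterpretq_u8_u64(
        vtrn1q_u64(vreinterpretq_u64_u8(row0), vreinterpretq_u64_u8(row1)));
}
#endif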
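
Note on the row-sum bookkeeping: the "udot ..., v15.16b" instructions guarded by flags bit 31 accumulate per-row sums of the uint8 activations (assuming, as is conventional for this trick, that v15 holds all-ones), and the later neg/mul/add sequence against b_offset folds them into every accumulator. That is the cross term of the quantized product: sum_k A[m][k] * (B[k][n] - b_offset) = (sum_k A[m][k] * B[k][n]) - b_offset * sum_k A[m][k]. A scalar sketch of the fixup, with illustrative names:

#include <cstdint>

// Matches the "neg" of b_offset, the "mul" by the broadcast row sum, and
// the "add" into each accumulator of that output row.
int32_t apply_row_sum_fixup(int32_t raw_acc, int32_t row_sum, int32_t b_offset)
{
    return raw_acc - b_offset * row_sum;
}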
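
Note on the store tail: the long sqrdmulh/srshl/add/smin/smax/uzp1 runs above are the standard per-layer requantization of the int32 accumulators down to bytes. A per-lane scalar sketch under simplifying assumptions: saturation of the INT32_MIN corner is ignored, and the and/sshr/sqadd block guarded by flags bit 5 (which adjusts rounding of negative lanes before the shift) is omitted; names are illustrative:

#include <algorithm>
#include <cstdint>

uint8_t requantize_lane(int32_t acc, int32_t per_layer_mul,
                        int32_t right_shift,  // the kernel stores this negated for srshl
                        int32_t c_offset, int32_t minval, int32_t maxval)
{
    // sqrdmulh: rounding doubling multiply, keeping the high half.
    int64_t prod = (int64_t)acc * (int64_t)per_layer_mul;
    int32_t hi   = (int32_t)((prod + ((int64_t)1 << 30)) >> 31);

    // srshl by a negative amount acts as a rounding arithmetic shift right.
    int32_t shifted = right_shift > 0
        ? (int32_t)(((int64_t)hi + ((int64_t)1 << (right_shift - 1))) >> right_shift)
        : hi;

    // Add the output offset, clamp to [minval, maxval], then narrow to a
    // byte (the uzp1 .8h / .16b pairs do this 16 lanes at a time).
    return (uint8_t)std::clamp(shifted + c_offset, minval, maxval);
}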
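
Note on the partial writeback blocks: the tbz chains test the remaining column count (x9) bit by bit, storing 8, then 4, 2, and 1 bytes, so every tail of 1..15 outputs is handled with at most four branches and no per-byte loop. A plain C++ equivalent of the pattern, with an illustrative name:

#include <cstdint>
#include <cstring>

void store_tail(uint8_t* dst, const uint8_t* src, unsigned n) // n in [1, 15]
{
    if (n & 8u) { std::memcpy(dst, src, 8); dst += 8; src += 8; }
    if (n & 4u) { std::memcpy(dst, src, 4); dst += 4; src += 4; }
    if (n & 2u) { std::memcpy(dst, src, 2); dst += 2; src += 2; }
    if (n & 1u) { *dst = *src; }
}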