Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/generic.cpp')
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/generic.cpp  1612
1 file changed, 805 insertions, 807 deletions
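
The hunks below touch an auto-generated AArch64 hybrid GEMM kernel for unsigned 8-bit quantized-asymmetric data: the inner loops accumulate with the UDOT (unsigned dot product, by element) instruction and the epilogue requantizes with sqrdmulh/srshl. As context only, here is a minimal C++ sketch of what a single "udot v16.4s, v4.16b, v0.4b[0]" step computes; the function name and driver below are illustrative and are not part of this patch or of arm_gemm.

#include <cstdint>
#include <cstdio>

// Scalar model of "udot vAcc.4s, vB.16b, vA.4b[idx]":
// each of the four 32-bit accumulator lanes gains the dot product of the
// corresponding 4 bytes of the B operand with the 4 bytes of A selected
// by the lane index.
static void udot_by_element(uint32_t acc[4], const uint8_t b[16],
                            const uint8_t a[16], int idx) {
    for (int lane = 0; lane < 4; ++lane) {
        uint32_t sum = 0;
        for (int j = 0; j < 4; ++j) {
            sum += static_cast<uint32_t>(b[4 * lane + j]) *
                   static_cast<uint32_t>(a[4 * idx + j]);
        }
        acc[lane] += sum;
    }
}

int main() {
    uint32_t acc[4] = {0, 0, 0, 0};
    uint8_t b[16], a[16];
    for (int i = 0; i < 16; ++i) { b[i] = static_cast<uint8_t>(i); a[i] = 1; }
    udot_by_element(acc, b, a, 0);  // same reduction as the first udot in the main loop
    std::printf("%u %u %u %u\n", acc[0], acc[1], acc[2], acc[3]);
    return 0;
}

The register renumbering in the hunks below reassigns the scratch q/v and x registers used for these loads and dot products without changing the arithmetic they perform.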
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/generic.cpp
index 31fbf88603..ebe583b5d4 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/generic.cpp
@@ -78,7 +78,6 @@ void a64_hybrid_u8qa_dot_4x16 (
flags |= 0x20;
}
__asm__ __volatile__(
-
"1:" // Row loop
"cmp %x[M], #0x4\n"
"bge 91f\n"
@@ -102,11 +101,11 @@ void a64_hybrid_u8qa_dot_4x16 (
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
"ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 5f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
+ "ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x24, [x20, #0x0]\n"
"cbnz x26, 6f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
"add x24, x24, x20\n"
@@ -128,32 +127,32 @@ void a64_hybrid_u8qa_dot_4x16 (
"blt 9f\n"
"7:" // Height 1: Multiply loop: Main loop head
".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
- "ldr q4, [x28, #0x70]\n"
+ "ldr q21, [x28, #0x70]\n"
".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
- "ldr q5, [x28, #0x80]\n"
+ "ldr q20, [x28, #0x80]\n"
".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
- "ldr q6, [x28, #0x90]\n"
+ "ldr q26, [x28, #0x90]\n"
".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
- "ldr q7, [x28, #0xa0]\n"
+ "ldr q25, [x28, #0xa0]\n"
".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
- "ldr q8, [x28, #0xb0]\n"
+ "ldr q24, [x28, #0xb0]\n"
".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
- "ldr q9, [x28, #0xc0]\n"
+ "ldr q23, [x28, #0xc0]\n"
".inst 0x6fa0e152 // udot v18.4s, v10.16b, v0.4b[1]\n"
- "ldr q10, [x28, #0xd0]\n"
- ".inst 0x6fa0e093 // udot v19.4s, v4.16b, v0.4b[1]\n"
- "ldr q4, [x28, #0xe0]\n"
- ".inst 0x6f80e8b0 // udot v16.4s, v5.16b, v0.4b[2]\n"
- "ldr q5, [x28, #0xf0]\n"
- ".inst 0x6f80e8d1 // udot v17.4s, v6.16b, v0.4b[2]\n"
+ "ldr q22, [x28, #0xd0]\n"
+ ".inst 0x6fa0e2b3 // udot v19.4s, v21.16b, v0.4b[1]\n"
+ "ldr q21, [x28, #0xe0]\n"
+ ".inst 0x6f80ea90 // udot v16.4s, v20.16b, v0.4b[2]\n"
+ "ldr q20, [x28, #0xf0]\n"
+ ".inst 0x6f80eb51 // udot v17.4s, v26.16b, v0.4b[2]\n"
"add x24, x24, #0x10\n"
- ".inst 0x6f80e8f2 // udot v18.4s, v7.16b, v0.4b[2]\n"
- ".inst 0x6f80e913 // udot v19.4s, v8.16b, v0.4b[2]\n"
+ ".inst 0x6f80eb32 // udot v18.4s, v25.16b, v0.4b[2]\n"
+ ".inst 0x6f80eb13 // udot v19.4s, v24.16b, v0.4b[2]\n"
"add x28, x28, #0x100\n"
- ".inst 0x6fa0e930 // udot v16.4s, v9.16b, v0.4b[3]\n"
- ".inst 0x6fa0e951 // udot v17.4s, v10.16b, v0.4b[3]\n"
- ".inst 0x6fa0e892 // udot v18.4s, v4.16b, v0.4b[3]\n"
- ".inst 0x6fa0e8b3 // udot v19.4s, v5.16b, v0.4b[3]\n"
+ ".inst 0x6fa0eaf0 // udot v16.4s, v23.16b, v0.4b[3]\n"
+ ".inst 0x6fa0ead1 // udot v17.4s, v22.16b, v0.4b[3]\n"
+ ".inst 0x6fa0eab2 // udot v18.4s, v21.16b, v0.4b[3]\n"
+ ".inst 0x6fa0ea93 // udot v19.4s, v20.16b, v0.4b[3]\n"
"tbnz %x[flags], #31, 8f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
"8:" // Height 1: Multiply loop: unique 1: skip row sum
@@ -171,33 +170,33 @@ void a64_hybrid_u8qa_dot_4x16 (
"bge 7b\n"
"9:" // Height 1: Multiply loop: Single iteration only
".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
- "ldr q4, [x28, #0x70]\n"
+ "ldr q21, [x28, #0x70]\n"
".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
- "ldr q5, [x28, #0x80]\n"
+ "ldr q20, [x28, #0x80]\n"
".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
- "ldr q6, [x28, #0x90]\n"
+ "ldr q26, [x28, #0x90]\n"
".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
- "ldr q7, [x28, #0xa0]\n"
+ "ldr q25, [x28, #0xa0]\n"
".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
- "ldr q8, [x28, #0xb0]\n"
+ "ldr q24, [x28, #0xb0]\n"
".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
- "ldr q9, [x28, #0xc0]\n"
+ "ldr q23, [x28, #0xc0]\n"
".inst 0x6fa0e152 // udot v18.4s, v10.16b, v0.4b[1]\n"
- "ldr q10, [x28, #0xd0]\n"
- ".inst 0x6fa0e093 // udot v19.4s, v4.16b, v0.4b[1]\n"
- "ldr q4, [x28, #0xe0]\n"
- ".inst 0x6f80e8b0 // udot v16.4s, v5.16b, v0.4b[2]\n"
- "ldr q5, [x28, #0xf0]\n"
- ".inst 0x6f80e8d1 // udot v17.4s, v6.16b, v0.4b[2]\n"
+ "ldr q22, [x28, #0xd0]\n"
+ ".inst 0x6fa0e2b3 // udot v19.4s, v21.16b, v0.4b[1]\n"
+ "ldr q21, [x28, #0xe0]\n"
+ ".inst 0x6f80ea90 // udot v16.4s, v20.16b, v0.4b[2]\n"
+ "ldr q20, [x28, #0xf0]\n"
+ ".inst 0x6f80eb51 // udot v17.4s, v26.16b, v0.4b[2]\n"
"sub x25, x25, #0x10\n"
- ".inst 0x6f80e8f2 // udot v18.4s, v7.16b, v0.4b[2]\n"
- ".inst 0x6f80e913 // udot v19.4s, v8.16b, v0.4b[2]\n"
+ ".inst 0x6f80eb32 // udot v18.4s, v25.16b, v0.4b[2]\n"
+ ".inst 0x6f80eb13 // udot v19.4s, v24.16b, v0.4b[2]\n"
"add x24, x24, #0x10\n"
"add x28, x28, #0x100\n"
- ".inst 0x6fa0e930 // udot v16.4s, v9.16b, v0.4b[3]\n"
- ".inst 0x6fa0e951 // udot v17.4s, v10.16b, v0.4b[3]\n"
- ".inst 0x6fa0e892 // udot v18.4s, v4.16b, v0.4b[3]\n"
- ".inst 0x6fa0e8b3 // udot v19.4s, v5.16b, v0.4b[3]\n"
+ ".inst 0x6fa0eaf0 // udot v16.4s, v23.16b, v0.4b[3]\n"
+ ".inst 0x6fa0ead1 // udot v17.4s, v22.16b, v0.4b[3]\n"
+ ".inst 0x6fa0eab2 // udot v18.4s, v21.16b, v0.4b[3]\n"
+ ".inst 0x6fa0ea93 // udot v19.4s, v20.16b, v0.4b[3]\n"
"tbnz %x[flags], #31, 10f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
"10:" // Height 1: Multiply loop: unique 2: skip row sum
@@ -211,16 +210,16 @@ void a64_hybrid_u8qa_dot_4x16 (
"tbnz %x[flags], #31, 13f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
"13:" // Height 1: Multiply loop: unique 3: skip row sum
- "ldr q6, [x28, #0x0]\n"
- "ldr q7, [x28, #0x10]\n"
+ "ldr q23, [x28, #0x0]\n"
+ "ldr q22, [x28, #0x10]\n"
"sub x25, x25, #0x4\n"
"cmp x25, #0x4\n"
- "ldr q8, [x28, #0x20]\n"
- "ldr q9, [x28, #0x30]\n"
- ".inst 0x6f80e0d0 // udot v16.4s, v6.16b, v0.4b[0]\n"
- ".inst 0x6f80e0f1 // udot v17.4s, v7.16b, v0.4b[0]\n"
- ".inst 0x6f80e112 // udot v18.4s, v8.16b, v0.4b[0]\n"
- ".inst 0x6f80e133 // udot v19.4s, v9.16b, v0.4b[0]\n"
+ "ldr q21, [x28, #0x20]\n"
+ "ldr q20, [x28, #0x30]\n"
+ ".inst 0x6f80e2f0 // udot v16.4s, v23.16b, v0.4b[0]\n"
+ ".inst 0x6f80e2d1 // udot v17.4s, v22.16b, v0.4b[0]\n"
+ ".inst 0x6f80e2b2 // udot v18.4s, v21.16b, v0.4b[0]\n"
+ ".inst 0x6f80e293 // udot v19.4s, v20.16b, v0.4b[0]\n"
"add x28, x28, #0x40\n"
"bge 12b\n"
"14:" // Height 1: Multiply loop: Skip odd blocks
@@ -236,14 +235,14 @@ void a64_hybrid_u8qa_dot_4x16 (
"tbnz %x[flags], #31, 17f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
"17:" // Height 1: Multiply loop: unique 4: skip row sum
- "ldr q10, [x28, #0x0]\n"
- "ldr q4, [x28, #0x10]\n"
- ".inst 0x6f80e150 // udot v16.4s, v10.16b, v0.4b[0]\n"
- ".inst 0x6f80e091 // udot v17.4s, v4.16b, v0.4b[0]\n"
- "ldr q5, [x28, #0x20]\n"
- "ldr q6, [x28, #0x30]\n"
- ".inst 0x6f80e0b2 // udot v18.4s, v5.16b, v0.4b[0]\n"
- ".inst 0x6f80e0d3 // udot v19.4s, v6.16b, v0.4b[0]\n"
+ "ldr q21, [x28, #0x0]\n"
+ "ldr q20, [x28, #0x10]\n"
+ ".inst 0x6f80e2b0 // udot v16.4s, v21.16b, v0.4b[0]\n"
+ ".inst 0x6f80e291 // udot v17.4s, v20.16b, v0.4b[0]\n"
+ "ldr q21, [x28, #0x20]\n"
+ "ldr q20, [x28, #0x30]\n"
+ ".inst 0x6f80e2b2 // udot v18.4s, v21.16b, v0.4b[0]\n"
+ ".inst 0x6f80e293 // udot v19.4s, v20.16b, v0.4b[0]\n"
"add x28, x28, #0x40\n"
"18:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
@@ -252,72 +251,72 @@ void a64_hybrid_u8qa_dot_4x16 (
"bne 4b\n"
"prfm pstl1keep, [x27, #0x0]\n"
"tbnz %x[flags], #31, 19f\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1r { v1.4s }, [x23]\n"
+ "add x20, %x[qp], %[b_offset]\n"
+ "ld1r { v20.4s }, [x20]\n"
"addp v11.4s, v11.4s, v11.4s\n"
- "neg v1.4s, v1.4s\n"
+ "neg v20.4s, v20.4s\n"
"addp v11.4s, v11.4s, v11.4s\n"
- "mul v11.4s, v11.4s, v1.4s\n"
+ "mul v11.4s, v11.4s, v20.4s\n"
"19:" // Height 1: skip row sum fixup
- "ldr q0, [x10, #0x0]\n"
- "ldr q1, [x10, #0x10]\n"
+ "ldr q24, [x10, #0x0]\n"
+ "ldr q23, [x10, #0x10]\n"
"add v16.4s, v16.4s, v11.4s\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q2, [x10, #0x20]\n"
- "ldr q3, [x10, #0x30]\n"
+ "ldr q22, [x10, #0x20]\n"
+ "ldr q21, [x10, #0x30]\n"
"add v18.4s, v18.4s, v11.4s\n"
"add v19.4s, v19.4s, v11.4s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x23]\n"
+ "add x20, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v20.4s }, [x20]\n"
"orr %x[flags], %x[flags], #0x80000000\n"
- "add v16.4s, v16.4s, v0.4s\n"
- "add v17.4s, v17.4s, v1.4s\n"
- "add v18.4s, v18.4s, v2.4s\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x23]\n"
- "add v19.4s, v19.4s, v3.4s\n"
- "sqrdmulh v16.4s, v16.4s, v4.4s\n"
+ "add v16.4s, v16.4s, v24.4s\n"
+ "add v17.4s, v17.4s, v23.4s\n"
+ "add v18.4s, v18.4s, v22.4s\n"
+ "add x20, %x[qp], %[per_layer_right_shift]\n"
+ "ld1r { v0.4s }, [x20]\n"
+ "add v19.4s, v19.4s, v21.4s\n"
+ "sqrdmulh v16.4s, v16.4s, v20.4s\n"
"add x10, x10, #0x40\n"
- "sqrdmulh v17.4s, v17.4s, v4.4s\n"
- "sqrdmulh v18.4s, v18.4s, v4.4s\n"
- "sqrdmulh v19.4s, v19.4s, v4.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v20.4s\n"
+ "sqrdmulh v18.4s, v18.4s, v20.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v20.4s\n"
"tbz %x[flags], #5, 20f\n"
- "and v4.16b, v16.16b, v0.16b\n"
- "and v5.16b, v17.16b, v0.16b\n"
- "and v6.16b, v18.16b, v0.16b\n"
- "and v7.16b, v19.16b, v0.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v4.4s\n"
- "sqadd v17.4s, v17.4s, v5.4s\n"
- "sqadd v18.4s, v18.4s, v6.4s\n"
- "sqadd v19.4s, v19.4s, v7.4s\n"
+ "and v23.16b, v16.16b, v0.16b\n"
+ "and v22.16b, v17.16b, v0.16b\n"
+ "and v21.16b, v18.16b, v0.16b\n"
+ "and v20.16b, v19.16b, v0.16b\n"
+ "sshr v23.4s, v23.4s, #0x1f\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "sqadd v16.4s, v16.4s, v23.4s\n"
+ "sqadd v17.4s, v17.4s, v22.4s\n"
+ "sqadd v18.4s, v18.4s, v21.4s\n"
+ "sqadd v19.4s, v19.4s, v20.4s\n"
"20:" // Height 1: no shift correction
- "add x23, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
+ "add x20, %x[qp], %[c_offset]\n"
+ "ld1r { v22.4s }, [x20]\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v0.4s\n"
"srshl v18.4s, v18.4s, v0.4s\n"
"srshl v19.4s, v19.4s, v0.4s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x23]\n"
- "add v16.4s, v16.4s, v4.4s\n"
- "add v17.4s, v17.4s, v4.4s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x23]\n"
- "add v18.4s, v18.4s, v4.4s\n"
- "add v19.4s, v19.4s, v4.4s\n"
+ "add x20, %x[qp], %[maxval]\n"
+ "ld1r { v21.4s }, [x20]\n"
+ "add v16.4s, v16.4s, v22.4s\n"
+ "add v17.4s, v17.4s, v22.4s\n"
+ "add x20, %x[qp], %[minval]\n"
+ "ld1r { v20.4s }, [x20]\n"
+ "add v18.4s, v18.4s, v22.4s\n"
+ "add v19.4s, v19.4s, v22.4s\n"
"cmp x9, #0x10\n"
- "smin v16.4s, v16.4s, v6.4s\n"
- "smin v17.4s, v17.4s, v6.4s\n"
- "smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
- "smax v16.4s, v16.4s, v5.4s\n"
- "smax v17.4s, v17.4s, v5.4s\n"
- "smax v18.4s, v18.4s, v5.4s\n"
- "smax v19.4s, v19.4s, v5.4s\n"
+ "smin v16.4s, v16.4s, v21.4s\n"
+ "smin v17.4s, v17.4s, v21.4s\n"
+ "smin v18.4s, v18.4s, v21.4s\n"
+ "smin v19.4s, v19.4s, v21.4s\n"
+ "smax v16.4s, v16.4s, v20.4s\n"
+ "smax v17.4s, v17.4s, v20.4s\n"
+ "smax v18.4s, v18.4s, v20.4s\n"
+ "smax v19.4s, v19.4s, v20.4s\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
"uzp1 v17.8h, v18.8h, v19.8h\n"
"uzp1 v16.16b, v16.16b, v17.16b\n"
@@ -397,12 +396,12 @@ void a64_hybrid_u8qa_dot_4x16 (
"34:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
"ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 35f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
+ "ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x24, [x20, #0x0]\n"
+ "ldr x23, [x20, #0x8]\n"
"cbnz x26, 36f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
"add x24, x24, x20\n"
@@ -410,7 +409,7 @@ void a64_hybrid_u8qa_dot_4x16 (
"b 36f\n"
"35:" // Height 2: setup direct input
"mov x24, %x[input_ptr]\n"
- "add x23, x24, x20\n"
+ "add x23, x24, x21\n"
"36:" // Height 2: input setup done
"cmp x25, #0x10\n"
"blt 41f\n"
@@ -428,48 +427,48 @@ void a64_hybrid_u8qa_dot_4x16 (
"37:" // Height 2: Multiply loop: Main loop head
".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
".inst 0x6f81e094 // udot v20.4s, v4.16b, v1.4b[0]\n"
- "ldr q4, [x28, #0x70]\n"
+ "ldr q25, [x28, #0x70]\n"
"add x24, x24, #0x10\n"
".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
".inst 0x6f81e0b5 // udot v21.4s, v5.16b, v1.4b[0]\n"
- "ldr q5, [x28, #0x80]\n"
+ "ldr q24, [x28, #0x80]\n"
"add x23, x23, #0x10\n"
".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0d6 // udot v22.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x28, #0x90]\n"
+ "ldr q30, [x28, #0x90]\n"
".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0f7 // udot v23.4s, v7.16b, v1.4b[0]\n"
- "ldr q7, [x28, #0xa0]\n"
+ "ldr q29, [x28, #0xa0]\n"
".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
".inst 0x6fa1e114 // udot v20.4s, v8.16b, v1.4b[1]\n"
- "ldr q8, [x28, #0xb0]\n"
+ "ldr q28, [x28, #0xb0]\n"
".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
".inst 0x6fa1e135 // udot v21.4s, v9.16b, v1.4b[1]\n"
- "ldr q9, [x28, #0xc0]\n"
+ "ldr q27, [x28, #0xc0]\n"
".inst 0x6fa0e152 // udot v18.4s, v10.16b, v0.4b[1]\n"
".inst 0x6fa1e156 // udot v22.4s, v10.16b, v1.4b[1]\n"
- "ldr q10, [x28, #0xd0]\n"
- ".inst 0x6fa0e093 // udot v19.4s, v4.16b, v0.4b[1]\n"
- ".inst 0x6fa1e097 // udot v23.4s, v4.16b, v1.4b[1]\n"
- "ldr q4, [x28, #0xe0]\n"
- ".inst 0x6f80e8b0 // udot v16.4s, v5.16b, v0.4b[2]\n"
- ".inst 0x6f81e8b4 // udot v20.4s, v5.16b, v1.4b[2]\n"
- "ldr q5, [x28, #0xf0]\n"
+ "ldr q26, [x28, #0xd0]\n"
+ ".inst 0x6fa0e333 // udot v19.4s, v25.16b, v0.4b[1]\n"
+ ".inst 0x6fa1e337 // udot v23.4s, v25.16b, v1.4b[1]\n"
+ "ldr q25, [x28, #0xe0]\n"
+ ".inst 0x6f80eb10 // udot v16.4s, v24.16b, v0.4b[2]\n"
+ ".inst 0x6f81eb14 // udot v20.4s, v24.16b, v1.4b[2]\n"
+ "ldr q24, [x28, #0xf0]\n"
"add x28, x28, #0x100\n"
- ".inst 0x6f80e8d1 // udot v17.4s, v6.16b, v0.4b[2]\n"
- ".inst 0x6f81e8d5 // udot v21.4s, v6.16b, v1.4b[2]\n"
- ".inst 0x6f80e8f2 // udot v18.4s, v7.16b, v0.4b[2]\n"
- ".inst 0x6f81e8f6 // udot v22.4s, v7.16b, v1.4b[2]\n"
- ".inst 0x6f80e913 // udot v19.4s, v8.16b, v0.4b[2]\n"
- ".inst 0x6f81e917 // udot v23.4s, v8.16b, v1.4b[2]\n"
- ".inst 0x6fa0e930 // udot v16.4s, v9.16b, v0.4b[3]\n"
- ".inst 0x6fa1e934 // udot v20.4s, v9.16b, v1.4b[3]\n"
- ".inst 0x6fa0e951 // udot v17.4s, v10.16b, v0.4b[3]\n"
- ".inst 0x6fa1e955 // udot v21.4s, v10.16b, v1.4b[3]\n"
- ".inst 0x6fa0e892 // udot v18.4s, v4.16b, v0.4b[3]\n"
- ".inst 0x6fa1e896 // udot v22.4s, v4.16b, v1.4b[3]\n"
- ".inst 0x6fa0e8b3 // udot v19.4s, v5.16b, v0.4b[3]\n"
- ".inst 0x6fa1e8b7 // udot v23.4s, v5.16b, v1.4b[3]\n"
+ ".inst 0x6f80ebd1 // udot v17.4s, v30.16b, v0.4b[2]\n"
+ ".inst 0x6f81ebd5 // udot v21.4s, v30.16b, v1.4b[2]\n"
+ ".inst 0x6f80ebb2 // udot v18.4s, v29.16b, v0.4b[2]\n"
+ ".inst 0x6f81ebb6 // udot v22.4s, v29.16b, v1.4b[2]\n"
+ ".inst 0x6f80eb93 // udot v19.4s, v28.16b, v0.4b[2]\n"
+ ".inst 0x6f81eb97 // udot v23.4s, v28.16b, v1.4b[2]\n"
+ ".inst 0x6fa0eb70 // udot v16.4s, v27.16b, v0.4b[3]\n"
+ ".inst 0x6fa1eb74 // udot v20.4s, v27.16b, v1.4b[3]\n"
+ ".inst 0x6fa0eb51 // udot v17.4s, v26.16b, v0.4b[3]\n"
+ ".inst 0x6fa1eb55 // udot v21.4s, v26.16b, v1.4b[3]\n"
+ ".inst 0x6fa0eb32 // udot v18.4s, v25.16b, v0.4b[3]\n"
+ ".inst 0x6fa1eb36 // udot v22.4s, v25.16b, v1.4b[3]\n"
+ ".inst 0x6fa0eb13 // udot v19.4s, v24.16b, v0.4b[3]\n"
+ ".inst 0x6fa1eb17 // udot v23.4s, v24.16b, v1.4b[3]\n"
"tbnz %x[flags], #31, 38f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
@@ -491,49 +490,49 @@ void a64_hybrid_u8qa_dot_4x16 (
"39:" // Height 2: Multiply loop: Single iteration only
".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
".inst 0x6f81e094 // udot v20.4s, v4.16b, v1.4b[0]\n"
- "ldr q4, [x28, #0x70]\n"
+ "ldr q25, [x28, #0x70]\n"
"sub x25, x25, #0x10\n"
".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
".inst 0x6f81e0b5 // udot v21.4s, v5.16b, v1.4b[0]\n"
- "ldr q5, [x28, #0x80]\n"
+ "ldr q24, [x28, #0x80]\n"
"add x24, x24, #0x10\n"
".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0d6 // udot v22.4s, v6.16b, v1.4b[0]\n"
- "ldr q6, [x28, #0x90]\n"
+ "ldr q30, [x28, #0x90]\n"
"add x23, x23, #0x10\n"
".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0f7 // udot v23.4s, v7.16b, v1.4b[0]\n"
- "ldr q7, [x28, #0xa0]\n"
+ "ldr q29, [x28, #0xa0]\n"
".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
".inst 0x6fa1e114 // udot v20.4s, v8.16b, v1.4b[1]\n"
- "ldr q8, [x28, #0xb0]\n"
+ "ldr q28, [x28, #0xb0]\n"
".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
".inst 0x6fa1e135 // udot v21.4s, v9.16b, v1.4b[1]\n"
- "ldr q9, [x28, #0xc0]\n"
+ "ldr q27, [x28, #0xc0]\n"
".inst 0x6fa0e152 // udot v18.4s, v10.16b, v0.4b[1]\n"
".inst 0x6fa1e156 // udot v22.4s, v10.16b, v1.4b[1]\n"
- "ldr q10, [x28, #0xd0]\n"
- ".inst 0x6fa0e093 // udot v19.4s, v4.16b, v0.4b[1]\n"
- ".inst 0x6fa1e097 // udot v23.4s, v4.16b, v1.4b[1]\n"
- "ldr q4, [x28, #0xe0]\n"
- ".inst 0x6f80e8b0 // udot v16.4s, v5.16b, v0.4b[2]\n"
- ".inst 0x6f81e8b4 // udot v20.4s, v5.16b, v1.4b[2]\n"
- "ldr q5, [x28, #0xf0]\n"
+ "ldr q26, [x28, #0xd0]\n"
+ ".inst 0x6fa0e333 // udot v19.4s, v25.16b, v0.4b[1]\n"
+ ".inst 0x6fa1e337 // udot v23.4s, v25.16b, v1.4b[1]\n"
+ "ldr q25, [x28, #0xe0]\n"
+ ".inst 0x6f80eb10 // udot v16.4s, v24.16b, v0.4b[2]\n"
+ ".inst 0x6f81eb14 // udot v20.4s, v24.16b, v1.4b[2]\n"
+ "ldr q24, [x28, #0xf0]\n"
"add x28, x28, #0x100\n"
- ".inst 0x6f80e8d1 // udot v17.4s, v6.16b, v0.4b[2]\n"
- ".inst 0x6f81e8d5 // udot v21.4s, v6.16b, v1.4b[2]\n"
- ".inst 0x6f80e8f2 // udot v18.4s, v7.16b, v0.4b[2]\n"
- ".inst 0x6f81e8f6 // udot v22.4s, v7.16b, v1.4b[2]\n"
- ".inst 0x6f80e913 // udot v19.4s, v8.16b, v0.4b[2]\n"
- ".inst 0x6f81e917 // udot v23.4s, v8.16b, v1.4b[2]\n"
- ".inst 0x6fa0e930 // udot v16.4s, v9.16b, v0.4b[3]\n"
- ".inst 0x6fa1e934 // udot v20.4s, v9.16b, v1.4b[3]\n"
- ".inst 0x6fa0e951 // udot v17.4s, v10.16b, v0.4b[3]\n"
- ".inst 0x6fa1e955 // udot v21.4s, v10.16b, v1.4b[3]\n"
- ".inst 0x6fa0e892 // udot v18.4s, v4.16b, v0.4b[3]\n"
- ".inst 0x6fa1e896 // udot v22.4s, v4.16b, v1.4b[3]\n"
- ".inst 0x6fa0e8b3 // udot v19.4s, v5.16b, v0.4b[3]\n"
- ".inst 0x6fa1e8b7 // udot v23.4s, v5.16b, v1.4b[3]\n"
+ ".inst 0x6f80ebd1 // udot v17.4s, v30.16b, v0.4b[2]\n"
+ ".inst 0x6f81ebd5 // udot v21.4s, v30.16b, v1.4b[2]\n"
+ ".inst 0x6f80ebb2 // udot v18.4s, v29.16b, v0.4b[2]\n"
+ ".inst 0x6f81ebb6 // udot v22.4s, v29.16b, v1.4b[2]\n"
+ ".inst 0x6f80eb93 // udot v19.4s, v28.16b, v0.4b[2]\n"
+ ".inst 0x6f81eb97 // udot v23.4s, v28.16b, v1.4b[2]\n"
+ ".inst 0x6fa0eb70 // udot v16.4s, v27.16b, v0.4b[3]\n"
+ ".inst 0x6fa1eb74 // udot v20.4s, v27.16b, v1.4b[3]\n"
+ ".inst 0x6fa0eb51 // udot v17.4s, v26.16b, v0.4b[3]\n"
+ ".inst 0x6fa1eb55 // udot v21.4s, v26.16b, v1.4b[3]\n"
+ ".inst 0x6fa0eb32 // udot v18.4s, v25.16b, v0.4b[3]\n"
+ ".inst 0x6fa1eb36 // udot v22.4s, v25.16b, v1.4b[3]\n"
+ ".inst 0x6fa0eb13 // udot v19.4s, v24.16b, v0.4b[3]\n"
+ ".inst 0x6fa1eb17 // udot v23.4s, v24.16b, v1.4b[3]\n"
"tbnz %x[flags], #31, 40f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
@@ -551,21 +550,21 @@ void a64_hybrid_u8qa_dot_4x16 (
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
"43:" // Height 2: Multiply loop: unique 7: skip row sum
- "ldr q6, [x28, #0x0]\n"
- "ldr q7, [x28, #0x10]\n"
+ "ldr q27, [x28, #0x0]\n"
+ "ldr q26, [x28, #0x10]\n"
"sub x25, x25, #0x4\n"
"cmp x25, #0x4\n"
- "ldr q8, [x28, #0x20]\n"
- "ldr q9, [x28, #0x30]\n"
- ".inst 0x6f80e0d0 // udot v16.4s, v6.16b, v0.4b[0]\n"
- ".inst 0x6f81e0d4 // udot v20.4s, v6.16b, v1.4b[0]\n"
- ".inst 0x6f80e0f1 // udot v17.4s, v7.16b, v0.4b[0]\n"
- ".inst 0x6f81e0f5 // udot v21.4s, v7.16b, v1.4b[0]\n"
+ "ldr q25, [x28, #0x20]\n"
+ "ldr q24, [x28, #0x30]\n"
+ ".inst 0x6f80e370 // udot v16.4s, v27.16b, v0.4b[0]\n"
+ ".inst 0x6f81e374 // udot v20.4s, v27.16b, v1.4b[0]\n"
+ ".inst 0x6f80e351 // udot v17.4s, v26.16b, v0.4b[0]\n"
+ ".inst 0x6f81e355 // udot v21.4s, v26.16b, v1.4b[0]\n"
"add x28, x28, #0x40\n"
- ".inst 0x6f80e112 // udot v18.4s, v8.16b, v0.4b[0]\n"
- ".inst 0x6f81e116 // udot v22.4s, v8.16b, v1.4b[0]\n"
- ".inst 0x6f80e133 // udot v19.4s, v9.16b, v0.4b[0]\n"
- ".inst 0x6f81e137 // udot v23.4s, v9.16b, v1.4b[0]\n"
+ ".inst 0x6f80e332 // udot v18.4s, v25.16b, v0.4b[0]\n"
+ ".inst 0x6f81e336 // udot v22.4s, v25.16b, v1.4b[0]\n"
+ ".inst 0x6f80e313 // udot v19.4s, v24.16b, v0.4b[0]\n"
+ ".inst 0x6f81e317 // udot v23.4s, v24.16b, v1.4b[0]\n"
"bge 42b\n"
"44:" // Height 2: Multiply loop: Skip odd blocks
"cbz x25, 48f\n"
@@ -584,209 +583,209 @@ void a64_hybrid_u8qa_dot_4x16 (
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
"47:" // Height 2: Multiply loop: unique 8: skip row sum
- "ldr q10, [x28, #0x0]\n"
- "ldr q4, [x28, #0x10]\n"
- ".inst 0x6f80e150 // udot v16.4s, v10.16b, v0.4b[0]\n"
- ".inst 0x6f81e154 // udot v20.4s, v10.16b, v1.4b[0]\n"
- "ldr q5, [x28, #0x20]\n"
- "ldr q6, [x28, #0x30]\n"
- ".inst 0x6f80e091 // udot v17.4s, v4.16b, v0.4b[0]\n"
- ".inst 0x6f81e095 // udot v21.4s, v4.16b, v1.4b[0]\n"
- ".inst 0x6f80e0b2 // udot v18.4s, v5.16b, v0.4b[0]\n"
- ".inst 0x6f81e0b6 // udot v22.4s, v5.16b, v1.4b[0]\n"
+ "ldr q24, [x28, #0x0]\n"
+ "ldr q26, [x28, #0x10]\n"
+ ".inst 0x6f80e310 // udot v16.4s, v24.16b, v0.4b[0]\n"
+ ".inst 0x6f81e314 // udot v20.4s, v24.16b, v1.4b[0]\n"
+ "ldr q25, [x28, #0x20]\n"
+ "ldr q24, [x28, #0x30]\n"
+ ".inst 0x6f80e351 // udot v17.4s, v26.16b, v0.4b[0]\n"
+ ".inst 0x6f81e355 // udot v21.4s, v26.16b, v1.4b[0]\n"
+ ".inst 0x6f80e332 // udot v18.4s, v25.16b, v0.4b[0]\n"
+ ".inst 0x6f81e336 // udot v22.4s, v25.16b, v1.4b[0]\n"
"add x28, x28, #0x40\n"
- ".inst 0x6f80e0d3 // udot v19.4s, v6.16b, v0.4b[0]\n"
- ".inst 0x6f81e0d7 // udot v23.4s, v6.16b, v1.4b[0]\n"
+ ".inst 0x6f80e313 // udot v19.4s, v24.16b, v0.4b[0]\n"
+ ".inst 0x6f81e317 // udot v23.4s, v24.16b, v1.4b[0]\n"
"48:" // Height 2: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x26, x26, #0x1\n"
"cmp x26, x20\n"
"bne 34b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x27, x20\n"
+ "add x23, x27, x20\n"
"prfm pstl1keep, [x27, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"tbnz %x[flags], #31, 49f\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1r { v2.4s }, [x23]\n"
+ "add x20, %x[qp], %[b_offset]\n"
+ "ld1r { v24.4s }, [x20]\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
- "neg v2.4s, v2.4s\n"
+ "neg v24.4s, v24.4s\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
- "mul v11.4s, v11.4s, v2.4s\n"
- "mul v12.4s, v12.4s, v2.4s\n"
+ "mul v11.4s, v11.4s, v24.4s\n"
+ "mul v12.4s, v12.4s, v24.4s\n"
"49:" // Height 2: skip row sum fixup
- "ldr q0, [x10, #0x0]\n"
- "ldr q1, [x10, #0x10]\n"
+ "ldr q28, [x10, #0x0]\n"
+ "ldr q27, [x10, #0x10]\n"
"add v16.4s, v16.4s, v11.4s\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q2, [x10, #0x20]\n"
- "ldr q3, [x10, #0x30]\n"
+ "ldr q26, [x10, #0x20]\n"
+ "ldr q25, [x10, #0x30]\n"
"add v18.4s, v18.4s, v11.4s\n"
"add v19.4s, v19.4s, v11.4s\n"
"add v20.4s, v20.4s, v12.4s\n"
"add v21.4s, v21.4s, v12.4s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x23]\n"
+ "add x20, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v24.4s }, [x20]\n"
"add v22.4s, v22.4s, v12.4s\n"
"add v23.4s, v23.4s, v12.4s\n"
"orr %x[flags], %x[flags], #0x80000000\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
- "add v16.4s, v16.4s, v0.4s\n"
- "add v17.4s, v17.4s, v1.4s\n"
+ "add x20, %x[qp], %[per_layer_right_shift]\n"
+ "add v16.4s, v16.4s, v28.4s\n"
+ "add v17.4s, v17.4s, v27.4s\n"
"add x10, x10, #0x40\n"
- "add v18.4s, v18.4s, v2.4s\n"
- "add v19.4s, v19.4s, v3.4s\n"
- "add v20.4s, v20.4s, v0.4s\n"
- "ld1r { v0.4s }, [x23]\n"
- "add v21.4s, v21.4s, v1.4s\n"
- "add v22.4s, v22.4s, v2.4s\n"
- "add v23.4s, v23.4s, v3.4s\n"
- "sqrdmulh v16.4s, v16.4s, v4.4s\n"
- "sqrdmulh v17.4s, v17.4s, v4.4s\n"
- "sqrdmulh v18.4s, v18.4s, v4.4s\n"
- "sqrdmulh v19.4s, v19.4s, v4.4s\n"
- "sqrdmulh v20.4s, v20.4s, v4.4s\n"
- "sqrdmulh v21.4s, v21.4s, v4.4s\n"
- "sqrdmulh v22.4s, v22.4s, v4.4s\n"
- "sqrdmulh v23.4s, v23.4s, v4.4s\n"
+ "add v18.4s, v18.4s, v26.4s\n"
+ "add v19.4s, v19.4s, v25.4s\n"
+ "add v20.4s, v20.4s, v28.4s\n"
+ "ld1r { v0.4s }, [x20]\n"
+ "add v21.4s, v21.4s, v27.4s\n"
+ "add v22.4s, v22.4s, v26.4s\n"
+ "add v23.4s, v23.4s, v25.4s\n"
+ "sqrdmulh v16.4s, v16.4s, v24.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v24.4s\n"
+ "sqrdmulh v18.4s, v18.4s, v24.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v24.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v24.4s\n"
+ "sqrdmulh v21.4s, v21.4s, v24.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v24.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v24.4s\n"
"tbz %x[flags], #5, 50f\n"
- "and v4.16b, v16.16b, v0.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v4.4s\n"
- "and v5.16b, v17.16b, v0.16b\n"
- "and v6.16b, v18.16b, v0.16b\n"
- "and v7.16b, v19.16b, v0.16b\n"
- "and v8.16b, v20.16b, v0.16b\n"
- "and v9.16b, v21.16b, v0.16b\n"
- "and v10.16b, v22.16b, v0.16b\n"
- "and v4.16b, v23.16b, v0.16b\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v9.4s, v9.4s, #0x1f\n"
- "sshr v10.4s, v10.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v17.4s, v17.4s, v5.4s\n"
- "sqadd v18.4s, v18.4s, v6.4s\n"
- "sqadd v19.4s, v19.4s, v7.4s\n"
- "sqadd v20.4s, v20.4s, v8.4s\n"
- "sqadd v21.4s, v21.4s, v9.4s\n"
- "sqadd v22.4s, v22.4s, v10.4s\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
+ "and v24.16b, v16.16b, v0.16b\n"
+ "sshr v24.4s, v24.4s, #0x1f\n"
+ "sqadd v16.4s, v16.4s, v24.4s\n"
+ "and v30.16b, v17.16b, v0.16b\n"
+ "and v29.16b, v18.16b, v0.16b\n"
+ "and v28.16b, v19.16b, v0.16b\n"
+ "and v27.16b, v20.16b, v0.16b\n"
+ "and v26.16b, v21.16b, v0.16b\n"
+ "and v25.16b, v22.16b, v0.16b\n"
+ "and v24.16b, v23.16b, v0.16b\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "sshr v29.4s, v29.4s, #0x1f\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "sshr v27.4s, v27.4s, #0x1f\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "sshr v25.4s, v25.4s, #0x1f\n"
+ "sshr v24.4s, v24.4s, #0x1f\n"
+ "sqadd v17.4s, v17.4s, v30.4s\n"
+ "sqadd v18.4s, v18.4s, v29.4s\n"
+ "sqadd v19.4s, v19.4s, v28.4s\n"
+ "sqadd v20.4s, v20.4s, v27.4s\n"
+ "sqadd v21.4s, v21.4s, v26.4s\n"
+ "sqadd v22.4s, v22.4s, v25.4s\n"
+ "sqadd v23.4s, v23.4s, v24.4s\n"
"50:" // Height 2: no shift correction
- "add x23, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
+ "add x20, %x[qp], %[c_offset]\n"
+ "ld1r { v26.4s }, [x20]\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v0.4s\n"
"srshl v18.4s, v18.4s, v0.4s\n"
"srshl v19.4s, v19.4s, v0.4s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x23]\n"
+ "add x20, %x[qp], %[maxval]\n"
+ "ld1r { v25.4s }, [x20]\n"
"srshl v20.4s, v20.4s, v0.4s\n"
"srshl v21.4s, v21.4s, v0.4s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x23]\n"
+ "add x20, %x[qp], %[minval]\n"
+ "ld1r { v24.4s }, [x20]\n"
"srshl v22.4s, v22.4s, v0.4s\n"
"srshl v23.4s, v23.4s, v0.4s\n"
"cmp x9, #0x10\n"
- "add v16.4s, v16.4s, v4.4s\n"
- "add v17.4s, v17.4s, v4.4s\n"
- "add v18.4s, v18.4s, v4.4s\n"
- "add v19.4s, v19.4s, v4.4s\n"
- "add v20.4s, v20.4s, v4.4s\n"
- "add v21.4s, v21.4s, v4.4s\n"
- "add v22.4s, v22.4s, v4.4s\n"
- "add v23.4s, v23.4s, v4.4s\n"
- "smin v16.4s, v16.4s, v6.4s\n"
- "smin v17.4s, v17.4s, v6.4s\n"
- "smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
- "smin v20.4s, v20.4s, v6.4s\n"
- "smin v21.4s, v21.4s, v6.4s\n"
- "smin v22.4s, v22.4s, v6.4s\n"
- "smin v23.4s, v23.4s, v6.4s\n"
- "smax v16.4s, v16.4s, v5.4s\n"
- "smax v17.4s, v17.4s, v5.4s\n"
- "smax v18.4s, v18.4s, v5.4s\n"
- "smax v19.4s, v19.4s, v5.4s\n"
- "smax v20.4s, v20.4s, v5.4s\n"
- "smax v21.4s, v21.4s, v5.4s\n"
- "smax v22.4s, v22.4s, v5.4s\n"
- "smax v23.4s, v23.4s, v5.4s\n"
+ "add v16.4s, v16.4s, v26.4s\n"
+ "add v17.4s, v17.4s, v26.4s\n"
+ "add v18.4s, v18.4s, v26.4s\n"
+ "add v19.4s, v19.4s, v26.4s\n"
+ "add v20.4s, v20.4s, v26.4s\n"
+ "add v21.4s, v21.4s, v26.4s\n"
+ "add v22.4s, v22.4s, v26.4s\n"
+ "add v23.4s, v23.4s, v26.4s\n"
+ "smin v16.4s, v16.4s, v25.4s\n"
+ "smin v17.4s, v17.4s, v25.4s\n"
+ "smin v18.4s, v18.4s, v25.4s\n"
+ "smin v19.4s, v19.4s, v25.4s\n"
+ "smin v20.4s, v20.4s, v25.4s\n"
+ "smin v21.4s, v21.4s, v25.4s\n"
+ "smin v22.4s, v22.4s, v25.4s\n"
+ "smin v23.4s, v23.4s, v25.4s\n"
+ "smax v16.4s, v16.4s, v24.4s\n"
+ "smax v17.4s, v17.4s, v24.4s\n"
+ "smax v18.4s, v18.4s, v24.4s\n"
+ "smax v19.4s, v19.4s, v24.4s\n"
+ "smax v20.4s, v20.4s, v24.4s\n"
+ "smax v21.4s, v21.4s, v24.4s\n"
+ "smax v22.4s, v22.4s, v24.4s\n"
+ "smax v23.4s, v23.4s, v24.4s\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
- "uzp1 v17.8h, v18.8h, v19.8h\n"
+ "uzp1 v18.8h, v18.8h, v19.8h\n"
"uzp1 v20.8h, v20.8h, v21.8h\n"
- "uzp1 v21.8h, v22.8h, v23.8h\n"
- "uzp1 v16.16b, v16.16b, v17.16b\n"
- "uzp1 v20.16b, v20.16b, v21.16b\n"
+ "uzp1 v17.8h, v22.8h, v23.8h\n"
+ "uzp1 v16.16b, v16.16b, v18.16b\n"
+ "uzp1 v20.16b, v20.16b, v17.16b\n"
"bge 59f\n"
"tbz x9, #3, 54f\n"
"str d16, [x27], #0x8\n"
- "str d20, [x22], #0x8\n"
+ "str d20, [x23], #0x8\n"
"tbz x9, #2, 52f\n"
"st1 { v16.s }[2], [x27], #0x4\n"
- "st1 { v20.s }[2], [x22], #0x4\n"
+ "st1 { v20.s }[2], [x23], #0x4\n"
"tbz x9, #1, 51f\n"
"st1 { v16.h }[6], [x27], #0x2\n"
- "st1 { v20.h }[6], [x22], #0x2\n"
+ "st1 { v20.h }[6], [x23], #0x2\n"
"tbz x9, #0, 58f\n"
"st1 { v16.b }[14], [x27]\n"
- "st1 { v20.b }[14], [x22]\n"
+ "st1 { v20.b }[14], [x23]\n"
"b 58f\n"
"51:" // Height 2: Partial direct writeback: partial_1_12
"tbz x9, #0, 58f\n"
"st1 { v16.b }[12], [x27]\n"
- "st1 { v20.b }[12], [x22]\n"
+ "st1 { v20.b }[12], [x23]\n"
"b 58f\n"
"52:" // Height 2: Partial direct writeback: partial_2_8
"tbz x9, #1, 53f\n"
"st1 { v16.h }[4], [x27], #0x2\n"
- "st1 { v20.h }[4], [x22], #0x2\n"
+ "st1 { v20.h }[4], [x23], #0x2\n"
"tbz x9, #0, 58f\n"
"st1 { v16.b }[10], [x27]\n"
- "st1 { v20.b }[10], [x22]\n"
+ "st1 { v20.b }[10], [x23]\n"
"b 58f\n"
"53:" // Height 2: Partial direct writeback: partial_1_8
"tbz x9, #0, 58f\n"
"st1 { v16.b }[8], [x27]\n"
- "st1 { v20.b }[8], [x22]\n"
+ "st1 { v20.b }[8], [x23]\n"
"b 58f\n"
"54:" // Height 2: Partial direct writeback: partial_4_0
"tbz x9, #2, 56f\n"
"str s16, [x27], #0x4\n"
- "str s20, [x22], #0x4\n"
+ "str s20, [x23], #0x4\n"
"tbz x9, #1, 55f\n"
"st1 { v16.h }[2], [x27], #0x2\n"
- "st1 { v20.h }[2], [x22], #0x2\n"
+ "st1 { v20.h }[2], [x23], #0x2\n"
"tbz x9, #0, 58f\n"
"st1 { v16.b }[6], [x27]\n"
- "st1 { v20.b }[6], [x22]\n"
+ "st1 { v20.b }[6], [x23]\n"
"b 58f\n"
"55:" // Height 2: Partial direct writeback: partial_1_4
"tbz x9, #0, 58f\n"
"st1 { v16.b }[4], [x27]\n"
- "st1 { v20.b }[4], [x22]\n"
+ "st1 { v20.b }[4], [x23]\n"
"b 58f\n"
"56:" // Height 2: Partial direct writeback: partial_2_0
"tbz x9, #1, 57f\n"
"str h16, [x27], #0x2\n"
- "str h20, [x22], #0x2\n"
+ "str h20, [x23], #0x2\n"
"tbz x9, #0, 58f\n"
"st1 { v16.b }[2], [x27]\n"
- "st1 { v20.b }[2], [x22]\n"
+ "st1 { v20.b }[2], [x23]\n"
"b 58f\n"
"57:" // Height 2: Partial direct writeback: partial_1_0
"str b16, [x27, #0x0]\n"
- "str b20, [x22, #0x0]\n"
+ "str b20, [x23, #0x0]\n"
"58:" // Height 2: Partial direct writeback: Done
"b 60f\n"
"59:" // Height 2: Full writeback
"str q16, [x27, #0x0]\n"
"add x27, x27, #0x10\n"
- "str q20, [x22, #0x0]\n"
+ "str q20, [x23, #0x0]\n"
"60:" // Height 2: Writeback done
"subs x9, x9, #0x10\n"
"bgt 32b\n"
@@ -819,13 +818,13 @@ void a64_hybrid_u8qa_dot_4x16 (
"64:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
"ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 65f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
+ "ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x24, [x20, #0x0]\n"
+ "ldr x23, [x20, #0x8]\n"
+ "ldr x22, [x20, #0x10]\n"
"cbnz x26, 66f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
"add x24, x24, x20\n"
@@ -834,8 +833,8 @@ void a64_hybrid_u8qa_dot_4x16 (
"b 66f\n"
"65:" // Height 3: setup direct input
"mov x24, %x[input_ptr]\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "add x23, x24, x21\n"
+ "add x22, x23, x21\n"
"66:" // Height 3: input setup done
"cmp x25, #0x10\n"
"blt 71f\n"
@@ -857,62 +856,62 @@ void a64_hybrid_u8qa_dot_4x16 (
"add x24, x24, #0x10\n"
"add x23, x23, #0x10\n"
".inst 0x6f82e098 // udot v24.4s, v4.16b, v2.4b[0]\n"
- "ldr q4, [x28, #0x70]\n"
+ "ldr q29, [x28, #0x70]\n"
".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
"add x22, x22, #0x10\n"
".inst 0x6f81e0b5 // udot v21.4s, v5.16b, v1.4b[0]\n"
".inst 0x6f82e0b9 // udot v25.4s, v5.16b, v2.4b[0]\n"
- "ldr q5, [x28, #0x80]\n"
+ "ldr q28, [x28, #0x80]\n"
".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0d6 // udot v22.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0da // udot v26.4s, v6.16b, v2.4b[0]\n"
- "ldr q6, [x28, #0x90]\n"
+ "ldr q5, [x28, #0x90]\n"
".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0f7 // udot v23.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0fb // udot v27.4s, v7.16b, v2.4b[0]\n"
- "ldr q7, [x28, #0xa0]\n"
+ "ldr q4, [x28, #0xa0]\n"
".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
".inst 0x6fa1e114 // udot v20.4s, v8.16b, v1.4b[1]\n"
".inst 0x6fa2e118 // udot v24.4s, v8.16b, v2.4b[1]\n"
- "ldr q8, [x28, #0xb0]\n"
+ "ldr q3, [x28, #0xb0]\n"
".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
".inst 0x6fa1e135 // udot v21.4s, v9.16b, v1.4b[1]\n"
".inst 0x6fa2e139 // udot v25.4s, v9.16b, v2.4b[1]\n"
- "ldr q9, [x28, #0xc0]\n"
+ "ldr q31, [x28, #0xc0]\n"
".inst 0x6fa0e152 // udot v18.4s, v10.16b, v0.4b[1]\n"
".inst 0x6fa1e156 // udot v22.4s, v10.16b, v1.4b[1]\n"
".inst 0x6fa2e15a // udot v26.4s, v10.16b, v2.4b[1]\n"
- "ldr q10, [x28, #0xd0]\n"
- ".inst 0x6fa0e093 // udot v19.4s, v4.16b, v0.4b[1]\n"
- ".inst 0x6fa1e097 // udot v23.4s, v4.16b, v1.4b[1]\n"
- ".inst 0x6fa2e09b // udot v27.4s, v4.16b, v2.4b[1]\n"
- "ldr q4, [x28, #0xe0]\n"
- ".inst 0x6f80e8b0 // udot v16.4s, v5.16b, v0.4b[2]\n"
- ".inst 0x6f81e8b4 // udot v20.4s, v5.16b, v1.4b[2]\n"
- ".inst 0x6f82e8b8 // udot v24.4s, v5.16b, v2.4b[2]\n"
- "ldr q5, [x28, #0xf0]\n"
- ".inst 0x6f80e8d1 // udot v17.4s, v6.16b, v0.4b[2]\n"
+ "ldr q30, [x28, #0xd0]\n"
+ ".inst 0x6fa0e3b3 // udot v19.4s, v29.16b, v0.4b[1]\n"
+ ".inst 0x6fa1e3b7 // udot v23.4s, v29.16b, v1.4b[1]\n"
+ ".inst 0x6fa2e3bb // udot v27.4s, v29.16b, v2.4b[1]\n"
+ "ldr q29, [x28, #0xe0]\n"
+ ".inst 0x6f80eb90 // udot v16.4s, v28.16b, v0.4b[2]\n"
+ ".inst 0x6f81eb94 // udot v20.4s, v28.16b, v1.4b[2]\n"
+ ".inst 0x6f82eb98 // udot v24.4s, v28.16b, v2.4b[2]\n"
+ "ldr q28, [x28, #0xf0]\n"
+ ".inst 0x6f80e8b1 // udot v17.4s, v5.16b, v0.4b[2]\n"
"add x28, x28, #0x100\n"
- ".inst 0x6f81e8d5 // udot v21.4s, v6.16b, v1.4b[2]\n"
- ".inst 0x6f82e8d9 // udot v25.4s, v6.16b, v2.4b[2]\n"
- ".inst 0x6f80e8f2 // udot v18.4s, v7.16b, v0.4b[2]\n"
- ".inst 0x6f81e8f6 // udot v22.4s, v7.16b, v1.4b[2]\n"
- ".inst 0x6f82e8fa // udot v26.4s, v7.16b, v2.4b[2]\n"
- ".inst 0x6f80e913 // udot v19.4s, v8.16b, v0.4b[2]\n"
- ".inst 0x6f81e917 // udot v23.4s, v8.16b, v1.4b[2]\n"
- ".inst 0x6f82e91b // udot v27.4s, v8.16b, v2.4b[2]\n"
- ".inst 0x6fa0e930 // udot v16.4s, v9.16b, v0.4b[3]\n"
- ".inst 0x6fa1e934 // udot v20.4s, v9.16b, v1.4b[3]\n"
- ".inst 0x6fa2e938 // udot v24.4s, v9.16b, v2.4b[3]\n"
- ".inst 0x6fa0e951 // udot v17.4s, v10.16b, v0.4b[3]\n"
- ".inst 0x6fa1e955 // udot v21.4s, v10.16b, v1.4b[3]\n"
- ".inst 0x6fa2e959 // udot v25.4s, v10.16b, v2.4b[3]\n"
- ".inst 0x6fa0e892 // udot v18.4s, v4.16b, v0.4b[3]\n"
- ".inst 0x6fa1e896 // udot v22.4s, v4.16b, v1.4b[3]\n"
- ".inst 0x6fa2e89a // udot v26.4s, v4.16b, v2.4b[3]\n"
- ".inst 0x6fa0e8b3 // udot v19.4s, v5.16b, v0.4b[3]\n"
- ".inst 0x6fa1e8b7 // udot v23.4s, v5.16b, v1.4b[3]\n"
- ".inst 0x6fa2e8bb // udot v27.4s, v5.16b, v2.4b[3]\n"
+ ".inst 0x6f81e8b5 // udot v21.4s, v5.16b, v1.4b[2]\n"
+ ".inst 0x6f82e8b9 // udot v25.4s, v5.16b, v2.4b[2]\n"
+ ".inst 0x6f80e892 // udot v18.4s, v4.16b, v0.4b[2]\n"
+ ".inst 0x6f81e896 // udot v22.4s, v4.16b, v1.4b[2]\n"
+ ".inst 0x6f82e89a // udot v26.4s, v4.16b, v2.4b[2]\n"
+ ".inst 0x6f80e873 // udot v19.4s, v3.16b, v0.4b[2]\n"
+ ".inst 0x6f81e877 // udot v23.4s, v3.16b, v1.4b[2]\n"
+ ".inst 0x6f82e87b // udot v27.4s, v3.16b, v2.4b[2]\n"
+ ".inst 0x6fa0ebf0 // udot v16.4s, v31.16b, v0.4b[3]\n"
+ ".inst 0x6fa1ebf4 // udot v20.4s, v31.16b, v1.4b[3]\n"
+ ".inst 0x6fa2ebf8 // udot v24.4s, v31.16b, v2.4b[3]\n"
+ ".inst 0x6fa0ebd1 // udot v17.4s, v30.16b, v0.4b[3]\n"
+ ".inst 0x6fa1ebd5 // udot v21.4s, v30.16b, v1.4b[3]\n"
+ ".inst 0x6fa2ebd9 // udot v25.4s, v30.16b, v2.4b[3]\n"
+ ".inst 0x6fa0ebb2 // udot v18.4s, v29.16b, v0.4b[3]\n"
+ ".inst 0x6fa1ebb6 // udot v22.4s, v29.16b, v1.4b[3]\n"
+ ".inst 0x6fa2ebba // udot v26.4s, v29.16b, v2.4b[3]\n"
+ ".inst 0x6fa0eb93 // udot v19.4s, v28.16b, v0.4b[3]\n"
+ ".inst 0x6fa1eb97 // udot v23.4s, v28.16b, v1.4b[3]\n"
+ ".inst 0x6fa2eb9b // udot v27.4s, v28.16b, v2.4b[3]\n"
"tbnz %x[flags], #31, 68f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
@@ -940,63 +939,63 @@ void a64_hybrid_u8qa_dot_4x16 (
"sub x25, x25, #0x10\n"
"add x24, x24, #0x10\n"
".inst 0x6f82e098 // udot v24.4s, v4.16b, v2.4b[0]\n"
- "ldr q4, [x28, #0x70]\n"
+ "ldr q29, [x28, #0x70]\n"
".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
"add x23, x23, #0x10\n"
".inst 0x6f81e0b5 // udot v21.4s, v5.16b, v1.4b[0]\n"
".inst 0x6f82e0b9 // udot v25.4s, v5.16b, v2.4b[0]\n"
- "ldr q5, [x28, #0x80]\n"
+ "ldr q28, [x28, #0x80]\n"
"add x22, x22, #0x10\n"
".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0d6 // udot v22.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0da // udot v26.4s, v6.16b, v2.4b[0]\n"
- "ldr q6, [x28, #0x90]\n"
+ "ldr q5, [x28, #0x90]\n"
".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0f7 // udot v23.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0fb // udot v27.4s, v7.16b, v2.4b[0]\n"
- "ldr q7, [x28, #0xa0]\n"
+ "ldr q4, [x28, #0xa0]\n"
".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
".inst 0x6fa1e114 // udot v20.4s, v8.16b, v1.4b[1]\n"
".inst 0x6fa2e118 // udot v24.4s, v8.16b, v2.4b[1]\n"
- "ldr q8, [x28, #0xb0]\n"
+ "ldr q3, [x28, #0xb0]\n"
".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
".inst 0x6fa1e135 // udot v21.4s, v9.16b, v1.4b[1]\n"
".inst 0x6fa2e139 // udot v25.4s, v9.16b, v2.4b[1]\n"
- "ldr q9, [x28, #0xc0]\n"
+ "ldr q31, [x28, #0xc0]\n"
".inst 0x6fa0e152 // udot v18.4s, v10.16b, v0.4b[1]\n"
".inst 0x6fa1e156 // udot v22.4s, v10.16b, v1.4b[1]\n"
".inst 0x6fa2e15a // udot v26.4s, v10.16b, v2.4b[1]\n"
- "ldr q10, [x28, #0xd0]\n"
- ".inst 0x6fa0e093 // udot v19.4s, v4.16b, v0.4b[1]\n"
- ".inst 0x6fa1e097 // udot v23.4s, v4.16b, v1.4b[1]\n"
- ".inst 0x6fa2e09b // udot v27.4s, v4.16b, v2.4b[1]\n"
- "ldr q4, [x28, #0xe0]\n"
- ".inst 0x6f80e8b0 // udot v16.4s, v5.16b, v0.4b[2]\n"
- ".inst 0x6f81e8b4 // udot v20.4s, v5.16b, v1.4b[2]\n"
- ".inst 0x6f82e8b8 // udot v24.4s, v5.16b, v2.4b[2]\n"
- "ldr q5, [x28, #0xf0]\n"
- ".inst 0x6f80e8d1 // udot v17.4s, v6.16b, v0.4b[2]\n"
+ "ldr q30, [x28, #0xd0]\n"
+ ".inst 0x6fa0e3b3 // udot v19.4s, v29.16b, v0.4b[1]\n"
+ ".inst 0x6fa1e3b7 // udot v23.4s, v29.16b, v1.4b[1]\n"
+ ".inst 0x6fa2e3bb // udot v27.4s, v29.16b, v2.4b[1]\n"
+ "ldr q29, [x28, #0xe0]\n"
+ ".inst 0x6f80eb90 // udot v16.4s, v28.16b, v0.4b[2]\n"
+ ".inst 0x6f81eb94 // udot v20.4s, v28.16b, v1.4b[2]\n"
+ ".inst 0x6f82eb98 // udot v24.4s, v28.16b, v2.4b[2]\n"
+ "ldr q28, [x28, #0xf0]\n"
+ ".inst 0x6f80e8b1 // udot v17.4s, v5.16b, v0.4b[2]\n"
"add x28, x28, #0x100\n"
- ".inst 0x6f81e8d5 // udot v21.4s, v6.16b, v1.4b[2]\n"
- ".inst 0x6f82e8d9 // udot v25.4s, v6.16b, v2.4b[2]\n"
- ".inst 0x6f80e8f2 // udot v18.4s, v7.16b, v0.4b[2]\n"
- ".inst 0x6f81e8f6 // udot v22.4s, v7.16b, v1.4b[2]\n"
- ".inst 0x6f82e8fa // udot v26.4s, v7.16b, v2.4b[2]\n"
- ".inst 0x6f80e913 // udot v19.4s, v8.16b, v0.4b[2]\n"
- ".inst 0x6f81e917 // udot v23.4s, v8.16b, v1.4b[2]\n"
- ".inst 0x6f82e91b // udot v27.4s, v8.16b, v2.4b[2]\n"
- ".inst 0x6fa0e930 // udot v16.4s, v9.16b, v0.4b[3]\n"
- ".inst 0x6fa1e934 // udot v20.4s, v9.16b, v1.4b[3]\n"
- ".inst 0x6fa2e938 // udot v24.4s, v9.16b, v2.4b[3]\n"
- ".inst 0x6fa0e951 // udot v17.4s, v10.16b, v0.4b[3]\n"
- ".inst 0x6fa1e955 // udot v21.4s, v10.16b, v1.4b[3]\n"
- ".inst 0x6fa2e959 // udot v25.4s, v10.16b, v2.4b[3]\n"
- ".inst 0x6fa0e892 // udot v18.4s, v4.16b, v0.4b[3]\n"
- ".inst 0x6fa1e896 // udot v22.4s, v4.16b, v1.4b[3]\n"
- ".inst 0x6fa2e89a // udot v26.4s, v4.16b, v2.4b[3]\n"
- ".inst 0x6fa0e8b3 // udot v19.4s, v5.16b, v0.4b[3]\n"
- ".inst 0x6fa1e8b7 // udot v23.4s, v5.16b, v1.4b[3]\n"
- ".inst 0x6fa2e8bb // udot v27.4s, v5.16b, v2.4b[3]\n"
+ ".inst 0x6f81e8b5 // udot v21.4s, v5.16b, v1.4b[2]\n"
+ ".inst 0x6f82e8b9 // udot v25.4s, v5.16b, v2.4b[2]\n"
+ ".inst 0x6f80e892 // udot v18.4s, v4.16b, v0.4b[2]\n"
+ ".inst 0x6f81e896 // udot v22.4s, v4.16b, v1.4b[2]\n"
+ ".inst 0x6f82e89a // udot v26.4s, v4.16b, v2.4b[2]\n"
+ ".inst 0x6f80e873 // udot v19.4s, v3.16b, v0.4b[2]\n"
+ ".inst 0x6f81e877 // udot v23.4s, v3.16b, v1.4b[2]\n"
+ ".inst 0x6f82e87b // udot v27.4s, v3.16b, v2.4b[2]\n"
+ ".inst 0x6fa0ebf0 // udot v16.4s, v31.16b, v0.4b[3]\n"
+ ".inst 0x6fa1ebf4 // udot v20.4s, v31.16b, v1.4b[3]\n"
+ ".inst 0x6fa2ebf8 // udot v24.4s, v31.16b, v2.4b[3]\n"
+ ".inst 0x6fa0ebd1 // udot v17.4s, v30.16b, v0.4b[3]\n"
+ ".inst 0x6fa1ebd5 // udot v21.4s, v30.16b, v1.4b[3]\n"
+ ".inst 0x6fa2ebd9 // udot v25.4s, v30.16b, v2.4b[3]\n"
+ ".inst 0x6fa0ebb2 // udot v18.4s, v29.16b, v0.4b[3]\n"
+ ".inst 0x6fa1ebb6 // udot v22.4s, v29.16b, v1.4b[3]\n"
+ ".inst 0x6fa2ebba // udot v26.4s, v29.16b, v2.4b[3]\n"
+ ".inst 0x6fa0eb93 // udot v19.4s, v28.16b, v0.4b[3]\n"
+ ".inst 0x6fa1eb97 // udot v23.4s, v28.16b, v1.4b[3]\n"
+ ".inst 0x6fa2eb9b // udot v27.4s, v28.16b, v2.4b[3]\n"
"tbnz %x[flags], #31, 70f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
@@ -1018,25 +1017,25 @@ void a64_hybrid_u8qa_dot_4x16 (
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
"73:" // Height 3: Multiply loop: unique 11: skip row sum
- "ldr q6, [x28, #0x0]\n"
- "ldr q7, [x28, #0x10]\n"
+ "ldr q31, [x28, #0x0]\n"
+ "ldr q30, [x28, #0x10]\n"
"sub x25, x25, #0x4\n"
"cmp x25, #0x4\n"
- "ldr q8, [x28, #0x20]\n"
- "ldr q9, [x28, #0x30]\n"
- ".inst 0x6f80e0d0 // udot v16.4s, v6.16b, v0.4b[0]\n"
- ".inst 0x6f81e0d4 // udot v20.4s, v6.16b, v1.4b[0]\n"
- ".inst 0x6f82e0d8 // udot v24.4s, v6.16b, v2.4b[0]\n"
- ".inst 0x6f80e0f1 // udot v17.4s, v7.16b, v0.4b[0]\n"
+ "ldr q29, [x28, #0x20]\n"
+ "ldr q28, [x28, #0x30]\n"
+ ".inst 0x6f80e3f0 // udot v16.4s, v31.16b, v0.4b[0]\n"
+ ".inst 0x6f81e3f4 // udot v20.4s, v31.16b, v1.4b[0]\n"
+ ".inst 0x6f82e3f8 // udot v24.4s, v31.16b, v2.4b[0]\n"
+ ".inst 0x6f80e3d1 // udot v17.4s, v30.16b, v0.4b[0]\n"
"add x28, x28, #0x40\n"
- ".inst 0x6f81e0f5 // udot v21.4s, v7.16b, v1.4b[0]\n"
- ".inst 0x6f82e0f9 // udot v25.4s, v7.16b, v2.4b[0]\n"
- ".inst 0x6f80e112 // udot v18.4s, v8.16b, v0.4b[0]\n"
- ".inst 0x6f81e116 // udot v22.4s, v8.16b, v1.4b[0]\n"
- ".inst 0x6f82e11a // udot v26.4s, v8.16b, v2.4b[0]\n"
- ".inst 0x6f80e133 // udot v19.4s, v9.16b, v0.4b[0]\n"
- ".inst 0x6f81e137 // udot v23.4s, v9.16b, v1.4b[0]\n"
- ".inst 0x6f82e13b // udot v27.4s, v9.16b, v2.4b[0]\n"
+ ".inst 0x6f81e3d5 // udot v21.4s, v30.16b, v1.4b[0]\n"
+ ".inst 0x6f82e3d9 // udot v25.4s, v30.16b, v2.4b[0]\n"
+ ".inst 0x6f80e3b2 // udot v18.4s, v29.16b, v0.4b[0]\n"
+ ".inst 0x6f81e3b6 // udot v22.4s, v29.16b, v1.4b[0]\n"
+ ".inst 0x6f82e3ba // udot v26.4s, v29.16b, v2.4b[0]\n"
+ ".inst 0x6f80e393 // udot v19.4s, v28.16b, v0.4b[0]\n"
+ ".inst 0x6f81e397 // udot v23.4s, v28.16b, v1.4b[0]\n"
+ ".inst 0x6f82e39b // udot v27.4s, v28.16b, v2.4b[0]\n"
"bge 72b\n"
"74:" // Height 3: Multiply loop: Skip odd blocks
"cbz x25, 78f\n"
@@ -1059,144 +1058,144 @@ void a64_hybrid_u8qa_dot_4x16 (
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
"77:" // Height 3: Multiply loop: unique 12: skip row sum
- "ldr q10, [x28, #0x0]\n"
- "ldr q4, [x28, #0x10]\n"
- ".inst 0x6f80e150 // udot v16.4s, v10.16b, v0.4b[0]\n"
- ".inst 0x6f81e154 // udot v20.4s, v10.16b, v1.4b[0]\n"
- "ldr q5, [x28, #0x20]\n"
- "ldr q6, [x28, #0x30]\n"
- ".inst 0x6f82e158 // udot v24.4s, v10.16b, v2.4b[0]\n"
- ".inst 0x6f80e091 // udot v17.4s, v4.16b, v0.4b[0]\n"
- ".inst 0x6f81e095 // udot v21.4s, v4.16b, v1.4b[0]\n"
- ".inst 0x6f82e099 // udot v25.4s, v4.16b, v2.4b[0]\n"
+ "ldr q31, [x28, #0x0]\n"
+ "ldr q30, [x28, #0x10]\n"
+ ".inst 0x6f80e3f0 // udot v16.4s, v31.16b, v0.4b[0]\n"
+ ".inst 0x6f81e3f4 // udot v20.4s, v31.16b, v1.4b[0]\n"
+ "ldr q29, [x28, #0x20]\n"
+ "ldr q28, [x28, #0x30]\n"
+ ".inst 0x6f82e3f8 // udot v24.4s, v31.16b, v2.4b[0]\n"
+ ".inst 0x6f80e3d1 // udot v17.4s, v30.16b, v0.4b[0]\n"
+ ".inst 0x6f81e3d5 // udot v21.4s, v30.16b, v1.4b[0]\n"
+ ".inst 0x6f82e3d9 // udot v25.4s, v30.16b, v2.4b[0]\n"
"add x28, x28, #0x40\n"
- ".inst 0x6f80e0b2 // udot v18.4s, v5.16b, v0.4b[0]\n"
- ".inst 0x6f81e0b6 // udot v22.4s, v5.16b, v1.4b[0]\n"
- ".inst 0x6f82e0ba // udot v26.4s, v5.16b, v2.4b[0]\n"
- ".inst 0x6f80e0d3 // udot v19.4s, v6.16b, v0.4b[0]\n"
- ".inst 0x6f81e0d7 // udot v23.4s, v6.16b, v1.4b[0]\n"
- ".inst 0x6f82e0db // udot v27.4s, v6.16b, v2.4b[0]\n"
+ ".inst 0x6f80e3b2 // udot v18.4s, v29.16b, v0.4b[0]\n"
+ ".inst 0x6f81e3b6 // udot v22.4s, v29.16b, v1.4b[0]\n"
+ ".inst 0x6f82e3ba // udot v26.4s, v29.16b, v2.4b[0]\n"
+ ".inst 0x6f80e393 // udot v19.4s, v28.16b, v0.4b[0]\n"
+ ".inst 0x6f81e397 // udot v23.4s, v28.16b, v1.4b[0]\n"
+ ".inst 0x6f82e39b // udot v27.4s, v28.16b, v2.4b[0]\n"
"78:" // Height 3: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x26, x26, #0x1\n"
"cmp x26, x20\n"
"bne 64b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x27, x20\n"
- "add x21, x22, x20\n"
+ "add x23, x27, x20\n"
+ "add x22, x23, x20\n"
"prfm pstl1keep, [x27, #0x0]\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"prfm pstl1keep, [x22, #0x0]\n"
- "prfm pstl1keep, [x21, #0x0]\n"
"tbnz %x[flags], #31, 79f\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1r { v3.4s }, [x23]\n"
+ "add x20, %x[qp], %[b_offset]\n"
+ "ld1r { v28.4s }, [x20]\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
"addp v13.4s, v13.4s, v13.4s\n"
- "neg v3.4s, v3.4s\n"
+ "neg v28.4s, v28.4s\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
"addp v13.4s, v13.4s, v13.4s\n"
- "mul v11.4s, v11.4s, v3.4s\n"
- "mul v12.4s, v12.4s, v3.4s\n"
- "mul v13.4s, v13.4s, v3.4s\n"
+ "mul v11.4s, v11.4s, v28.4s\n"
+ "mul v12.4s, v12.4s, v28.4s\n"
+ "mul v13.4s, v13.4s, v28.4s\n"
"79:" // Height 3: skip row sum fixup
"ldr q0, [x10, #0x0]\n"
- "ldr q1, [x10, #0x10]\n"
+ "ldr q31, [x10, #0x10]\n"
"add v16.4s, v16.4s, v11.4s\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q2, [x10, #0x20]\n"
- "ldr q3, [x10, #0x30]\n"
+ "ldr q30, [x10, #0x20]\n"
+ "ldr q29, [x10, #0x30]\n"
"add v18.4s, v18.4s, v11.4s\n"
"add v19.4s, v19.4s, v11.4s\n"
"add v20.4s, v20.4s, v12.4s\n"
"add v21.4s, v21.4s, v12.4s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x23]\n"
+ "add x20, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v28.4s }, [x20]\n"
"add v22.4s, v22.4s, v12.4s\n"
"add v23.4s, v23.4s, v12.4s\n"
"orr %x[flags], %x[flags], #0x80000000\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
+ "add x20, %x[qp], %[per_layer_right_shift]\n"
"add v24.4s, v24.4s, v13.4s\n"
"add v25.4s, v25.4s, v13.4s\n"
"add x10, x10, #0x40\n"
"add v26.4s, v26.4s, v13.4s\n"
"add v27.4s, v27.4s, v13.4s\n"
"add v16.4s, v16.4s, v0.4s\n"
- "add v17.4s, v17.4s, v1.4s\n"
- "add v18.4s, v18.4s, v2.4s\n"
- "add v19.4s, v19.4s, v3.4s\n"
+ "add v17.4s, v17.4s, v31.4s\n"
+ "add v18.4s, v18.4s, v30.4s\n"
+ "add v19.4s, v19.4s, v29.4s\n"
"add v20.4s, v20.4s, v0.4s\n"
- "add v21.4s, v21.4s, v1.4s\n"
- "add v22.4s, v22.4s, v2.4s\n"
- "add v23.4s, v23.4s, v3.4s\n"
+ "add v21.4s, v21.4s, v31.4s\n"
+ "add v22.4s, v22.4s, v30.4s\n"
+ "add v23.4s, v23.4s, v29.4s\n"
"add v24.4s, v24.4s, v0.4s\n"
- "ld1r { v0.4s }, [x23]\n"
- "add v25.4s, v25.4s, v1.4s\n"
- "add v26.4s, v26.4s, v2.4s\n"
- "add v27.4s, v27.4s, v3.4s\n"
- "sqrdmulh v16.4s, v16.4s, v4.4s\n"
- "sqrdmulh v17.4s, v17.4s, v4.4s\n"
- "sqrdmulh v18.4s, v18.4s, v4.4s\n"
- "sqrdmulh v19.4s, v19.4s, v4.4s\n"
- "sqrdmulh v20.4s, v20.4s, v4.4s\n"
- "sqrdmulh v21.4s, v21.4s, v4.4s\n"
- "sqrdmulh v22.4s, v22.4s, v4.4s\n"
- "sqrdmulh v23.4s, v23.4s, v4.4s\n"
- "sqrdmulh v24.4s, v24.4s, v4.4s\n"
- "sqrdmulh v25.4s, v25.4s, v4.4s\n"
- "sqrdmulh v26.4s, v26.4s, v4.4s\n"
- "sqrdmulh v27.4s, v27.4s, v4.4s\n"
+ "ld1r { v0.4s }, [x20]\n"
+ "add v25.4s, v25.4s, v31.4s\n"
+ "add v26.4s, v26.4s, v30.4s\n"
+ "add v27.4s, v27.4s, v29.4s\n"
+ "sqrdmulh v16.4s, v16.4s, v28.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v28.4s\n"
+ "sqrdmulh v18.4s, v18.4s, v28.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v28.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v28.4s\n"
+ "sqrdmulh v21.4s, v21.4s, v28.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v28.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v28.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v28.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v28.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v28.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v28.4s\n"
"tbz %x[flags], #5, 80f\n"
- "and v4.16b, v16.16b, v0.16b\n"
- "and v5.16b, v17.16b, v0.16b\n"
- "and v6.16b, v18.16b, v0.16b\n"
- "and v7.16b, v19.16b, v0.16b\n"
- "and v8.16b, v20.16b, v0.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v4.4s\n"
- "sqadd v17.4s, v17.4s, v5.4s\n"
- "sqadd v18.4s, v18.4s, v6.4s\n"
- "sqadd v19.4s, v19.4s, v7.4s\n"
- "sqadd v20.4s, v20.4s, v8.4s\n"
- "and v9.16b, v21.16b, v0.16b\n"
- "and v10.16b, v22.16b, v0.16b\n"
- "and v4.16b, v23.16b, v0.16b\n"
- "and v5.16b, v24.16b, v0.16b\n"
- "and v6.16b, v25.16b, v0.16b\n"
- "and v7.16b, v26.16b, v0.16b\n"
- "and v8.16b, v27.16b, v0.16b\n"
- "sshr v9.4s, v9.4s, #0x1f\n"
- "sshr v10.4s, v10.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sqadd v21.4s, v21.4s, v9.4s\n"
- "sqadd v22.4s, v22.4s, v10.4s\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
- "sqadd v24.4s, v24.4s, v5.4s\n"
- "sqadd v25.4s, v25.4s, v6.4s\n"
- "sqadd v26.4s, v26.4s, v7.4s\n"
- "sqadd v27.4s, v27.4s, v8.4s\n"
+ "and v1.16b, v16.16b, v0.16b\n"
+ "and v31.16b, v17.16b, v0.16b\n"
+ "and v30.16b, v18.16b, v0.16b\n"
+ "and v29.16b, v19.16b, v0.16b\n"
+ "and v28.16b, v20.16b, v0.16b\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
+ "sshr v31.4s, v31.4s, #0x1f\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "sshr v29.4s, v29.4s, #0x1f\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "sqadd v16.4s, v16.4s, v1.4s\n"
+ "sqadd v17.4s, v17.4s, v31.4s\n"
+ "sqadd v18.4s, v18.4s, v30.4s\n"
+ "sqadd v19.4s, v19.4s, v29.4s\n"
+ "sqadd v20.4s, v20.4s, v28.4s\n"
+ "and v3.16b, v21.16b, v0.16b\n"
+ "and v2.16b, v22.16b, v0.16b\n"
+ "and v1.16b, v23.16b, v0.16b\n"
+ "and v31.16b, v24.16b, v0.16b\n"
+ "and v30.16b, v25.16b, v0.16b\n"
+ "and v29.16b, v26.16b, v0.16b\n"
+ "and v28.16b, v27.16b, v0.16b\n"
+ "sshr v3.4s, v3.4s, #0x1f\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
+ "sshr v31.4s, v31.4s, #0x1f\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "sshr v29.4s, v29.4s, #0x1f\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "sqadd v21.4s, v21.4s, v3.4s\n"
+ "sqadd v22.4s, v22.4s, v2.4s\n"
+ "sqadd v23.4s, v23.4s, v1.4s\n"
+ "sqadd v24.4s, v24.4s, v31.4s\n"
+ "sqadd v25.4s, v25.4s, v30.4s\n"
+ "sqadd v26.4s, v26.4s, v29.4s\n"
+ "sqadd v27.4s, v27.4s, v28.4s\n"
"80:" // Height 3: no shift correction
- "add x23, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
+ "add x20, %x[qp], %[c_offset]\n"
+ "ld1r { v30.4s }, [x20]\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v0.4s\n"
"srshl v18.4s, v18.4s, v0.4s\n"
"srshl v19.4s, v19.4s, v0.4s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x23]\n"
+ "add x20, %x[qp], %[maxval]\n"
+ "ld1r { v29.4s }, [x20]\n"
"srshl v20.4s, v20.4s, v0.4s\n"
"srshl v21.4s, v21.4s, v0.4s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x23]\n"
+ "add x20, %x[qp], %[minval]\n"
+ "ld1r { v28.4s }, [x20]\n"
"srshl v22.4s, v22.4s, v0.4s\n"
"srshl v23.4s, v23.4s, v0.4s\n"
"cmp x9, #0x10\n"
@@ -1204,132 +1203,132 @@ void a64_hybrid_u8qa_dot_4x16 (
"srshl v25.4s, v25.4s, v0.4s\n"
"srshl v26.4s, v26.4s, v0.4s\n"
"srshl v27.4s, v27.4s, v0.4s\n"
- "add v16.4s, v16.4s, v4.4s\n"
- "add v17.4s, v17.4s, v4.4s\n"
- "add v18.4s, v18.4s, v4.4s\n"
- "add v19.4s, v19.4s, v4.4s\n"
- "add v20.4s, v20.4s, v4.4s\n"
- "add v21.4s, v21.4s, v4.4s\n"
- "add v22.4s, v22.4s, v4.4s\n"
- "add v23.4s, v23.4s, v4.4s\n"
- "add v24.4s, v24.4s, v4.4s\n"
- "add v25.4s, v25.4s, v4.4s\n"
- "add v26.4s, v26.4s, v4.4s\n"
- "add v27.4s, v27.4s, v4.4s\n"
- "smin v16.4s, v16.4s, v6.4s\n"
- "smin v17.4s, v17.4s, v6.4s\n"
- "smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
- "smin v20.4s, v20.4s, v6.4s\n"
- "smin v21.4s, v21.4s, v6.4s\n"
- "smin v22.4s, v22.4s, v6.4s\n"
- "smin v23.4s, v23.4s, v6.4s\n"
- "smin v24.4s, v24.4s, v6.4s\n"
- "smin v25.4s, v25.4s, v6.4s\n"
- "smin v26.4s, v26.4s, v6.4s\n"
- "smin v27.4s, v27.4s, v6.4s\n"
- "smax v16.4s, v16.4s, v5.4s\n"
- "smax v17.4s, v17.4s, v5.4s\n"
- "smax v18.4s, v18.4s, v5.4s\n"
- "smax v19.4s, v19.4s, v5.4s\n"
- "smax v20.4s, v20.4s, v5.4s\n"
- "smax v21.4s, v21.4s, v5.4s\n"
- "smax v22.4s, v22.4s, v5.4s\n"
- "smax v23.4s, v23.4s, v5.4s\n"
- "smax v24.4s, v24.4s, v5.4s\n"
- "smax v25.4s, v25.4s, v5.4s\n"
- "smax v26.4s, v26.4s, v5.4s\n"
- "smax v27.4s, v27.4s, v5.4s\n"
+ "add v16.4s, v16.4s, v30.4s\n"
+ "add v17.4s, v17.4s, v30.4s\n"
+ "add v18.4s, v18.4s, v30.4s\n"
+ "add v19.4s, v19.4s, v30.4s\n"
+ "add v20.4s, v20.4s, v30.4s\n"
+ "add v21.4s, v21.4s, v30.4s\n"
+ "add v22.4s, v22.4s, v30.4s\n"
+ "add v23.4s, v23.4s, v30.4s\n"
+ "add v24.4s, v24.4s, v30.4s\n"
+ "add v25.4s, v25.4s, v30.4s\n"
+ "add v26.4s, v26.4s, v30.4s\n"
+ "add v27.4s, v27.4s, v30.4s\n"
+ "smin v16.4s, v16.4s, v29.4s\n"
+ "smin v17.4s, v17.4s, v29.4s\n"
+ "smin v18.4s, v18.4s, v29.4s\n"
+ "smin v19.4s, v19.4s, v29.4s\n"
+ "smin v20.4s, v20.4s, v29.4s\n"
+ "smin v21.4s, v21.4s, v29.4s\n"
+ "smin v22.4s, v22.4s, v29.4s\n"
+ "smin v23.4s, v23.4s, v29.4s\n"
+ "smin v24.4s, v24.4s, v29.4s\n"
+ "smin v25.4s, v25.4s, v29.4s\n"
+ "smin v26.4s, v26.4s, v29.4s\n"
+ "smin v27.4s, v27.4s, v29.4s\n"
+ "smax v16.4s, v16.4s, v28.4s\n"
+ "smax v17.4s, v17.4s, v28.4s\n"
+ "smax v18.4s, v18.4s, v28.4s\n"
+ "smax v19.4s, v19.4s, v28.4s\n"
+ "smax v20.4s, v20.4s, v28.4s\n"
+ "smax v21.4s, v21.4s, v28.4s\n"
+ "smax v22.4s, v22.4s, v28.4s\n"
+ "smax v23.4s, v23.4s, v28.4s\n"
+ "smax v24.4s, v24.4s, v28.4s\n"
+ "smax v25.4s, v25.4s, v28.4s\n"
+ "smax v26.4s, v26.4s, v28.4s\n"
+ "smax v27.4s, v27.4s, v28.4s\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
- "uzp1 v17.8h, v18.8h, v19.8h\n"
+ "uzp1 v19.8h, v18.8h, v19.8h\n"
"uzp1 v20.8h, v20.8h, v21.8h\n"
- "uzp1 v21.8h, v22.8h, v23.8h\n"
+ "uzp1 v18.8h, v22.8h, v23.8h\n"
"uzp1 v24.8h, v24.8h, v25.8h\n"
- "uzp1 v25.8h, v26.8h, v27.8h\n"
- "uzp1 v16.16b, v16.16b, v17.16b\n"
- "uzp1 v20.16b, v20.16b, v21.16b\n"
- "uzp1 v24.16b, v24.16b, v25.16b\n"
+ "uzp1 v17.8h, v26.8h, v27.8h\n"
+ "uzp1 v16.16b, v16.16b, v19.16b\n"
+ "uzp1 v20.16b, v20.16b, v18.16b\n"
+ "uzp1 v24.16b, v24.16b, v17.16b\n"
"bge 89f\n"
"tbz x9, #3, 84f\n"
"str d16, [x27], #0x8\n"
- "str d20, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
+ "str d20, [x23], #0x8\n"
+ "str d24, [x22], #0x8\n"
"tbz x9, #2, 82f\n"
"st1 { v16.s }[2], [x27], #0x4\n"
- "st1 { v20.s }[2], [x22], #0x4\n"
- "st1 { v24.s }[2], [x21], #0x4\n"
+ "st1 { v20.s }[2], [x23], #0x4\n"
+ "st1 { v24.s }[2], [x22], #0x4\n"
"tbz x9, #1, 81f\n"
"st1 { v16.h }[6], [x27], #0x2\n"
- "st1 { v20.h }[6], [x22], #0x2\n"
- "st1 { v24.h }[6], [x21], #0x2\n"
+ "st1 { v20.h }[6], [x23], #0x2\n"
+ "st1 { v24.h }[6], [x22], #0x2\n"
"tbz x9, #0, 88f\n"
"st1 { v16.b }[14], [x27]\n"
- "st1 { v20.b }[14], [x22]\n"
- "st1 { v24.b }[14], [x21]\n"
+ "st1 { v20.b }[14], [x23]\n"
+ "st1 { v24.b }[14], [x22]\n"
"b 88f\n"
"81:" // Height 3: Partial direct writeback: partial_1_12
"tbz x9, #0, 88f\n"
"st1 { v16.b }[12], [x27]\n"
- "st1 { v20.b }[12], [x22]\n"
- "st1 { v24.b }[12], [x21]\n"
+ "st1 { v20.b }[12], [x23]\n"
+ "st1 { v24.b }[12], [x22]\n"
"b 88f\n"
"82:" // Height 3: Partial direct writeback: partial_2_8
"tbz x9, #1, 83f\n"
"st1 { v16.h }[4], [x27], #0x2\n"
- "st1 { v20.h }[4], [x22], #0x2\n"
- "st1 { v24.h }[4], [x21], #0x2\n"
+ "st1 { v20.h }[4], [x23], #0x2\n"
+ "st1 { v24.h }[4], [x22], #0x2\n"
"tbz x9, #0, 88f\n"
"st1 { v16.b }[10], [x27]\n"
- "st1 { v20.b }[10], [x22]\n"
- "st1 { v24.b }[10], [x21]\n"
+ "st1 { v20.b }[10], [x23]\n"
+ "st1 { v24.b }[10], [x22]\n"
"b 88f\n"
"83:" // Height 3: Partial direct writeback: partial_1_8
"tbz x9, #0, 88f\n"
"st1 { v16.b }[8], [x27]\n"
- "st1 { v20.b }[8], [x22]\n"
- "st1 { v24.b }[8], [x21]\n"
+ "st1 { v20.b }[8], [x23]\n"
+ "st1 { v24.b }[8], [x22]\n"
"b 88f\n"
"84:" // Height 3: Partial direct writeback: partial_4_0
"tbz x9, #2, 86f\n"
"str s16, [x27], #0x4\n"
- "str s20, [x22], #0x4\n"
- "str s24, [x21], #0x4\n"
+ "str s20, [x23], #0x4\n"
+ "str s24, [x22], #0x4\n"
"tbz x9, #1, 85f\n"
"st1 { v16.h }[2], [x27], #0x2\n"
- "st1 { v20.h }[2], [x22], #0x2\n"
- "st1 { v24.h }[2], [x21], #0x2\n"
+ "st1 { v20.h }[2], [x23], #0x2\n"
+ "st1 { v24.h }[2], [x22], #0x2\n"
"tbz x9, #0, 88f\n"
"st1 { v16.b }[6], [x27]\n"
- "st1 { v20.b }[6], [x22]\n"
- "st1 { v24.b }[6], [x21]\n"
+ "st1 { v20.b }[6], [x23]\n"
+ "st1 { v24.b }[6], [x22]\n"
"b 88f\n"
"85:" // Height 3: Partial direct writeback: partial_1_4
"tbz x9, #0, 88f\n"
"st1 { v16.b }[4], [x27]\n"
- "st1 { v20.b }[4], [x22]\n"
- "st1 { v24.b }[4], [x21]\n"
+ "st1 { v20.b }[4], [x23]\n"
+ "st1 { v24.b }[4], [x22]\n"
"b 88f\n"
"86:" // Height 3: Partial direct writeback: partial_2_0
"tbz x9, #1, 87f\n"
"str h16, [x27], #0x2\n"
- "str h20, [x22], #0x2\n"
- "str h24, [x21], #0x2\n"
+ "str h20, [x23], #0x2\n"
+ "str h24, [x22], #0x2\n"
"tbz x9, #0, 88f\n"
"st1 { v16.b }[2], [x27]\n"
- "st1 { v20.b }[2], [x22]\n"
- "st1 { v24.b }[2], [x21]\n"
+ "st1 { v20.b }[2], [x23]\n"
+ "st1 { v24.b }[2], [x22]\n"
"b 88f\n"
"87:" // Height 3: Partial direct writeback: partial_1_0
"str b16, [x27, #0x0]\n"
- "str b20, [x22, #0x0]\n"
- "str b24, [x21, #0x0]\n"
+ "str b20, [x23, #0x0]\n"
+ "str b24, [x22, #0x0]\n"
"88:" // Height 3: Partial direct writeback: Done
"b 90f\n"
"89:" // Height 3: Full writeback
"str q16, [x27, #0x0]\n"
"add x27, x27, #0x10\n"
- "str q20, [x22, #0x0]\n"
- "str q24, [x21, #0x0]\n"
+ "str q20, [x23, #0x0]\n"
+ "str q24, [x22, #0x0]\n"
"90:" // Height 3: Writeback done
"subs x9, x9, #0x10\n"
"bgt 62b\n"
@@ -1370,14 +1369,14 @@ void a64_hybrid_u8qa_dot_4x16 (
"94:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
"ldr w25, [x20, x26, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 95f\n"
- "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x24, [x21, #0x0]\n"
- "ldr x23, [x21, #0x8]\n"
- "ldr x22, [x21, #0x10]\n"
- "ldr x21, [x21, #0x18]\n"
+ "ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x24, [x20, #0x0]\n"
+ "ldr x23, [x20, #0x8]\n"
+ "ldr x22, [x20, #0x10]\n"
+ "ldr x21, [x20, #0x18]\n"
"cbnz x26, 96f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
"add x24, x24, x20\n"
@@ -1387,9 +1386,9 @@ void a64_hybrid_u8qa_dot_4x16 (
"b 96f\n"
"95:" // Height 4: setup direct input
"mov x24, %x[input_ptr]\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "add x23, x24, x21\n"
+ "add x22, x23, x21\n"
+ "add x21, x22, x21\n"
"96:" // Height 4: input setup done
"cmp x25, #0x10\n"
"blt 101f\n"
@@ -1614,29 +1613,29 @@ void a64_hybrid_u8qa_dot_4x16 (
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
".inst 0x6e8f946e // udot v14.4s, v3.16b, v15.16b\n"
"103:" // Height 4: Multiply loop: unique 15: skip row sum
- "ldr q6, [x28, #0x0]\n"
- "ldr q7, [x28, #0x10]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "ldr q6, [x28, #0x10]\n"
"sub x25, x25, #0x4\n"
"cmp x25, #0x4\n"
- "ldr q8, [x28, #0x20]\n"
- "ldr q9, [x28, #0x30]\n"
- ".inst 0x6f80e0d0 // udot v16.4s, v6.16b, v0.4b[0]\n"
- ".inst 0x6f81e0d4 // udot v20.4s, v6.16b, v1.4b[0]\n"
- ".inst 0x6f82e0d8 // udot v24.4s, v6.16b, v2.4b[0]\n"
- ".inst 0x6f83e0dc // udot v28.4s, v6.16b, v3.4b[0]\n"
+ "ldr q5, [x28, #0x20]\n"
+ "ldr q4, [x28, #0x30]\n"
+ ".inst 0x6f80e0f0 // udot v16.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x6f81e0f4 // udot v20.4s, v7.16b, v1.4b[0]\n"
+ ".inst 0x6f82e0f8 // udot v24.4s, v7.16b, v2.4b[0]\n"
+ ".inst 0x6f83e0fc // udot v28.4s, v7.16b, v3.4b[0]\n"
"add x28, x28, #0x40\n"
- ".inst 0x6f80e0f1 // udot v17.4s, v7.16b, v0.4b[0]\n"
- ".inst 0x6f81e0f5 // udot v21.4s, v7.16b, v1.4b[0]\n"
- ".inst 0x6f82e0f9 // udot v25.4s, v7.16b, v2.4b[0]\n"
- ".inst 0x6f83e0fd // udot v29.4s, v7.16b, v3.4b[0]\n"
- ".inst 0x6f80e112 // udot v18.4s, v8.16b, v0.4b[0]\n"
- ".inst 0x6f81e116 // udot v22.4s, v8.16b, v1.4b[0]\n"
- ".inst 0x6f82e11a // udot v26.4s, v8.16b, v2.4b[0]\n"
- ".inst 0x6f83e11e // udot v30.4s, v8.16b, v3.4b[0]\n"
- ".inst 0x6f80e133 // udot v19.4s, v9.16b, v0.4b[0]\n"
- ".inst 0x6f81e137 // udot v23.4s, v9.16b, v1.4b[0]\n"
- ".inst 0x6f82e13b // udot v27.4s, v9.16b, v2.4b[0]\n"
- ".inst 0x6f83e13f // udot v31.4s, v9.16b, v3.4b[0]\n"
+ ".inst 0x6f80e0d1 // udot v17.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x6f81e0d5 // udot v21.4s, v6.16b, v1.4b[0]\n"
+ ".inst 0x6f82e0d9 // udot v25.4s, v6.16b, v2.4b[0]\n"
+ ".inst 0x6f83e0dd // udot v29.4s, v6.16b, v3.4b[0]\n"
+ ".inst 0x6f80e0b2 // udot v18.4s, v5.16b, v0.4b[0]\n"
+ ".inst 0x6f81e0b6 // udot v22.4s, v5.16b, v1.4b[0]\n"
+ ".inst 0x6f82e0ba // udot v26.4s, v5.16b, v2.4b[0]\n"
+ ".inst 0x6f83e0be // udot v30.4s, v5.16b, v3.4b[0]\n"
+ ".inst 0x6f80e093 // udot v19.4s, v4.16b, v0.4b[0]\n"
+ ".inst 0x6f81e097 // udot v23.4s, v4.16b, v1.4b[0]\n"
+ ".inst 0x6f82e09b // udot v27.4s, v4.16b, v2.4b[0]\n"
+ ".inst 0x6f83e09f // udot v31.4s, v4.16b, v3.4b[0]\n"
"bge 102b\n"
"104:" // Height 4: Multiply loop: Skip odd blocks
"cbz x25, 108f\n"
@@ -1663,73 +1662,73 @@ void a64_hybrid_u8qa_dot_4x16 (
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
".inst 0x6e8f946e // udot v14.4s, v3.16b, v15.16b\n"
"107:" // Height 4: Multiply loop: unique 16: skip row sum
- "ldr q10, [x28, #0x0]\n"
- "ldr q4, [x28, #0x10]\n"
- ".inst 0x6f80e150 // udot v16.4s, v10.16b, v0.4b[0]\n"
- ".inst 0x6f81e154 // udot v20.4s, v10.16b, v1.4b[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "ldr q6, [x28, #0x10]\n"
+ ".inst 0x6f80e0f0 // udot v16.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x6f81e0f4 // udot v20.4s, v7.16b, v1.4b[0]\n"
"ldr q5, [x28, #0x20]\n"
- "ldr q6, [x28, #0x30]\n"
- ".inst 0x6f82e158 // udot v24.4s, v10.16b, v2.4b[0]\n"
- ".inst 0x6f83e15c // udot v28.4s, v10.16b, v3.4b[0]\n"
- ".inst 0x6f80e091 // udot v17.4s, v4.16b, v0.4b[0]\n"
- ".inst 0x6f81e095 // udot v21.4s, v4.16b, v1.4b[0]\n"
+ "ldr q4, [x28, #0x30]\n"
+ ".inst 0x6f82e0f8 // udot v24.4s, v7.16b, v2.4b[0]\n"
+ ".inst 0x6f83e0fc // udot v28.4s, v7.16b, v3.4b[0]\n"
+ ".inst 0x6f80e0d1 // udot v17.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x6f81e0d5 // udot v21.4s, v6.16b, v1.4b[0]\n"
"add x28, x28, #0x40\n"
- ".inst 0x6f82e099 // udot v25.4s, v4.16b, v2.4b[0]\n"
- ".inst 0x6f83e09d // udot v29.4s, v4.16b, v3.4b[0]\n"
+ ".inst 0x6f82e0d9 // udot v25.4s, v6.16b, v2.4b[0]\n"
+ ".inst 0x6f83e0dd // udot v29.4s, v6.16b, v3.4b[0]\n"
".inst 0x6f80e0b2 // udot v18.4s, v5.16b, v0.4b[0]\n"
".inst 0x6f81e0b6 // udot v22.4s, v5.16b, v1.4b[0]\n"
".inst 0x6f82e0ba // udot v26.4s, v5.16b, v2.4b[0]\n"
".inst 0x6f83e0be // udot v30.4s, v5.16b, v3.4b[0]\n"
- ".inst 0x6f80e0d3 // udot v19.4s, v6.16b, v0.4b[0]\n"
- ".inst 0x6f81e0d7 // udot v23.4s, v6.16b, v1.4b[0]\n"
- ".inst 0x6f82e0db // udot v27.4s, v6.16b, v2.4b[0]\n"
- ".inst 0x6f83e0df // udot v31.4s, v6.16b, v3.4b[0]\n"
+ ".inst 0x6f80e093 // udot v19.4s, v4.16b, v0.4b[0]\n"
+ ".inst 0x6f81e097 // udot v23.4s, v4.16b, v1.4b[0]\n"
+ ".inst 0x6f82e09b // udot v27.4s, v4.16b, v2.4b[0]\n"
+ ".inst 0x6f83e09f // udot v31.4s, v4.16b, v3.4b[0]\n"
"108:" // Height 4: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x26, x26, #0x1\n"
"cmp x26, x20\n"
"bne 94b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x27, x20\n"
- "add x21, x22, x20\n"
+ "add x23, x27, x20\n"
+ "add x22, x23, x20\n"
"prfm pstl1keep, [x27, #0x0]\n"
- "add x20, x21, x20\n"
+ "add x21, x22, x20\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"prfm pstl1keep, [x22, #0x0]\n"
"prfm pstl1keep, [x21, #0x0]\n"
- "prfm pstl1keep, [x20, #0x0]\n"
"tbnz %x[flags], #31, 109f\n"
- "add x23, %x[qp], %[b_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
+ "add x20, %x[qp], %[b_offset]\n"
+ "ld1r { v0.4s }, [x20]\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
"addp v13.4s, v13.4s, v13.4s\n"
"addp v14.4s, v14.4s, v14.4s\n"
- "neg v4.4s, v4.4s\n"
+ "neg v0.4s, v0.4s\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
"addp v13.4s, v13.4s, v13.4s\n"
"addp v14.4s, v14.4s, v14.4s\n"
- "mul v11.4s, v11.4s, v4.4s\n"
- "mul v12.4s, v12.4s, v4.4s\n"
- "mul v13.4s, v13.4s, v4.4s\n"
- "mul v14.4s, v14.4s, v4.4s\n"
+ "mul v11.4s, v11.4s, v0.4s\n"
+ "mul v12.4s, v12.4s, v0.4s\n"
+ "mul v13.4s, v13.4s, v0.4s\n"
+ "mul v14.4s, v14.4s, v0.4s\n"
"109:" // Height 4: skip row sum fixup
"ldr q0, [x10, #0x0]\n"
- "ldr q1, [x10, #0x10]\n"
+ "ldr q4, [x10, #0x10]\n"
"add v16.4s, v16.4s, v11.4s\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q2, [x10, #0x20]\n"
- "ldr q3, [x10, #0x30]\n"
+ "ldr q3, [x10, #0x20]\n"
+ "ldr q2, [x10, #0x30]\n"
"add v18.4s, v18.4s, v11.4s\n"
"add v19.4s, v19.4s, v11.4s\n"
"add v20.4s, v20.4s, v12.4s\n"
"add v21.4s, v21.4s, v12.4s\n"
- "add x23, %x[qp], %[per_layer_mul]\n"
- "ld1r { v4.4s }, [x23]\n"
+ "add x20, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v1.4s }, [x20]\n"
"add v22.4s, v22.4s, v12.4s\n"
"add v23.4s, v23.4s, v12.4s\n"
"orr %x[flags], %x[flags], #0x80000000\n"
- "add x23, %x[qp], %[per_layer_right_shift]\n"
+ "add x20, %x[qp], %[per_layer_right_shift]\n"
"add v24.4s, v24.4s, v13.4s\n"
"add v25.4s, v25.4s, v13.4s\n"
"add x10, x10, #0x40\n"
@@ -1740,100 +1739,100 @@ void a64_hybrid_u8qa_dot_4x16 (
"add v30.4s, v30.4s, v14.4s\n"
"add v31.4s, v31.4s, v14.4s\n"
"add v16.4s, v16.4s, v0.4s\n"
- "add v17.4s, v17.4s, v1.4s\n"
- "add v18.4s, v18.4s, v2.4s\n"
- "add v19.4s, v19.4s, v3.4s\n"
+ "add v17.4s, v17.4s, v4.4s\n"
+ "add v18.4s, v18.4s, v3.4s\n"
+ "add v19.4s, v19.4s, v2.4s\n"
"add v20.4s, v20.4s, v0.4s\n"
- "add v21.4s, v21.4s, v1.4s\n"
- "add v22.4s, v22.4s, v2.4s\n"
- "add v23.4s, v23.4s, v3.4s\n"
+ "add v21.4s, v21.4s, v4.4s\n"
+ "add v22.4s, v22.4s, v3.4s\n"
+ "add v23.4s, v23.4s, v2.4s\n"
"add v24.4s, v24.4s, v0.4s\n"
- "add v25.4s, v25.4s, v1.4s\n"
- "add v26.4s, v26.4s, v2.4s\n"
- "add v27.4s, v27.4s, v3.4s\n"
+ "add v25.4s, v25.4s, v4.4s\n"
+ "add v26.4s, v26.4s, v3.4s\n"
+ "add v27.4s, v27.4s, v2.4s\n"
"add v28.4s, v28.4s, v0.4s\n"
- "ld1r { v0.4s }, [x23]\n"
- "add v29.4s, v29.4s, v1.4s\n"
- "add v30.4s, v30.4s, v2.4s\n"
- "add v31.4s, v31.4s, v3.4s\n"
- "sqrdmulh v16.4s, v16.4s, v4.4s\n"
- "sqrdmulh v17.4s, v17.4s, v4.4s\n"
- "sqrdmulh v18.4s, v18.4s, v4.4s\n"
- "sqrdmulh v19.4s, v19.4s, v4.4s\n"
- "sqrdmulh v20.4s, v20.4s, v4.4s\n"
- "sqrdmulh v21.4s, v21.4s, v4.4s\n"
- "sqrdmulh v22.4s, v22.4s, v4.4s\n"
- "sqrdmulh v23.4s, v23.4s, v4.4s\n"
- "sqrdmulh v24.4s, v24.4s, v4.4s\n"
- "sqrdmulh v25.4s, v25.4s, v4.4s\n"
- "sqrdmulh v26.4s, v26.4s, v4.4s\n"
- "sqrdmulh v27.4s, v27.4s, v4.4s\n"
- "sqrdmulh v28.4s, v28.4s, v4.4s\n"
- "sqrdmulh v29.4s, v29.4s, v4.4s\n"
- "sqrdmulh v30.4s, v30.4s, v4.4s\n"
- "sqrdmulh v31.4s, v31.4s, v4.4s\n"
+ "ld1r { v0.4s }, [x20]\n"
+ "add v29.4s, v29.4s, v4.4s\n"
+ "add v30.4s, v30.4s, v3.4s\n"
+ "add v31.4s, v31.4s, v2.4s\n"
+ "sqrdmulh v16.4s, v16.4s, v1.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v1.4s\n"
+ "sqrdmulh v18.4s, v18.4s, v1.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v1.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v1.4s\n"
+ "sqrdmulh v21.4s, v21.4s, v1.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v1.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v1.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v1.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v1.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v1.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v1.4s\n"
+ "sqrdmulh v28.4s, v28.4s, v1.4s\n"
+ "sqrdmulh v29.4s, v29.4s, v1.4s\n"
+ "sqrdmulh v30.4s, v30.4s, v1.4s\n"
+ "sqrdmulh v31.4s, v31.4s, v1.4s\n"
"tbz %x[flags], #5, 110f\n"
- "and v4.16b, v16.16b, v0.16b\n"
- "and v5.16b, v17.16b, v0.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v4.4s\n"
- "sqadd v17.4s, v17.4s, v5.4s\n"
- "and v6.16b, v18.16b, v0.16b\n"
- "and v7.16b, v19.16b, v0.16b\n"
- "and v8.16b, v20.16b, v0.16b\n"
- "and v9.16b, v21.16b, v0.16b\n"
- "and v10.16b, v22.16b, v0.16b\n"
- "and v4.16b, v23.16b, v0.16b\n"
- "and v5.16b, v24.16b, v0.16b\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
+ "and v2.16b, v16.16b, v0.16b\n"
+ "and v1.16b, v17.16b, v0.16b\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
+ "sqadd v16.4s, v16.4s, v2.4s\n"
+ "sqadd v17.4s, v17.4s, v1.4s\n"
+ "and v7.16b, v18.16b, v0.16b\n"
+ "and v6.16b, v19.16b, v0.16b\n"
+ "and v5.16b, v20.16b, v0.16b\n"
+ "and v4.16b, v21.16b, v0.16b\n"
+ "and v3.16b, v22.16b, v0.16b\n"
+ "and v2.16b, v23.16b, v0.16b\n"
+ "and v1.16b, v24.16b, v0.16b\n"
"sshr v7.4s, v7.4s, #0x1f\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v9.4s, v9.4s, #0x1f\n"
- "sshr v10.4s, v10.4s, #0x1f\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "sqadd v18.4s, v18.4s, v6.4s\n"
- "sqadd v19.4s, v19.4s, v7.4s\n"
- "sqadd v20.4s, v20.4s, v8.4s\n"
- "sqadd v21.4s, v21.4s, v9.4s\n"
- "sqadd v22.4s, v22.4s, v10.4s\n"
- "sqadd v23.4s, v23.4s, v4.4s\n"
- "sqadd v24.4s, v24.4s, v5.4s\n"
- "and v6.16b, v25.16b, v0.16b\n"
- "and v7.16b, v26.16b, v0.16b\n"
- "and v8.16b, v27.16b, v0.16b\n"
- "and v9.16b, v28.16b, v0.16b\n"
- "and v10.16b, v29.16b, v0.16b\n"
- "and v4.16b, v30.16b, v0.16b\n"
- "and v5.16b, v31.16b, v0.16b\n"
"sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sshr v9.4s, v9.4s, #0x1f\n"
- "sshr v10.4s, v10.4s, #0x1f\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
+ "sshr v3.4s, v3.4s, #0x1f\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
+ "sqadd v18.4s, v18.4s, v7.4s\n"
+ "sqadd v19.4s, v19.4s, v6.4s\n"
+ "sqadd v20.4s, v20.4s, v5.4s\n"
+ "sqadd v21.4s, v21.4s, v4.4s\n"
+ "sqadd v22.4s, v22.4s, v3.4s\n"
+ "sqadd v23.4s, v23.4s, v2.4s\n"
+ "sqadd v24.4s, v24.4s, v1.4s\n"
+ "and v7.16b, v25.16b, v0.16b\n"
+ "and v6.16b, v26.16b, v0.16b\n"
+ "and v5.16b, v27.16b, v0.16b\n"
+ "and v4.16b, v28.16b, v0.16b\n"
+ "and v3.16b, v29.16b, v0.16b\n"
+ "and v2.16b, v30.16b, v0.16b\n"
+ "and v1.16b, v31.16b, v0.16b\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
- "sqadd v25.4s, v25.4s, v6.4s\n"
- "sqadd v26.4s, v26.4s, v7.4s\n"
- "sqadd v27.4s, v27.4s, v8.4s\n"
- "sqadd v28.4s, v28.4s, v9.4s\n"
- "sqadd v29.4s, v29.4s, v10.4s\n"
- "sqadd v30.4s, v30.4s, v4.4s\n"
- "sqadd v31.4s, v31.4s, v5.4s\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "sshr v3.4s, v3.4s, #0x1f\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
+ "sqadd v25.4s, v25.4s, v7.4s\n"
+ "sqadd v26.4s, v26.4s, v6.4s\n"
+ "sqadd v27.4s, v27.4s, v5.4s\n"
+ "sqadd v28.4s, v28.4s, v4.4s\n"
+ "sqadd v29.4s, v29.4s, v3.4s\n"
+ "sqadd v30.4s, v30.4s, v2.4s\n"
+ "sqadd v31.4s, v31.4s, v1.4s\n"
"110:" // Height 4: no shift correction
- "add x23, %x[qp], %[c_offset]\n"
- "ld1r { v4.4s }, [x23]\n"
+ "add x20, %x[qp], %[c_offset]\n"
+ "ld1r { v3.4s }, [x20]\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v0.4s\n"
"srshl v18.4s, v18.4s, v0.4s\n"
"srshl v19.4s, v19.4s, v0.4s\n"
- "add x23, %x[qp], %[maxval]\n"
- "ld1r { v6.4s }, [x23]\n"
+ "add x20, %x[qp], %[maxval]\n"
+ "ld1r { v2.4s }, [x20]\n"
"srshl v20.4s, v20.4s, v0.4s\n"
"srshl v21.4s, v21.4s, v0.4s\n"
- "add x23, %x[qp], %[minval]\n"
- "ld1r { v5.4s }, [x23]\n"
+ "add x20, %x[qp], %[minval]\n"
+ "ld1r { v1.4s }, [x20]\n"
"srshl v22.4s, v22.4s, v0.4s\n"
"srshl v23.4s, v23.4s, v0.4s\n"
"cmp x9, #0x10\n"
@@ -1845,163 +1844,163 @@ void a64_hybrid_u8qa_dot_4x16 (
"srshl v29.4s, v29.4s, v0.4s\n"
"srshl v30.4s, v30.4s, v0.4s\n"
"srshl v31.4s, v31.4s, v0.4s\n"
- "add v16.4s, v16.4s, v4.4s\n"
- "add v17.4s, v17.4s, v4.4s\n"
- "add v18.4s, v18.4s, v4.4s\n"
- "add v19.4s, v19.4s, v4.4s\n"
- "add v20.4s, v20.4s, v4.4s\n"
- "add v21.4s, v21.4s, v4.4s\n"
- "add v22.4s, v22.4s, v4.4s\n"
- "add v23.4s, v23.4s, v4.4s\n"
- "add v24.4s, v24.4s, v4.4s\n"
- "add v25.4s, v25.4s, v4.4s\n"
- "add v26.4s, v26.4s, v4.4s\n"
- "add v27.4s, v27.4s, v4.4s\n"
- "add v28.4s, v28.4s, v4.4s\n"
- "add v29.4s, v29.4s, v4.4s\n"
- "add v30.4s, v30.4s, v4.4s\n"
- "add v31.4s, v31.4s, v4.4s\n"
- "smin v16.4s, v16.4s, v6.4s\n"
- "smin v17.4s, v17.4s, v6.4s\n"
- "smin v18.4s, v18.4s, v6.4s\n"
- "smin v19.4s, v19.4s, v6.4s\n"
- "smin v20.4s, v20.4s, v6.4s\n"
- "smin v21.4s, v21.4s, v6.4s\n"
- "smin v22.4s, v22.4s, v6.4s\n"
- "smin v23.4s, v23.4s, v6.4s\n"
- "smin v24.4s, v24.4s, v6.4s\n"
- "smin v25.4s, v25.4s, v6.4s\n"
- "smin v26.4s, v26.4s, v6.4s\n"
- "smin v27.4s, v27.4s, v6.4s\n"
- "smin v28.4s, v28.4s, v6.4s\n"
- "smin v29.4s, v29.4s, v6.4s\n"
- "smin v30.4s, v30.4s, v6.4s\n"
- "smin v31.4s, v31.4s, v6.4s\n"
- "smax v16.4s, v16.4s, v5.4s\n"
- "smax v17.4s, v17.4s, v5.4s\n"
- "smax v18.4s, v18.4s, v5.4s\n"
- "smax v19.4s, v19.4s, v5.4s\n"
- "smax v20.4s, v20.4s, v5.4s\n"
- "smax v21.4s, v21.4s, v5.4s\n"
- "smax v22.4s, v22.4s, v5.4s\n"
- "smax v23.4s, v23.4s, v5.4s\n"
- "smax v24.4s, v24.4s, v5.4s\n"
- "smax v25.4s, v25.4s, v5.4s\n"
- "smax v26.4s, v26.4s, v5.4s\n"
- "smax v27.4s, v27.4s, v5.4s\n"
- "smax v28.4s, v28.4s, v5.4s\n"
- "smax v29.4s, v29.4s, v5.4s\n"
- "smax v30.4s, v30.4s, v5.4s\n"
- "smax v31.4s, v31.4s, v5.4s\n"
+ "add v16.4s, v16.4s, v3.4s\n"
+ "add v17.4s, v17.4s, v3.4s\n"
+ "add v18.4s, v18.4s, v3.4s\n"
+ "add v19.4s, v19.4s, v3.4s\n"
+ "add v20.4s, v20.4s, v3.4s\n"
+ "add v21.4s, v21.4s, v3.4s\n"
+ "add v22.4s, v22.4s, v3.4s\n"
+ "add v23.4s, v23.4s, v3.4s\n"
+ "add v24.4s, v24.4s, v3.4s\n"
+ "add v25.4s, v25.4s, v3.4s\n"
+ "add v26.4s, v26.4s, v3.4s\n"
+ "add v27.4s, v27.4s, v3.4s\n"
+ "add v28.4s, v28.4s, v3.4s\n"
+ "add v29.4s, v29.4s, v3.4s\n"
+ "add v30.4s, v30.4s, v3.4s\n"
+ "add v31.4s, v31.4s, v3.4s\n"
+ "smin v16.4s, v16.4s, v2.4s\n"
+ "smin v17.4s, v17.4s, v2.4s\n"
+ "smin v18.4s, v18.4s, v2.4s\n"
+ "smin v19.4s, v19.4s, v2.4s\n"
+ "smin v20.4s, v20.4s, v2.4s\n"
+ "smin v21.4s, v21.4s, v2.4s\n"
+ "smin v22.4s, v22.4s, v2.4s\n"
+ "smin v23.4s, v23.4s, v2.4s\n"
+ "smin v24.4s, v24.4s, v2.4s\n"
+ "smin v25.4s, v25.4s, v2.4s\n"
+ "smin v26.4s, v26.4s, v2.4s\n"
+ "smin v27.4s, v27.4s, v2.4s\n"
+ "smin v28.4s, v28.4s, v2.4s\n"
+ "smin v29.4s, v29.4s, v2.4s\n"
+ "smin v30.4s, v30.4s, v2.4s\n"
+ "smin v31.4s, v31.4s, v2.4s\n"
+ "smax v16.4s, v16.4s, v1.4s\n"
+ "smax v17.4s, v17.4s, v1.4s\n"
+ "smax v18.4s, v18.4s, v1.4s\n"
+ "smax v19.4s, v19.4s, v1.4s\n"
+ "smax v20.4s, v20.4s, v1.4s\n"
+ "smax v21.4s, v21.4s, v1.4s\n"
+ "smax v22.4s, v22.4s, v1.4s\n"
+ "smax v23.4s, v23.4s, v1.4s\n"
+ "smax v24.4s, v24.4s, v1.4s\n"
+ "smax v25.4s, v25.4s, v1.4s\n"
+ "smax v26.4s, v26.4s, v1.4s\n"
+ "smax v27.4s, v27.4s, v1.4s\n"
+ "smax v28.4s, v28.4s, v1.4s\n"
+ "smax v29.4s, v29.4s, v1.4s\n"
+ "smax v30.4s, v30.4s, v1.4s\n"
+ "smax v31.4s, v31.4s, v1.4s\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
- "uzp1 v17.8h, v18.8h, v19.8h\n"
+ "uzp1 v0.8h, v18.8h, v19.8h\n"
"uzp1 v20.8h, v20.8h, v21.8h\n"
- "uzp1 v21.8h, v22.8h, v23.8h\n"
+ "uzp1 v19.8h, v22.8h, v23.8h\n"
"uzp1 v24.8h, v24.8h, v25.8h\n"
- "uzp1 v25.8h, v26.8h, v27.8h\n"
+ "uzp1 v18.8h, v26.8h, v27.8h\n"
"uzp1 v28.8h, v28.8h, v29.8h\n"
- "uzp1 v29.8h, v30.8h, v31.8h\n"
- "uzp1 v16.16b, v16.16b, v17.16b\n"
- "uzp1 v20.16b, v20.16b, v21.16b\n"
- "uzp1 v24.16b, v24.16b, v25.16b\n"
- "uzp1 v28.16b, v28.16b, v29.16b\n"
+ "uzp1 v17.8h, v30.8h, v31.8h\n"
+ "uzp1 v16.16b, v16.16b, v0.16b\n"
+ "uzp1 v20.16b, v20.16b, v19.16b\n"
+ "uzp1 v24.16b, v24.16b, v18.16b\n"
+ "uzp1 v28.16b, v28.16b, v17.16b\n"
"bge 119f\n"
"tbz x9, #3, 114f\n"
"str d16, [x27], #0x8\n"
- "str d20, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
- "str d28, [x20], #0x8\n"
+ "str d20, [x23], #0x8\n"
+ "str d24, [x22], #0x8\n"
+ "str d28, [x21], #0x8\n"
"tbz x9, #2, 112f\n"
"st1 { v16.s }[2], [x27], #0x4\n"
- "st1 { v20.s }[2], [x22], #0x4\n"
- "st1 { v24.s }[2], [x21], #0x4\n"
- "st1 { v28.s }[2], [x20], #0x4\n"
+ "st1 { v20.s }[2], [x23], #0x4\n"
+ "st1 { v24.s }[2], [x22], #0x4\n"
+ "st1 { v28.s }[2], [x21], #0x4\n"
"tbz x9, #1, 111f\n"
"st1 { v16.h }[6], [x27], #0x2\n"
- "st1 { v20.h }[6], [x22], #0x2\n"
- "st1 { v24.h }[6], [x21], #0x2\n"
- "st1 { v28.h }[6], [x20], #0x2\n"
+ "st1 { v20.h }[6], [x23], #0x2\n"
+ "st1 { v24.h }[6], [x22], #0x2\n"
+ "st1 { v28.h }[6], [x21], #0x2\n"
"tbz x9, #0, 118f\n"
"st1 { v16.b }[14], [x27]\n"
- "st1 { v20.b }[14], [x22]\n"
- "st1 { v24.b }[14], [x21]\n"
- "st1 { v28.b }[14], [x20]\n"
+ "st1 { v20.b }[14], [x23]\n"
+ "st1 { v24.b }[14], [x22]\n"
+ "st1 { v28.b }[14], [x21]\n"
"b 118f\n"
"111:" // Height 4: Partial direct writeback: partial_1_12
"tbz x9, #0, 118f\n"
"st1 { v16.b }[12], [x27]\n"
- "st1 { v20.b }[12], [x22]\n"
- "st1 { v24.b }[12], [x21]\n"
- "st1 { v28.b }[12], [x20]\n"
+ "st1 { v20.b }[12], [x23]\n"
+ "st1 { v24.b }[12], [x22]\n"
+ "st1 { v28.b }[12], [x21]\n"
"b 118f\n"
"112:" // Height 4: Partial direct writeback: partial_2_8
"tbz x9, #1, 113f\n"
"st1 { v16.h }[4], [x27], #0x2\n"
- "st1 { v20.h }[4], [x22], #0x2\n"
- "st1 { v24.h }[4], [x21], #0x2\n"
- "st1 { v28.h }[4], [x20], #0x2\n"
+ "st1 { v20.h }[4], [x23], #0x2\n"
+ "st1 { v24.h }[4], [x22], #0x2\n"
+ "st1 { v28.h }[4], [x21], #0x2\n"
"tbz x9, #0, 118f\n"
"st1 { v16.b }[10], [x27]\n"
- "st1 { v20.b }[10], [x22]\n"
- "st1 { v24.b }[10], [x21]\n"
- "st1 { v28.b }[10], [x20]\n"
+ "st1 { v20.b }[10], [x23]\n"
+ "st1 { v24.b }[10], [x22]\n"
+ "st1 { v28.b }[10], [x21]\n"
"b 118f\n"
"113:" // Height 4: Partial direct writeback: partial_1_8
"tbz x9, #0, 118f\n"
"st1 { v16.b }[8], [x27]\n"
- "st1 { v20.b }[8], [x22]\n"
- "st1 { v24.b }[8], [x21]\n"
- "st1 { v28.b }[8], [x20]\n"
+ "st1 { v20.b }[8], [x23]\n"
+ "st1 { v24.b }[8], [x22]\n"
+ "st1 { v28.b }[8], [x21]\n"
"b 118f\n"
"114:" // Height 4: Partial direct writeback: partial_4_0
"tbz x9, #2, 116f\n"
"str s16, [x27], #0x4\n"
- "str s20, [x22], #0x4\n"
- "str s24, [x21], #0x4\n"
- "str s28, [x20], #0x4\n"
+ "str s20, [x23], #0x4\n"
+ "str s24, [x22], #0x4\n"
+ "str s28, [x21], #0x4\n"
"tbz x9, #1, 115f\n"
"st1 { v16.h }[2], [x27], #0x2\n"
- "st1 { v20.h }[2], [x22], #0x2\n"
- "st1 { v24.h }[2], [x21], #0x2\n"
- "st1 { v28.h }[2], [x20], #0x2\n"
+ "st1 { v20.h }[2], [x23], #0x2\n"
+ "st1 { v24.h }[2], [x22], #0x2\n"
+ "st1 { v28.h }[2], [x21], #0x2\n"
"tbz x9, #0, 118f\n"
"st1 { v16.b }[6], [x27]\n"
- "st1 { v20.b }[6], [x22]\n"
- "st1 { v24.b }[6], [x21]\n"
- "st1 { v28.b }[6], [x20]\n"
+ "st1 { v20.b }[6], [x23]\n"
+ "st1 { v24.b }[6], [x22]\n"
+ "st1 { v28.b }[6], [x21]\n"
"b 118f\n"
"115:" // Height 4: Partial direct writeback: partial_1_4
"tbz x9, #0, 118f\n"
"st1 { v16.b }[4], [x27]\n"
- "st1 { v20.b }[4], [x22]\n"
- "st1 { v24.b }[4], [x21]\n"
- "st1 { v28.b }[4], [x20]\n"
+ "st1 { v20.b }[4], [x23]\n"
+ "st1 { v24.b }[4], [x22]\n"
+ "st1 { v28.b }[4], [x21]\n"
"b 118f\n"
"116:" // Height 4: Partial direct writeback: partial_2_0
"tbz x9, #1, 117f\n"
"str h16, [x27], #0x2\n"
- "str h20, [x22], #0x2\n"
- "str h24, [x21], #0x2\n"
- "str h28, [x20], #0x2\n"
+ "str h20, [x23], #0x2\n"
+ "str h24, [x22], #0x2\n"
+ "str h28, [x21], #0x2\n"
"tbz x9, #0, 118f\n"
"st1 { v16.b }[2], [x27]\n"
- "st1 { v20.b }[2], [x22]\n"
- "st1 { v24.b }[2], [x21]\n"
- "st1 { v28.b }[2], [x20]\n"
+ "st1 { v20.b }[2], [x23]\n"
+ "st1 { v24.b }[2], [x22]\n"
+ "st1 { v28.b }[2], [x21]\n"
"b 118f\n"
"117:" // Height 4: Partial direct writeback: partial_1_0
"str b16, [x27, #0x0]\n"
- "str b20, [x22, #0x0]\n"
- "str b24, [x21, #0x0]\n"
- "str b28, [x20, #0x0]\n"
+ "str b20, [x23, #0x0]\n"
+ "str b24, [x22, #0x0]\n"
+ "str b28, [x21, #0x0]\n"
"118:" // Height 4: Partial direct writeback: Done
"b 120f\n"
"119:" // Height 4: Full writeback
"str q16, [x27, #0x0]\n"
"add x27, x27, #0x10\n"
- "str q20, [x22, #0x0]\n"
- "str q24, [x21, #0x0]\n"
- "str q28, [x20, #0x0]\n"
+ "str q20, [x23, #0x0]\n"
+ "str q24, [x22, #0x0]\n"
+ "str q28, [x21, #0x0]\n"
"120:" // Height 4: Writeback done
"subs x9, x9, #0x10\n"
"bgt 92b\n"
@@ -2017,7 +2016,6 @@ void a64_hybrid_u8qa_dot_4x16 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"122:" // Exit
-
: [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"