From ba209750abc1ac7e42bab9fef5db284384d70fb3 Mon Sep 17 00:00:00 2001 From: Michael Tyler Date: Thu, 15 Dec 2022 12:39:29 +0000 Subject: Update CPU kernels to remove x19 Resolves: COMPMID-5805 Signed-off-by: Michael Tyler Change-Id: I250f64531e209625e4ff176dd5a552c1c34bc484 Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8909 Tested-by: Arm Jenkins Comments-Addressed: Arm Jenkins Reviewed-by: Gunes Bayir Reviewed-by: Viet-Hoa Do Benchmark: Arm Jenkins --- .../kernels/sve_hybrid_u8qa_mmla_4x4VL/generic.cpp | 996 ++++++++++----------- 1 file changed, 498 insertions(+), 498 deletions(-) (limited to 'src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_mmla_4x4VL/generic.cpp') diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_mmla_4x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_mmla_4x4VL/generic.cpp index 0f3f5e35e1..f9d38c2925 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_mmla_4x4VL/generic.cpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_mmla_4x4VL/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -10,16 +10,16 @@ * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
*/ #ifdef ARM_COMPUTE_ENABLE_SVE @@ -85,18 +85,18 @@ void sve_hybrid_u8qa_mmla_4x4VL ( "cmp %x[M], #0x2\n" "bgt 29f\n" "beq 15f\n" + "mov x10, %x[col_bias]\n" "mov z11.s, #0x0\n" - "ldr x9, [%x[args_ptr], %[offsetof_N]]\n" "mov z15.b, #0x1\n" - "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n" - "mov x27, %x[col_bias]\n" "bic %x[flags], %x[flags], #0x80000000\n" - "mov x26, %x[output_ptr]\n" + "ldr x9, [%x[args_ptr], %[offsetof_N]]\n" + "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n" + "mov x27, %x[output_ptr]\n" "2:" // Height 1: Column loop + "mov x20, #0x0\n" + "whilelt p1.b, x20, x9\n" "mov z16.s, #0x0\n" - "mov x19, #0x0\n" "mov z17.s, #0x0\n" - "whilelt p1.b, x19, x9\n" "mov z18.s, #0x0\n" "mov z19.s, #0x0\n" "mov z20.s, #0x0\n" @@ -104,119 +104,119 @@ void sve_hybrid_u8qa_mmla_4x4VL ( "mov z22.s, #0x0\n" "mov z23.s, #0x0\n" "3:" // Height 1: setup done - "mov x25, #0x0\n" + "mov x26, #0x0\n" "4:" // Height 1: String loop "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n" - "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n" - "ldr w24, [x20, x25, LSL #0x2]\n" + "ldr w25, [x20, x26, LSL #0x2]\n" + "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n" "tbz %x[flags], #3, 5f\n" - "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n" - "add x20, x20, x19, LSL #3\n" - "ldr x23, [x20, #0x0]\n" - "cbnz x25, 6f\n" - "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n" - "add x23, x23, x19\n" + "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n" + "add x21, x21, x20, LSL #3\n" + "ldr x24, [x21, #0x0]\n" + "cbnz x26, 6f\n" + "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n" + "add x24, x24, x20\n" "b 6f\n" "5:" // Height 1: setup direct input - "mov x23, %x[input_ptr]\n" + "mov x24, %x[input_ptr]\n" "6:" // Height 1: input setup done - "cmp x24, #0x10\n" + "cmp x25, #0x10\n" "ble 9f\n" "7:" // Height 1: Multiply loop: Main loop head + "whilelt p0.b, XZR, x25\n" + "ld1rqb { z1.b }, p0/Z, [x24]\n" + "trn1 z0.d, z1.d, z2.d\n" "ld1b { z5.b }, p2/Z, [x28]\n" - "whilelt p0.b, XZR, x24\n" + ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n" "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n" - "ld1rqb { z1.b }, p0/Z, [x23]\n" - "trn1 z0.d, z1.d, z2.d\n" - "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n" - "add x23, x23, #0x10\n" "trn2 z1.d, z1.d, z2.d\n" + "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n" "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n" - "ld1b { z9.b }, p2/Z, [x28, #4, MUL VL]\n" - ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n" ".inst 0x45c69814 // ummla z20.s, z0.b, z6.b\n" - "ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n" ".inst 0x45c79811 // ummla z17.s, z0.b, z7.b\n" - "ld1b { z4.b }, p2/Z, [x28, #6, MUL VL]\n" + "ld1b { z9.b }, p2/Z, [x28, #4, MUL VL]\n" + "ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n" ".inst 0x45c89815 // ummla z21.s, z0.b, z8.b\n" + ".inst 0x45c99812 // ummla z18.s, z0.b, z9.b\n" + "ld1b { z4.b }, p2/Z, [x28, #6, MUL VL]\n" "ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n" "addvl x28, x28, #16\n" - ".inst 0x45c99812 // ummla z18.s, z0.b, z9.b\n" - "ld1b { z6.b }, p2/Z, [x28, #-8, MUL VL]\n" ".inst 0x45ca9816 // ummla z22.s, z0.b, z10.b\n" - "ld1b { z7.b }, p2/Z, [x28, #-7, MUL VL]\n" + "ld1b { z6.b }, p2/Z, [x28, #-8, MUL VL]\n" ".inst 0x45c49813 // ummla z19.s, z0.b, z4.b\n" - "ld1b { z8.b }, p2/Z, [x28, #-6, MUL VL]\n" ".inst 0x45c59817 // ummla z23.s, z0.b, z5.b\n" - "ld1b { z9.b }, p2/Z, [x28, #-5, MUL VL]\n" + "ld1b { z7.b }, p2/Z, [x28, #-7, MUL VL]\n" ".inst 0x45c69830 // ummla z16.s, z1.b, z6.b\n" - "ld1b { z10.b }, p2/Z, [x28, #-4, MUL VL]\n" + "ld1b { z8.b }, p2/Z, [x28, #-6, MUL VL]\n" + 
"ld1b { z9.b }, p2/Z, [x28, #-5, MUL VL]\n" ".inst 0x45c79834 // ummla z20.s, z1.b, z7.b\n" + "ld1b { z10.b }, p2/Z, [x28, #-4, MUL VL]\n" "ld1b { z4.b }, p2/Z, [x28, #-3, MUL VL]\n" ".inst 0x45c89831 // ummla z17.s, z1.b, z8.b\n" - "ld1b { z5.b }, p2/Z, [x28, #-2, MUL VL]\n" ".inst 0x45c99835 // ummla z21.s, z1.b, z9.b\n" + "ld1b { z5.b }, p2/Z, [x28, #-2, MUL VL]\n" "ld1b { z6.b }, p2/Z, [x28, #-1, MUL VL]\n" ".inst 0x45ca9832 // ummla z18.s, z1.b, z10.b\n" ".inst 0x45c49836 // ummla z22.s, z1.b, z4.b\n" ".inst 0x45c59833 // ummla z19.s, z1.b, z5.b\n" ".inst 0x45c69837 // ummla z23.s, z1.b, z6.b\n" + "add x24, x24, #0x10\n" "tbnz %x[flags], #31, 8f\n" "udot z11.s, z0.b, z15.b\n" "udot z11.s, z1.b, z15.b\n" "8:" // Height 1: Multiply loop: unique 1: skip row sum - "sub x24, x24, #0x10\n" - "cmp x24, #0x10\n" + "sub x25, x25, #0x10\n" + "cmp x25, #0x10\n" "bgt 7b\n" "9:" // Height 1: Multiply loop: Single iteration only + "whilelt p0.b, XZR, x25\n" + "ld1rqb { z1.b }, p0/Z, [x24]\n" + "trn1 z0.d, z1.d, z2.d\n" "ld1b { z5.b }, p2/Z, [x28]\n" - "whilelt p0.b, XZR, x24\n" + ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n" "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n" - "ld1rqb { z1.b }, p0/Z, [x23]\n" - "trn1 z0.d, z1.d, z2.d\n" + "subs x25, x25, #0x8\n" "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n" - "subs x24, x24, #0x8\n" - "trn2 z1.d, z1.d, z2.d\n" "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n" - "ld1b { z9.b }, p2/Z, [x28, #4, MUL VL]\n" - ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n" + "trn2 z1.d, z1.d, z2.d\n" ".inst 0x45c69814 // ummla z20.s, z0.b, z6.b\n" + "ld1b { z9.b }, p2/Z, [x28, #4, MUL VL]\n" "ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n" ".inst 0x45c79811 // ummla z17.s, z0.b, z7.b\n" - "ld1b { z4.b }, p2/Z, [x28, #6, MUL VL]\n" ".inst 0x45c89815 // ummla z21.s, z0.b, z8.b\n" + "ld1b { z4.b }, p2/Z, [x28, #6, MUL VL]\n" "ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n" - "addvl x28, x28, #8\n" ".inst 0x45c99812 // ummla z18.s, z0.b, z9.b\n" ".inst 0x45ca9816 // ummla z22.s, z0.b, z10.b\n" ".inst 0x45c49813 // ummla z19.s, z0.b, z4.b\n" ".inst 0x45c59817 // ummla z23.s, z0.b, z5.b\n" + "addvl x28, x28, #8\n" "ble 10f\n" "ld1b { z6.b }, p2/Z, [x28]\n" ".inst 0x45c69830 // ummla z16.s, z1.b, z6.b\n" "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n" - "ld1b { z8.b }, p2/Z, [x28, #2, MUL VL]\n" ".inst 0x45c79834 // ummla z20.s, z1.b, z7.b\n" + "ld1b { z8.b }, p2/Z, [x28, #2, MUL VL]\n" "ld1b { z9.b }, p2/Z, [x28, #3, MUL VL]\n" ".inst 0x45c89831 // ummla z17.s, z1.b, z8.b\n" + ".inst 0x45c99835 // ummla z21.s, z1.b, z9.b\n" "ld1b { z10.b }, p2/Z, [x28, #4, MUL VL]\n" "ld1b { z4.b }, p2/Z, [x28, #5, MUL VL]\n" - ".inst 0x45c99835 // ummla z21.s, z1.b, z9.b\n" - "ld1b { z5.b }, p2/Z, [x28, #6, MUL VL]\n" - "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n" ".inst 0x45ca9832 // ummla z18.s, z1.b, z10.b\n" - "addvl x28, x28, #8\n" ".inst 0x45c49836 // ummla z22.s, z1.b, z4.b\n" + "ld1b { z5.b }, p2/Z, [x28, #6, MUL VL]\n" + "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n" ".inst 0x45c59833 // ummla z19.s, z1.b, z5.b\n" ".inst 0x45c69837 // ummla z23.s, z1.b, z6.b\n" + "addvl x28, x28, #8\n" "10:" // Height 1: Multiply loop: multiply skip "tbnz %x[flags], #31, 11f\n" "udot z11.s, z0.b, z15.b\n" "udot z11.s, z1.b, z15.b\n" "11:" // Height 1: Multiply loop: unique 2: skip row sum - "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" - "add x25, x25, #0x1\n" - "cmp x25, x19\n" + "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n" + "add x26, x26, #0x1\n" + "cmp x26, x20\n" "bne 4b\n" "uzp1 z16.d, z16.d, z20.d\n" "uzp1 z17.d, 
z17.d, z21.d\n" @@ -224,33 +224,33 @@ void sve_hybrid_u8qa_mmla_4x4VL ( "uzp1 z19.d, z19.d, z23.d\n" "mov z23.d, z16.d\n" "tbnz %x[flags], #31, 12f\n" + "add x23, %x[qp], %[b_offset]\n" + "ld1rw { z1.s }, p2/Z, [x23]\n" ".inst 0x4491a96b // addp z11.s, p2/m, z11.s, z11.s\n" - "add x22, %x[qp], %[b_offset]\n" - "ld1rw { z1.s }, p2/Z, [x22]\n" - "mov z11.s, z11.s[0]\n" "neg z1.s, p2/M, z1.s\n" + "mov z11.s, z11.s[0]\n" "mul z11.s, p2/M, z11.s, z1.s\n" "12:" // Height 1: skip row sum fixup "add z23.s, z23.s, z11.s\n" - "ld1w { z0.s }, p2/Z, [x27]\n" - "orr %x[flags], %x[flags], #0x80000000\n" "add z17.s, z17.s, z11.s\n" - "ld1w { z1.s }, p2/Z, [x27, #1, MUL VL]\n" - "add x23, %x[qp], %[per_layer_right_shift]\n" + "ld1w { z0.s }, p2/Z, [x10]\n" + "ld1w { z1.s }, p2/Z, [x10, #1, MUL VL]\n" "add z18.s, z18.s, z11.s\n" - "ld1w { z2.s }, p2/Z, [x27, #2, MUL VL]\n" - "add x22, %x[qp], %[per_layer_mul]\n" "add z19.s, z19.s, z11.s\n" - "ld1w { z3.s }, p2/Z, [x27, #3, MUL VL]\n" - "addvl x27, x27, #4\n" + "ld1w { z2.s }, p2/Z, [x10, #2, MUL VL]\n" + "ld1w { z3.s }, p2/Z, [x10, #3, MUL VL]\n" + "add x23, %x[qp], %[per_layer_mul]\n" + "orr %x[flags], %x[flags], #0x80000000\n" "add z23.s, z23.s, z0.s\n" - "ld1rw { z0.s }, p2/Z, [x23]\n" "add z17.s, z17.s, z1.s\n" - "ld1rw { z4.s }, p2/Z, [x22]\n" "add z18.s, z18.s, z2.s\n" "add z19.s, z19.s, z3.s\n" + "ld1rw { z4.s }, p2/Z, [x23]\n" + "add x23, %x[qp], %[per_layer_right_shift]\n" + "ld1rw { z0.s }, p2/Z, [x23]\n" ".inst 0x04a476f7 // sqrdmulh z23.s, z23.s, z4.s\n" ".inst 0x04a47631 // sqrdmulh z17.s, z17.s, z4.s\n" + "addvl x10, x10, #4\n" ".inst 0x04a47652 // sqrdmulh z18.s, z18.s, z4.s\n" ".inst 0x04a47673 // sqrdmulh z19.s, z19.s, z4.s\n" "tbz %x[flags], #5, 13f\n" @@ -261,26 +261,26 @@ void sve_hybrid_u8qa_mmla_4x4VL ( "asr z4.s, z4.s, #0x1f\n" "asr z5.s, z5.s, #0x1f\n" "asr z6.s, z6.s, #0x1f\n" + "asr z7.s, z7.s, #0x1f\n" "sqadd z23.s, z23.s, z4.s\n" "sqadd z17.s, z17.s, z5.s\n" "sqadd z18.s, z18.s, z6.s\n" - "asr z7.s, z7.s, #0x1f\n" "sqadd z19.s, z19.s, z7.s\n" "13:" // Height 1: no shift correction + "add x23, %x[qp], %[c_offset]\n" + "ld1rw { z4.s }, p2/Z, [x23]\n" ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n" - "add x22, %x[qp], %[c_offset]\n" - "ld1rw { z4.s }, p2/Z, [x22]\n" + "add z23.s, z23.s, z4.s\n" ".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n" - "add x22, %x[qp], %[minval]\n" ".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n" - "ld1rw { z5.s }, p2/Z, [x22]\n" - "add x22, %x[qp], %[maxval]\n" - ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n" - "ld1rw { z6.s }, p2/Z, [x22]\n" - "add z23.s, z23.s, z4.s\n" "add z17.s, z17.s, z4.s\n" "add z18.s, z18.s, z4.s\n" + ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n" + "add x23, %x[qp], %[maxval]\n" + "ld1rw { z6.s }, p2/Z, [x23]\n" "add z19.s, z19.s, z4.s\n" + "add x23, %x[qp], %[minval]\n" + "ld1rw { z5.s }, p2/Z, [x23]\n" "smin z23.s, p2/M, z23.s, z6.s\n" "smin z17.s, p2/M, z17.s, z6.s\n" "smin z18.s, p2/M, z18.s, z6.s\n" @@ -288,31 +288,31 @@ void sve_hybrid_u8qa_mmla_4x4VL ( "smax z23.s, p2/M, z23.s, z5.s\n" "smax z17.s, p2/M, z17.s, z5.s\n" "smax z18.s, p2/M, z18.s, z5.s\n" - "smax z19.s, p2/M, z19.s, z5.s\n" "uzp1 z23.h, z23.h, z17.h\n" + "smax z19.s, p2/M, z19.s, z5.s\n" "uzp1 z17.h, z18.h, z19.h\n" "uzp1 z23.b, z23.b, z17.b\n" - "st1b { z23.b }, p1, [x26]\n" - "addvl x26, x26, #1\n" + "st1b { z23.b }, p1, [x27]\n" + "addvl x27, x27, #1\n" "14:" // Height 1: Writeback done "decw x9, ALL, MUL #4\n" "cmp x9, XZR\n" "bgt 2b\n" "b 58f\n" "15:" // Height 2 + 
"mov x10, %x[col_bias]\n" "mov z11.s, #0x0\n" - "ldr x9, [%x[args_ptr], %[offsetof_N]]\n" - "mov x27, %x[col_bias]\n" "mov z12.s, #0x0\n" - "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n" "bic %x[flags], %x[flags], #0x80000000\n" "mov z15.b, #0x1\n" - "mov x26, %x[output_ptr]\n" + "ldr x9, [%x[args_ptr], %[offsetof_N]]\n" + "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n" + "mov x27, %x[output_ptr]\n" "16:" // Height 2: Column loop + "mov x20, #0x0\n" + "whilelt p1.b, x20, x9\n" "mov z16.s, #0x0\n" - "mov x19, #0x0\n" "mov z17.s, #0x0\n" - "whilelt p1.b, x19, x9\n" "mov z18.s, #0x0\n" "mov z19.s, #0x0\n" "mov z20.s, #0x0\n" @@ -320,130 +320,130 @@ void sve_hybrid_u8qa_mmla_4x4VL ( "mov z22.s, #0x0\n" "mov z23.s, #0x0\n" "17:" // Height 2: setup done - "mov x25, #0x0\n" + "mov x26, #0x0\n" "18:" // Height 2: String loop "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n" - "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n" - "ldr w24, [x20, x25, LSL #0x2]\n" + "ldr w25, [x20, x26, LSL #0x2]\n" + "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n" "tbz %x[flags], #3, 19f\n" - "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n" - "add x20, x20, x19, LSL #3\n" - "ldr x23, [x20, #0x0]\n" - "ldr x22, [x20, #0x8]\n" - "cbnz x25, 20f\n" - "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n" - "add x23, x23, x19\n" - "add x22, x22, x19\n" + "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n" + "add x21, x21, x20, LSL #3\n" + "ldr x24, [x21, #0x0]\n" + "ldr x23, [x21, #0x8]\n" + "cbnz x26, 20f\n" + "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n" + "add x24, x24, x20\n" + "add x23, x23, x20\n" "b 20f\n" "19:" // Height 2: setup direct input - "mov x23, %x[input_ptr]\n" - "add x22, x23, x19\n" + "mov x24, %x[input_ptr]\n" + "add x23, x24, x20\n" "20:" // Height 2: input setup done - "cmp x24, #0x10\n" + "cmp x25, #0x10\n" "ble 23f\n" "21:" // Height 2: Multiply loop: Main loop head + "whilelt p0.b, XZR, x25\n" + "ld1rqb { z1.b }, p0/Z, [x24]\n" + "ld1rqb { z2.b }, p0/Z, [x23]\n" + "trn1 z0.d, z1.d, z2.d\n" "ld1b { z5.b }, p2/Z, [x28]\n" - "whilelt p0.b, XZR, x24\n" + ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n" "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n" - "ld1rqb { z1.b }, p0/Z, [x23]\n" - "add x23, x23, #0x10\n" - "ld1rqb { z2.b }, p0/Z, [x22]\n" - "trn1 z0.d, z1.d, z2.d\n" - "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n" - "add x22, x22, #0x10\n" "trn2 z1.d, z1.d, z2.d\n" + "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n" "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n" - "ld1b { z9.b }, p2/Z, [x28, #4, MUL VL]\n" - ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n" ".inst 0x45c69814 // ummla z20.s, z0.b, z6.b\n" - "ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n" ".inst 0x45c79811 // ummla z17.s, z0.b, z7.b\n" - "ld1b { z4.b }, p2/Z, [x28, #6, MUL VL]\n" + "ld1b { z9.b }, p2/Z, [x28, #4, MUL VL]\n" + "ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n" ".inst 0x45c89815 // ummla z21.s, z0.b, z8.b\n" + ".inst 0x45c99812 // ummla z18.s, z0.b, z9.b\n" + "ld1b { z4.b }, p2/Z, [x28, #6, MUL VL]\n" "ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n" "addvl x28, x28, #16\n" - ".inst 0x45c99812 // ummla z18.s, z0.b, z9.b\n" - "ld1b { z6.b }, p2/Z, [x28, #-8, MUL VL]\n" ".inst 0x45ca9816 // ummla z22.s, z0.b, z10.b\n" - "ld1b { z7.b }, p2/Z, [x28, #-7, MUL VL]\n" + "ld1b { z6.b }, p2/Z, [x28, #-8, MUL VL]\n" ".inst 0x45c49813 // ummla z19.s, z0.b, z4.b\n" - "ld1b { z8.b }, p2/Z, [x28, #-6, MUL VL]\n" ".inst 0x45c59817 // ummla z23.s, z0.b, z5.b\n" - "ld1b { z9.b }, p2/Z, [x28, #-5, MUL VL]\n" + "ld1b { z7.b }, p2/Z, [x28, #-7, MUL VL]\n" 
".inst 0x45c69830 // ummla z16.s, z1.b, z6.b\n" - "ld1b { z10.b }, p2/Z, [x28, #-4, MUL VL]\n" + "ld1b { z8.b }, p2/Z, [x28, #-6, MUL VL]\n" + "ld1b { z9.b }, p2/Z, [x28, #-5, MUL VL]\n" ".inst 0x45c79834 // ummla z20.s, z1.b, z7.b\n" + "ld1b { z10.b }, p2/Z, [x28, #-4, MUL VL]\n" "ld1b { z4.b }, p2/Z, [x28, #-3, MUL VL]\n" ".inst 0x45c89831 // ummla z17.s, z1.b, z8.b\n" - "ld1b { z5.b }, p2/Z, [x28, #-2, MUL VL]\n" ".inst 0x45c99835 // ummla z21.s, z1.b, z9.b\n" + "ld1b { z5.b }, p2/Z, [x28, #-2, MUL VL]\n" "ld1b { z6.b }, p2/Z, [x28, #-1, MUL VL]\n" ".inst 0x45ca9832 // ummla z18.s, z1.b, z10.b\n" ".inst 0x45c49836 // ummla z22.s, z1.b, z4.b\n" ".inst 0x45c59833 // ummla z19.s, z1.b, z5.b\n" ".inst 0x45c69837 // ummla z23.s, z1.b, z6.b\n" + "add x24, x24, #0x10\n" + "add x23, x23, #0x10\n" "tbnz %x[flags], #31, 22f\n" "udot z11.s, z0.b, z15.b\n" "udot z11.s, z1.b, z15.b\n" "22:" // Height 2: Multiply loop: unique 3: skip row sum - "sub x24, x24, #0x10\n" - "cmp x24, #0x10\n" + "sub x25, x25, #0x10\n" + "cmp x25, #0x10\n" "bgt 21b\n" "23:" // Height 2: Multiply loop: Single iteration only + "whilelt p0.b, XZR, x25\n" + "ld1rqb { z1.b }, p0/Z, [x24]\n" + "ld1rqb { z2.b }, p0/Z, [x23]\n" + "trn1 z0.d, z1.d, z2.d\n" "ld1b { z5.b }, p2/Z, [x28]\n" - "whilelt p0.b, XZR, x24\n" + ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n" "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n" - "subs x24, x24, #0x8\n" - "ld1rqb { z1.b }, p0/Z, [x23]\n" - "ld1rqb { z2.b }, p0/Z, [x22]\n" - "trn1 z0.d, z1.d, z2.d\n" + "subs x25, x25, #0x8\n" "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n" - "trn2 z1.d, z1.d, z2.d\n" "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n" - "ld1b { z9.b }, p2/Z, [x28, #4, MUL VL]\n" - ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n" + "trn2 z1.d, z1.d, z2.d\n" ".inst 0x45c69814 // ummla z20.s, z0.b, z6.b\n" + "ld1b { z9.b }, p2/Z, [x28, #4, MUL VL]\n" "ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n" ".inst 0x45c79811 // ummla z17.s, z0.b, z7.b\n" - "ld1b { z4.b }, p2/Z, [x28, #6, MUL VL]\n" ".inst 0x45c89815 // ummla z21.s, z0.b, z8.b\n" + "ld1b { z4.b }, p2/Z, [x28, #6, MUL VL]\n" "ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n" - "addvl x28, x28, #8\n" ".inst 0x45c99812 // ummla z18.s, z0.b, z9.b\n" ".inst 0x45ca9816 // ummla z22.s, z0.b, z10.b\n" ".inst 0x45c49813 // ummla z19.s, z0.b, z4.b\n" ".inst 0x45c59817 // ummla z23.s, z0.b, z5.b\n" + "addvl x28, x28, #8\n" "ble 24f\n" "ld1b { z6.b }, p2/Z, [x28]\n" ".inst 0x45c69830 // ummla z16.s, z1.b, z6.b\n" "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n" - "ld1b { z8.b }, p2/Z, [x28, #2, MUL VL]\n" ".inst 0x45c79834 // ummla z20.s, z1.b, z7.b\n" + "ld1b { z8.b }, p2/Z, [x28, #2, MUL VL]\n" "ld1b { z9.b }, p2/Z, [x28, #3, MUL VL]\n" ".inst 0x45c89831 // ummla z17.s, z1.b, z8.b\n" + ".inst 0x45c99835 // ummla z21.s, z1.b, z9.b\n" "ld1b { z10.b }, p2/Z, [x28, #4, MUL VL]\n" "ld1b { z4.b }, p2/Z, [x28, #5, MUL VL]\n" - ".inst 0x45c99835 // ummla z21.s, z1.b, z9.b\n" - "ld1b { z5.b }, p2/Z, [x28, #6, MUL VL]\n" - "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n" ".inst 0x45ca9832 // ummla z18.s, z1.b, z10.b\n" - "addvl x28, x28, #8\n" ".inst 0x45c49836 // ummla z22.s, z1.b, z4.b\n" + "ld1b { z5.b }, p2/Z, [x28, #6, MUL VL]\n" + "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n" ".inst 0x45c59833 // ummla z19.s, z1.b, z5.b\n" ".inst 0x45c69837 // ummla z23.s, z1.b, z6.b\n" + "addvl x28, x28, #8\n" "24:" // Height 2: Multiply loop: multiply skip "tbnz %x[flags], #31, 25f\n" "udot z11.s, z0.b, z15.b\n" "udot z11.s, z1.b, z15.b\n" "25:" // Height 2: Multiply loop: unique 4: skip row sum - "ldr 
w19, [%x[args_ptr], %[offsetof_num_strings]]\n" - "add x25, x25, #0x1\n" - "cmp x25, x19\n" + "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n" + "add x26, x26, #0x1\n" + "cmp x26, x20\n" "bne 18b\n" "uzp1 z7.d, z16.d, z20.d\n" - "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n" + "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n" "uzp2 z16.d, z16.d, z20.d\n" - "add x21, x26, x19\n" + "add x22, x27, x20\n" "uzp1 z20.d, z17.d, z21.d\n" "uzp2 z17.d, z17.d, z21.d\n" "uzp1 z21.d, z18.d, z22.d\n" @@ -452,39 +452,39 @@ void sve_hybrid_u8qa_mmla_4x4VL ( "uzp2 z19.d, z19.d, z23.d\n" "mov z23.d, z7.d\n" "tbnz %x[flags], #31, 26f\n" + "add x23, %x[qp], %[b_offset]\n" + "ld1rw { z2.s }, p2/Z, [x23]\n" ".inst 0x4491a96b // addp z11.s, p2/m, z11.s, z11.s\n" - "add x22, %x[qp], %[b_offset]\n" - "ld1rw { z2.s }, p2/Z, [x22]\n" + "neg z2.s, p2/M, z2.s\n" "mov z12.s, z11.s[3]\n" "mov z11.s, z11.s[0]\n" - "neg z2.s, p2/M, z2.s\n" "mul z11.s, p2/M, z11.s, z2.s\n" "mul z12.s, p2/M, z12.s, z2.s\n" "26:" // Height 2: skip row sum fixup "add z23.s, z23.s, z11.s\n" - "ld1w { z0.s }, p2/Z, [x27]\n" - "orr %x[flags], %x[flags], #0x80000000\n" "add z20.s, z20.s, z11.s\n" - "ld1w { z1.s }, p2/Z, [x27, #1, MUL VL]\n" - "add x23, %x[qp], %[per_layer_right_shift]\n" + "ld1w { z0.s }, p2/Z, [x10]\n" + "ld1w { z1.s }, p2/Z, [x10, #1, MUL VL]\n" "add z21.s, z21.s, z11.s\n" - "ld1w { z2.s }, p2/Z, [x27, #2, MUL VL]\n" - "add x22, %x[qp], %[per_layer_mul]\n" "add z22.s, z22.s, z11.s\n" - "ld1w { z3.s }, p2/Z, [x27, #3, MUL VL]\n" - "addvl x27, x27, #4\n" + "ld1w { z2.s }, p2/Z, [x10, #2, MUL VL]\n" + "ld1w { z3.s }, p2/Z, [x10, #3, MUL VL]\n" "add z16.s, z16.s, z12.s\n" - "ld1rw { z4.s }, p2/Z, [x22]\n" "add z17.s, z17.s, z12.s\n" + "add x23, %x[qp], %[per_layer_mul]\n" + "orr %x[flags], %x[flags], #0x80000000\n" "add z18.s, z18.s, z12.s\n" "add z19.s, z19.s, z12.s\n" + "ld1rw { z4.s }, p2/Z, [x23]\n" + "add x23, %x[qp], %[per_layer_right_shift]\n" "add z23.s, z23.s, z0.s\n" "add z20.s, z20.s, z1.s\n" + "addvl x10, x10, #4\n" "add z21.s, z21.s, z2.s\n" "add z22.s, z22.s, z3.s\n" "add z16.s, z16.s, z0.s\n" - "ld1rw { z0.s }, p2/Z, [x23]\n" "add z17.s, z17.s, z1.s\n" + "ld1rw { z0.s }, p2/Z, [x23]\n" "add z18.s, z18.s, z2.s\n" "add z19.s, z19.s, z3.s\n" ".inst 0x04a476f7 // sqrdmulh z23.s, z23.s, z4.s\n" @@ -497,97 +497,97 @@ void sve_hybrid_u8qa_mmla_4x4VL ( ".inst 0x04a47673 // sqrdmulh z19.s, z19.s, z4.s\n" "tbz %x[flags], #5, 27f\n" "and z4.d, z23.d, z0.d\n" - "and z5.d, z20.d, z0.d\n" - "and z6.d, z21.d, z0.d\n" "asr z4.s, z4.s, #0x1f\n" - "asr z5.s, z5.s, #0x1f\n" - "asr z6.s, z6.s, #0x1f\n" "sqadd z23.s, z23.s, z4.s\n" - "sqadd z20.s, z20.s, z5.s\n" - "sqadd z21.s, z21.s, z6.s\n" + "and z5.d, z20.d, z0.d\n" + "and z6.d, z21.d, z0.d\n" "and z7.d, z22.d, z0.d\n" "and z8.d, z16.d, z0.d\n" "and z9.d, z17.d, z0.d\n" + "and z10.d, z18.d, z0.d\n" + "and z4.d, z19.d, z0.d\n" + "asr z5.s, z5.s, #0x1f\n" + "asr z6.s, z6.s, #0x1f\n" "asr z7.s, z7.s, #0x1f\n" "asr z8.s, z8.s, #0x1f\n" "asr z9.s, z9.s, #0x1f\n" + "asr z10.s, z10.s, #0x1f\n" + "asr z4.s, z4.s, #0x1f\n" + "sqadd z20.s, z20.s, z5.s\n" + "sqadd z21.s, z21.s, z6.s\n" "sqadd z22.s, z22.s, z7.s\n" "sqadd z16.s, z16.s, z8.s\n" "sqadd z17.s, z17.s, z9.s\n" - "and z10.d, z18.d, z0.d\n" - "and z4.d, z19.d, z0.d\n" - "asr z10.s, z10.s, #0x1f\n" - "asr z4.s, z4.s, #0x1f\n" "sqadd z18.s, z18.s, z10.s\n" "sqadd z19.s, z19.s, z4.s\n" "27:" // Height 2: no shift correction + "add x23, %x[qp], %[c_offset]\n" + "ld1rw { z4.s }, p2/Z, [x23]\n" ".inst 0x44828817 // srshl 
z23.s, p2/M, z23.s, z0.s\n" - "add x22, %x[qp], %[c_offset]\n" - "ld1rw { z4.s }, p2/Z, [x22]\n" + "add z23.s, z23.s, z4.s\n" ".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n" - "add x22, %x[qp], %[minval]\n" ".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n" - "ld1rw { z5.s }, p2/Z, [x22]\n" - "add x22, %x[qp], %[maxval]\n" - ".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n" - "ld1rw { z6.s }, p2/Z, [x22]\n" - ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n" - "add z23.s, z23.s, z4.s\n" "add z20.s, z20.s, z4.s\n" "add z21.s, z21.s, z4.s\n" + ".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n" + ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n" "add z22.s, z22.s, z4.s\n" "add z16.s, z16.s, z4.s\n" + ".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n" + ".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n" + "add z17.s, z17.s, z4.s\n" + "add z18.s, z18.s, z4.s\n" + ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n" + "add x23, %x[qp], %[maxval]\n" + "ld1rw { z6.s }, p2/Z, [x23]\n" + "add z19.s, z19.s, z4.s\n" + "add x23, %x[qp], %[minval]\n" + "ld1rw { z5.s }, p2/Z, [x23]\n" "smin z23.s, p2/M, z23.s, z6.s\n" "smin z20.s, p2/M, z20.s, z6.s\n" "smin z21.s, p2/M, z21.s, z6.s\n" "smin z22.s, p2/M, z22.s, z6.s\n" + "smin z16.s, p2/M, z16.s, z6.s\n" + "smin z17.s, p2/M, z17.s, z6.s\n" + "smin z18.s, p2/M, z18.s, z6.s\n" + "smin z19.s, p2/M, z19.s, z6.s\n" "smax z23.s, p2/M, z23.s, z5.s\n" "smax z20.s, p2/M, z20.s, z5.s\n" "smax z21.s, p2/M, z21.s, z5.s\n" - "smax z22.s, p2/M, z22.s, z5.s\n" - "smin z16.s, p2/M, z16.s, z6.s\n" "uzp1 z23.h, z23.h, z20.h\n" - ".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n" - "uzp1 z20.h, z21.h, z22.h\n" + "smax z22.s, p2/M, z22.s, z5.s\n" "smax z16.s, p2/M, z16.s, z5.s\n" + "uzp1 z20.h, z21.h, z22.h\n" "uzp1 z23.b, z23.b, z20.b\n" - "st1b { z23.b }, p1, [x26]\n" - "add z17.s, z17.s, z4.s\n" - "addvl x26, x26, #1\n" - ".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n" - ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n" - "smin z17.s, p2/M, z17.s, z6.s\n" - "add z18.s, z18.s, z4.s\n" - "add z19.s, z19.s, z4.s\n" "smax z17.s, p2/M, z17.s, z5.s\n" - "smin z18.s, p2/M, z18.s, z6.s\n" - "smin z19.s, p2/M, z19.s, z6.s\n" - "uzp1 z16.h, z16.h, z17.h\n" "smax z18.s, p2/M, z18.s, z5.s\n" + "uzp1 z16.h, z16.h, z17.h\n" + "st1b { z23.b }, p1, [x27]\n" "smax z19.s, p2/M, z19.s, z5.s\n" "uzp1 z17.h, z18.h, z19.h\n" "uzp1 z16.b, z16.b, z17.b\n" - "st1b { z16.b }, p1, [x21]\n" + "st1b { z16.b }, p1, [x22]\n" + "addvl x27, x27, #1\n" "28:" // Height 2: Writeback done "decw x9, ALL, MUL #4\n" "cmp x9, XZR\n" "bgt 16b\n" "b 58f\n" "29:" // Height 3 + "mov x10, %x[col_bias]\n" "mov z11.s, #0x0\n" - "ldr x9, [%x[args_ptr], %[offsetof_N]]\n" - "mov x27, %x[col_bias]\n" "mov z12.s, #0x0\n" - "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n" "bic %x[flags], %x[flags], #0x80000000\n" "mov z13.s, #0x0\n" - "mov x26, %x[output_ptr]\n" "mov z15.b, #0x1\n" + "ldr x9, [%x[args_ptr], %[offsetof_N]]\n" + "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n" + "mov x27, %x[output_ptr]\n" "30:" // Height 3: Column loop + "mov x20, #0x0\n" + "whilelt p1.b, x20, x9\n" "mov z16.s, #0x0\n" - "mov x19, #0x0\n" "mov z17.s, #0x0\n" - "whilelt p1.b, x19, x9\n" "mov z18.s, #0x0\n" "mov z19.s, #0x0\n" "mov z20.s, #0x0\n" @@ -603,74 +603,74 @@ void sve_hybrid_u8qa_mmla_4x4VL ( "mov z30.s, #0x0\n" "mov z31.s, #0x0\n" "31:" // Height 3: setup done - "mov x25, #0x0\n" + "mov x26, #0x0\n" "32:" // Height 3: String loop "ldr x20, [%x[args_ptr], 
%[offsetof_string_lengths]]\n" - "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n" - "ldr w24, [x20, x25, LSL #0x2]\n" + "ldr w25, [x20, x26, LSL #0x2]\n" + "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n" "tbz %x[flags], #3, 33f\n" - "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n" - "add x20, x20, x19, LSL #3\n" - "ldr x23, [x20, #0x0]\n" - "ldr x22, [x20, #0x8]\n" - "ldr x21, [x20, #0x10]\n" - "cbnz x25, 34f\n" - "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n" - "add x23, x23, x19\n" - "add x22, x22, x19\n" - "add x21, x21, x19\n" + "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n" + "add x21, x21, x20, LSL #3\n" + "ldr x24, [x21, #0x0]\n" + "ldr x23, [x21, #0x8]\n" + "ldr x22, [x21, #0x10]\n" + "cbnz x26, 34f\n" + "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n" + "add x24, x24, x20\n" + "add x23, x23, x20\n" + "add x22, x22, x20\n" "b 34f\n" "33:" // Height 3: setup direct input - "mov x23, %x[input_ptr]\n" - "add x22, x23, x19\n" - "add x21, x22, x19\n" + "mov x24, %x[input_ptr]\n" + "add x23, x24, x20\n" + "add x22, x23, x20\n" "34:" // Height 3: input setup done - "cmp x24, #0x10\n" + "cmp x25, #0x10\n" "ble 37f\n" "35:" // Height 3: Multiply loop: Main loop head - "ld1b { z5.b }, p2/Z, [x28]\n" - "whilelt p0.b, XZR, x24\n" - "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n" - "ld1rqb { z1.b }, p0/Z, [x23]\n" - "add x23, x23, #0x10\n" - "ld1rqb { z2.b }, p0/Z, [x22]\n" + "whilelt p0.b, XZR, x25\n" + "ld1rqb { z1.b }, p0/Z, [x24]\n" + "ld1rqb { z2.b }, p0/Z, [x23]\n" + "ld1rqb { z3.b }, p0/Z, [x22]\n" "trn1 z0.d, z1.d, z2.d\n" - "ld1rqb { z3.b }, p0/Z, [x21]\n" - "add x22, x22, #0x10\n" "trn2 z1.d, z1.d, z2.d\n" - "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n" - "add x21, x21, #0x10\n" + "ld1b { z5.b }, p2/Z, [x28]\n" + "trn1 z2.d, z3.d, z4.d\n" + "trn2 z3.d, z3.d, z4.d\n" ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n" + "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n" + ".inst 0x45c59858 // ummla z24.s, z2.b, z5.b\n" + "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n" "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n" ".inst 0x45c69814 // ummla z20.s, z0.b, z6.b\n" "ld1b { z9.b }, p2/Z, [x28, #4, MUL VL]\n" - "trn1 z2.d, z3.d, z4.d\n" "ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n" - "trn2 z3.d, z3.d, z4.d\n" + ".inst 0x45c6985c // ummla z28.s, z2.b, z6.b\n" + ".inst 0x45c79811 // ummla z17.s, z0.b, z7.b\n" "ld1b { z4.b }, p2/Z, [x28, #6, MUL VL]\n" - ".inst 0x45c59858 // ummla z24.s, z2.b, z5.b\n" "ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n" "addvl x28, x28, #16\n" - ".inst 0x45c6985c // ummla z28.s, z2.b, z6.b\n" - "ld1b { z6.b }, p2/Z, [x28, #-8, MUL VL]\n" - ".inst 0x45c79811 // ummla z17.s, z0.b, z7.b\n" ".inst 0x45c79859 // ummla z25.s, z2.b, z7.b\n" - "ld1b { z7.b }, p2/Z, [x28, #-7, MUL VL]\n" + "ld1b { z6.b }, p2/Z, [x28, #-8, MUL VL]\n" ".inst 0x45c89815 // ummla z21.s, z0.b, z8.b\n" ".inst 0x45c8985d // ummla z29.s, z2.b, z8.b\n" - "ld1b { z8.b }, p2/Z, [x28, #-6, MUL VL]\n" + "ld1b { z7.b }, p2/Z, [x28, #-7, MUL VL]\n" ".inst 0x45c99812 // ummla z18.s, z0.b, z9.b\n" ".inst 0x45c9985a // ummla z26.s, z2.b, z9.b\n" + "ld1b { z8.b }, p2/Z, [x28, #-6, MUL VL]\n" "ld1b { z9.b }, p2/Z, [x28, #-5, MUL VL]\n" ".inst 0x45ca9816 // ummla z22.s, z0.b, z10.b\n" ".inst 0x45ca985e // ummla z30.s, z2.b, z10.b\n" "ld1b { z10.b }, p2/Z, [x28, #-4, MUL VL]\n" + "add x24, x24, #0x10\n" ".inst 0x45c49813 // ummla z19.s, z0.b, z4.b\n" ".inst 0x45c4985b // ummla z27.s, z2.b, z4.b\n" "ld1b { z4.b }, p2/Z, [x28, #-3, MUL VL]\n" + "add x23, x23, #0x10\n" ".inst 0x45c59817 // ummla z23.s, z0.b, z5.b\n" ".inst 
0x45c5985f // ummla z31.s, z2.b, z5.b\n" "ld1b { z5.b }, p2/Z, [x28, #-2, MUL VL]\n" + "add x22, x22, #0x10\n" ".inst 0x45c69830 // ummla z16.s, z1.b, z6.b\n" ".inst 0x45c69878 // ummla z24.s, z3.b, z6.b\n" "ld1b { z6.b }, p2/Z, [x28, #-1, MUL VL]\n" @@ -694,36 +694,36 @@ void sve_hybrid_u8qa_mmla_4x4VL ( "udot z11.s, z1.b, z15.b\n" "udot z13.s, z3.b, z15.b\n" "36:" // Height 3: Multiply loop: unique 5: skip row sum - "sub x24, x24, #0x10\n" - "cmp x24, #0x10\n" + "sub x25, x25, #0x10\n" + "cmp x25, #0x10\n" "bgt 35b\n" "37:" // Height 3: Multiply loop: Single iteration only - "ld1b { z5.b }, p2/Z, [x28]\n" - "whilelt p0.b, XZR, x24\n" - "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n" - "ld1rqb { z1.b }, p0/Z, [x23]\n" - "subs x24, x24, #0x8\n" - "ld1rqb { z2.b }, p0/Z, [x22]\n" + "whilelt p0.b, XZR, x25\n" + "ld1rqb { z1.b }, p0/Z, [x24]\n" + "ld1rqb { z2.b }, p0/Z, [x23]\n" + "ld1rqb { z3.b }, p0/Z, [x22]\n" "trn1 z0.d, z1.d, z2.d\n" - "ld1rqb { z3.b }, p0/Z, [x21]\n" "trn2 z1.d, z1.d, z2.d\n" + "ld1b { z5.b }, p2/Z, [x28]\n" + "trn1 z2.d, z3.d, z4.d\n" + "trn2 z3.d, z3.d, z4.d\n" + ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n" + "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n" + ".inst 0x45c59858 // ummla z24.s, z2.b, z5.b\n" "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n" "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n" - ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n" - ".inst 0x45c69814 // ummla z20.s, z0.b, z6.b\n" + "subs x25, x25, #0x8\n" "ld1b { z9.b }, p2/Z, [x28, #4, MUL VL]\n" - "trn1 z2.d, z3.d, z4.d\n" "ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n" - "trn2 z3.d, z3.d, z4.d\n" + ".inst 0x45c69814 // ummla z20.s, z0.b, z6.b\n" + ".inst 0x45c6985c // ummla z28.s, z2.b, z6.b\n" "ld1b { z4.b }, p2/Z, [x28, #6, MUL VL]\n" - ".inst 0x45c59858 // ummla z24.s, z2.b, z5.b\n" "ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n" - "addvl x28, x28, #8\n" - ".inst 0x45c6985c // ummla z28.s, z2.b, z6.b\n" ".inst 0x45c79811 // ummla z17.s, z0.b, z7.b\n" ".inst 0x45c79859 // ummla z25.s, z2.b, z7.b\n" ".inst 0x45c89815 // ummla z21.s, z0.b, z8.b\n" ".inst 0x45c8985d // ummla z29.s, z2.b, z8.b\n" + "addvl x28, x28, #8\n" ".inst 0x45c99812 // ummla z18.s, z0.b, z9.b\n" ".inst 0x45c9985a // ummla z26.s, z2.b, z9.b\n" ".inst 0x45ca9816 // ummla z22.s, z0.b, z10.b\n" @@ -735,23 +735,23 @@ void sve_hybrid_u8qa_mmla_4x4VL ( "ble 38f\n" "ld1b { z6.b }, p2/Z, [x28]\n" ".inst 0x45c69830 // ummla z16.s, z1.b, z6.b\n" - "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n" ".inst 0x45c69878 // ummla z24.s, z3.b, z6.b\n" + "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n" "ld1b { z8.b }, p2/Z, [x28, #2, MUL VL]\n" "ld1b { z9.b }, p2/Z, [x28, #3, MUL VL]\n" ".inst 0x45c79834 // ummla z20.s, z1.b, z7.b\n" - "ld1b { z10.b }, p2/Z, [x28, #4, MUL VL]\n" ".inst 0x45c7987c // ummla z28.s, z3.b, z7.b\n" + "ld1b { z10.b }, p2/Z, [x28, #4, MUL VL]\n" "ld1b { z4.b }, p2/Z, [x28, #5, MUL VL]\n" ".inst 0x45c89831 // ummla z17.s, z1.b, z8.b\n" - "ld1b { z5.b }, p2/Z, [x28, #6, MUL VL]\n" ".inst 0x45c89879 // ummla z25.s, z3.b, z8.b\n" + "ld1b { z5.b }, p2/Z, [x28, #6, MUL VL]\n" "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n" - "addvl x28, x28, #8\n" ".inst 0x45c99835 // ummla z21.s, z1.b, z9.b\n" ".inst 0x45c9987d // ummla z29.s, z3.b, z9.b\n" ".inst 0x45ca9832 // ummla z18.s, z1.b, z10.b\n" ".inst 0x45ca987a // ummla z26.s, z3.b, z10.b\n" + "addvl x28, x28, #8\n" ".inst 0x45c49836 // ummla z22.s, z1.b, z4.b\n" ".inst 0x45c4987e // ummla z30.s, z3.b, z4.b\n" ".inst 0x45c59833 // ummla z19.s, z1.b, z5.b\n" @@ -765,17 +765,17 @@ void sve_hybrid_u8qa_mmla_4x4VL ( "udot 
z11.s, z1.b, z15.b\n" "udot z13.s, z3.b, z15.b\n" "39:" // Height 3: Multiply loop: unique 6: skip row sum - "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" - "add x25, x25, #0x1\n" - "cmp x25, x19\n" + "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n" + "add x26, x26, #0x1\n" + "cmp x26, x20\n" "bne 32b\n" + "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n" "uzp1 z7.d, z16.d, z20.d\n" - "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n" + "add x22, x27, x20\n" "uzp2 z16.d, z16.d, z20.d\n" - "add x21, x26, x19\n" "uzp1 z20.d, z17.d, z21.d\n" "uzp2 z17.d, z17.d, z21.d\n" - "add x20, x21, x19\n" + "add x21, x22, x20\n" "uzp1 z21.d, z18.d, z22.d\n" "uzp2 z18.d, z18.d, z22.d\n" "uzp1 z22.d, z19.d, z23.d\n" @@ -786,37 +786,37 @@ void sve_hybrid_u8qa_mmla_4x4VL ( "uzp1 z27.d, z27.d, z31.d\n" "mov z31.d, z7.d\n" "tbnz %x[flags], #31, 40f\n" + "add x23, %x[qp], %[b_offset]\n" + "ld1rw { z3.s }, p2/Z, [x23]\n" ".inst 0x4491a96b // addp z11.s, p2/m, z11.s, z11.s\n" - "add x22, %x[qp], %[b_offset]\n" - "ld1rw { z3.s }, p2/Z, [x22]\n" ".inst 0x4491a9ad // addp z13.s, p2/m, z13.s, z13.s\n" + "neg z3.s, p2/M, z3.s\n" "mov z12.s, z11.s[3]\n" "mov z11.s, z11.s[0]\n" - "neg z3.s, p2/M, z3.s\n" - "mov z13.s, z13.s[0]\n" "mul z11.s, p2/M, z11.s, z3.s\n" + "mov z13.s, z13.s[0]\n" "mul z12.s, p2/M, z12.s, z3.s\n" "mul z13.s, p2/M, z13.s, z3.s\n" "40:" // Height 3: skip row sum fixup "add z31.s, z31.s, z11.s\n" - "ld1w { z0.s }, p2/Z, [x27]\n" - "orr %x[flags], %x[flags], #0x80000000\n" "add z20.s, z20.s, z11.s\n" - "ld1w { z1.s }, p2/Z, [x27, #1, MUL VL]\n" - "add x23, %x[qp], %[per_layer_right_shift]\n" + "ld1w { z0.s }, p2/Z, [x10]\n" + "ld1w { z1.s }, p2/Z, [x10, #1, MUL VL]\n" "add z21.s, z21.s, z11.s\n" - "ld1w { z2.s }, p2/Z, [x27, #2, MUL VL]\n" - "add x22, %x[qp], %[per_layer_mul]\n" "add z22.s, z22.s, z11.s\n" - "ld1w { z3.s }, p2/Z, [x27, #3, MUL VL]\n" - "addvl x27, x27, #4\n" + "ld1w { z2.s }, p2/Z, [x10, #2, MUL VL]\n" + "ld1w { z3.s }, p2/Z, [x10, #3, MUL VL]\n" "add z16.s, z16.s, z12.s\n" - "ld1rw { z4.s }, p2/Z, [x22]\n" "add z17.s, z17.s, z12.s\n" + "add x23, %x[qp], %[per_layer_mul]\n" + "orr %x[flags], %x[flags], #0x80000000\n" "add z18.s, z18.s, z12.s\n" "add z19.s, z19.s, z12.s\n" + "ld1rw { z4.s }, p2/Z, [x23]\n" + "add x23, %x[qp], %[per_layer_right_shift]\n" "add z24.s, z24.s, z13.s\n" "add z25.s, z25.s, z13.s\n" + "addvl x10, x10, #4\n" "add z26.s, z26.s, z13.s\n" "add z27.s, z27.s, z13.s\n" "add z31.s, z31.s, z0.s\n" @@ -828,8 +828,8 @@ void sve_hybrid_u8qa_mmla_4x4VL ( "add z18.s, z18.s, z2.s\n" "add z19.s, z19.s, z3.s\n" "add z24.s, z24.s, z0.s\n" - "ld1rw { z0.s }, p2/Z, [x23]\n" "add z25.s, z25.s, z1.s\n" + "ld1rw { z0.s }, p2/Z, [x23]\n" "add z26.s, z26.s, z2.s\n" "add z27.s, z27.s, z3.s\n" ".inst 0x04a477ff // sqrdmulh z31.s, z31.s, z4.s\n" @@ -848,131 +848,131 @@ void sve_hybrid_u8qa_mmla_4x4VL ( "and z4.d, z31.d, z0.d\n" "and z5.d, z20.d, z0.d\n" "and z6.d, z21.d, z0.d\n" + "and z7.d, z22.d, z0.d\n" + "and z8.d, z16.d, z0.d\n" "asr z4.s, z4.s, #0x1f\n" "asr z5.s, z5.s, #0x1f\n" "asr z6.s, z6.s, #0x1f\n" + "asr z7.s, z7.s, #0x1f\n" + "asr z8.s, z8.s, #0x1f\n" "sqadd z31.s, z31.s, z4.s\n" "sqadd z20.s, z20.s, z5.s\n" "sqadd z21.s, z21.s, z6.s\n" - "and z7.d, z22.d, z0.d\n" - "and z8.d, z16.d, z0.d\n" - "and z9.d, z17.d, z0.d\n" - "asr z7.s, z7.s, #0x1f\n" - "asr z8.s, z8.s, #0x1f\n" - "asr z9.s, z9.s, #0x1f\n" "sqadd z22.s, z22.s, z7.s\n" "sqadd z16.s, z16.s, z8.s\n" - "sqadd z17.s, z17.s, z9.s\n" + "and z9.d, z17.d, z0.d\n" "and z10.d, z18.d, z0.d\n" "and z4.d, 
z19.d, z0.d\n" "and z5.d, z24.d, z0.d\n" - "asr z10.s, z10.s, #0x1f\n" - "asr z4.s, z4.s, #0x1f\n" - "asr z5.s, z5.s, #0x1f\n" - "sqadd z18.s, z18.s, z10.s\n" - "sqadd z19.s, z19.s, z4.s\n" - "sqadd z24.s, z24.s, z5.s\n" "and z6.d, z25.d, z0.d\n" "and z7.d, z26.d, z0.d\n" "and z8.d, z27.d, z0.d\n" + "asr z9.s, z9.s, #0x1f\n" + "asr z10.s, z10.s, #0x1f\n" + "asr z4.s, z4.s, #0x1f\n" + "asr z5.s, z5.s, #0x1f\n" "asr z6.s, z6.s, #0x1f\n" "asr z7.s, z7.s, #0x1f\n" "asr z8.s, z8.s, #0x1f\n" + "sqadd z17.s, z17.s, z9.s\n" + "sqadd z18.s, z18.s, z10.s\n" + "sqadd z19.s, z19.s, z4.s\n" + "sqadd z24.s, z24.s, z5.s\n" "sqadd z25.s, z25.s, z6.s\n" "sqadd z26.s, z26.s, z7.s\n" "sqadd z27.s, z27.s, z8.s\n" "41:" // Height 3: no shift correction + "add x23, %x[qp], %[c_offset]\n" + "ld1rw { z4.s }, p2/Z, [x23]\n" ".inst 0x4482881f // srshl z31.s, p2/M, z31.s, z0.s\n" - "add x22, %x[qp], %[c_offset]\n" - "ld1rw { z4.s }, p2/Z, [x22]\n" + "add z31.s, z31.s, z4.s\n" ".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n" - "add x22, %x[qp], %[minval]\n" ".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n" - "ld1rw { z5.s }, p2/Z, [x22]\n" - "add x22, %x[qp], %[maxval]\n" - ".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n" - "ld1rw { z6.s }, p2/Z, [x22]\n" - ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n" - "add z31.s, z31.s, z4.s\n" "add z20.s, z20.s, z4.s\n" "add z21.s, z21.s, z4.s\n" + ".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n" + ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n" "add z22.s, z22.s, z4.s\n" "add z16.s, z16.s, z4.s\n" + ".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n" + ".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n" + "add z17.s, z17.s, z4.s\n" + "add z18.s, z18.s, z4.s\n" + ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n" + ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n" + "add z19.s, z19.s, z4.s\n" + "add z24.s, z24.s, z4.s\n" + ".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n" + ".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n" + "add z25.s, z25.s, z4.s\n" + "add z26.s, z26.s, z4.s\n" + ".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n" + "add x23, %x[qp], %[maxval]\n" + "ld1rw { z6.s }, p2/Z, [x23]\n" + "add z27.s, z27.s, z4.s\n" + "add x23, %x[qp], %[minval]\n" + "ld1rw { z5.s }, p2/Z, [x23]\n" "smin z31.s, p2/M, z31.s, z6.s\n" "smin z20.s, p2/M, z20.s, z6.s\n" "smin z21.s, p2/M, z21.s, z6.s\n" "smin z22.s, p2/M, z22.s, z6.s\n" + "smin z16.s, p2/M, z16.s, z6.s\n" + "smin z17.s, p2/M, z17.s, z6.s\n" + "smin z18.s, p2/M, z18.s, z6.s\n" + "smin z19.s, p2/M, z19.s, z6.s\n" + "smin z24.s, p2/M, z24.s, z6.s\n" + "smin z25.s, p2/M, z25.s, z6.s\n" + "smin z26.s, p2/M, z26.s, z6.s\n" + "smin z27.s, p2/M, z27.s, z6.s\n" "smax z31.s, p2/M, z31.s, z5.s\n" "smax z20.s, p2/M, z20.s, z5.s\n" "smax z21.s, p2/M, z21.s, z5.s\n" - "smax z22.s, p2/M, z22.s, z5.s\n" - "smin z16.s, p2/M, z16.s, z6.s\n" "uzp1 z31.h, z31.h, z20.h\n" - ".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n" - "uzp1 z20.h, z21.h, z22.h\n" + "smax z22.s, p2/M, z22.s, z5.s\n" "smax z16.s, p2/M, z16.s, z5.s\n" + "uzp1 z20.h, z21.h, z22.h\n" "uzp1 z31.b, z31.b, z20.b\n" - "st1b { z31.b }, p1, [x26]\n" - "add z17.s, z17.s, z4.s\n" - "addvl x26, x26, #1\n" - ".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n" - ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n" - ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n" - "smin z17.s, p2/M, z17.s, z6.s\n" - ".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n" - "add z18.s, z18.s, z4.s\n" - "add z19.s, z19.s, 
z4.s\n" - "add z24.s, z24.s, z4.s\n" - "add z25.s, z25.s, z4.s\n" "smax z17.s, p2/M, z17.s, z5.s\n" - "smin z18.s, p2/M, z18.s, z6.s\n" - "smin z19.s, p2/M, z19.s, z6.s\n" - "smin z24.s, p2/M, z24.s, z6.s\n" - "uzp1 z16.h, z16.h, z17.h\n" "smax z18.s, p2/M, z18.s, z5.s\n" + "uzp1 z16.h, z16.h, z17.h\n" + "st1b { z31.b }, p1, [x27]\n" "smax z19.s, p2/M, z19.s, z5.s\n" "smax z24.s, p2/M, z24.s, z5.s\n" - "smin z25.s, p2/M, z25.s, z6.s\n" - ".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n" "uzp1 z17.h, z18.h, z19.h\n" - ".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n" "uzp1 z16.b, z16.b, z17.b\n" - "st1b { z16.b }, p1, [x21]\n" - "add z26.s, z26.s, z4.s\n" "smax z25.s, p2/M, z25.s, z5.s\n" - "add z27.s, z27.s, z4.s\n" - "smin z26.s, p2/M, z26.s, z6.s\n" - "uzp1 z24.h, z24.h, z25.h\n" - "smin z27.s, p2/M, z27.s, z6.s\n" "smax z26.s, p2/M, z26.s, z5.s\n" + "uzp1 z24.h, z24.h, z25.h\n" + "st1b { z16.b }, p1, [x22]\n" "smax z27.s, p2/M, z27.s, z5.s\n" "uzp1 z25.h, z26.h, z27.h\n" "uzp1 z24.b, z24.b, z25.b\n" - "st1b { z24.b }, p1, [x20]\n" + "st1b { z24.b }, p1, [x21]\n" + "addvl x27, x27, #1\n" "42:" // Height 3: Writeback done "decw x9, ALL, MUL #4\n" "cmp x9, XZR\n" "bgt 30b\n" "b 58f\n" "43:" // Height 4 + "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n" + "mov x20, #0x4\n" + "mov x10, %x[col_bias]\n" "mov z11.s, #0x0\n" - "ldr x9, [%x[args_ptr], %[offsetof_N]]\n" - "mov x27, %x[col_bias]\n" "mov z12.s, #0x0\n" - "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n" - "bic %x[flags], %x[flags], #0x80000000\n" "mov z13.s, #0x0\n" - "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n" - "mov x26, %x[output_ptr]\n" + "bic %x[flags], %x[flags], #0x80000000\n" + "ldr x9, [%x[args_ptr], %[offsetof_N]]\n" "mov z14.s, #0x0\n" - "mov x19, #0x4\n" "mov z15.b, #0x1\n" - "madd %x[output_ptr], x20, x19, %x[output_ptr]\n" + "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n" + "mov x27, %x[output_ptr]\n" + "madd %x[output_ptr], x21, x20, %x[output_ptr]\n" "44:" // Height 4: Column loop + "mov x20, #0x0\n" + "whilelt p1.b, x20, x9\n" "mov z16.s, #0x0\n" - "mov x19, #0x0\n" "mov z17.s, #0x0\n" - "whilelt p1.b, x19, x9\n" "mov z18.s, #0x0\n" "mov z19.s, #0x0\n" "mov z20.s, #0x0\n" @@ -988,85 +988,85 @@ void sve_hybrid_u8qa_mmla_4x4VL ( "mov z30.s, #0x0\n" "mov z31.s, #0x0\n" "45:" // Height 4: setup done - "mov x25, #0x0\n" + "mov x26, #0x0\n" "46:" // Height 4: String loop "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n" - "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n" - "ldr w24, [x20, x25, LSL #0x2]\n" + "ldr w25, [x20, x26, LSL #0x2]\n" + "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n" "tbz %x[flags], #3, 47f\n" - "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n" - "add x20, x20, x19, LSL #3\n" - "ldr x23, [x20, #0x0]\n" - "ldr x22, [x20, #0x8]\n" - "ldr x21, [x20, #0x10]\n" - "ldr x20, [x20, #0x18]\n" - "cbnz x25, 48f\n" - "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n" - "add x23, x23, x19\n" - "add x22, x22, x19\n" - "add x21, x21, x19\n" - "add x20, x20, x19\n" + "ldr x21, [%x[input_ptr], x26, LSL #0x3]\n" + "add x21, x21, x20, LSL #3\n" + "ldr x24, [x21, #0x0]\n" + "ldr x23, [x21, #0x8]\n" + "ldr x22, [x21, #0x10]\n" + "ldr x21, [x21, #0x18]\n" + "cbnz x26, 48f\n" + "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n" + "add x24, x24, x20\n" + "add x23, x23, x20\n" + "add x22, x22, x20\n" + "add x21, x21, x20\n" "b 48f\n" "47:" // Height 4: setup direct input - "mov x23, %x[input_ptr]\n" - "add x22, x23, x19\n" - "add x21, x22, x19\n" - "add x20, x21, x19\n" + 
"mov x24, %x[input_ptr]\n" + "add x23, x24, x20\n" + "add x22, x23, x20\n" + "add x21, x22, x20\n" "48:" // Height 4: input setup done - "cmp x24, #0x10\n" + "cmp x25, #0x10\n" "ble 51f\n" "49:" // Height 4: Multiply loop: Main loop head - "ld1b { z5.b }, p2/Z, [x28]\n" - "whilelt p0.b, XZR, x24\n" - "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n" - "ld1rqb { z1.b }, p0/Z, [x23]\n" - "add x23, x23, #0x10\n" - "ld1rqb { z2.b }, p0/Z, [x22]\n" + "whilelt p0.b, XZR, x25\n" + "ld1rqb { z1.b }, p0/Z, [x24]\n" + "ld1rqb { z2.b }, p0/Z, [x23]\n" "trn1 z0.d, z1.d, z2.d\n" - "ld1rqb { z3.b }, p0/Z, [x21]\n" - "add x22, x22, #0x10\n" + "ld1rqb { z3.b }, p0/Z, [x22]\n" + "ld1rqb { z4.b }, p0/Z, [x21]\n" "trn2 z1.d, z1.d, z2.d\n" - "ld1rqb { z4.b }, p0/Z, [x20]\n" - "add x21, x21, #0x10\n" + "trn1 z2.d, z3.d, z4.d\n" + "ld1b { z5.b }, p2/Z, [x28]\n" + "trn2 z3.d, z3.d, z4.d\n" ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n" + ".inst 0x45c59858 // ummla z24.s, z2.b, z5.b\n" + "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n" "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n" - "add x20, x20, #0x10\n" ".inst 0x45c69814 // ummla z20.s, z0.b, z6.b\n" + ".inst 0x45c6985c // ummla z28.s, z2.b, z6.b\n" "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n" "ld1b { z9.b }, p2/Z, [x28, #4, MUL VL]\n" - "trn1 z2.d, z3.d, z4.d\n" - "trn2 z3.d, z3.d, z4.d\n" - "ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n" ".inst 0x45c79811 // ummla z17.s, z0.b, z7.b\n" + ".inst 0x45c79859 // ummla z25.s, z2.b, z7.b\n" + "ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n" "ld1b { z4.b }, p2/Z, [x28, #6, MUL VL]\n" - ".inst 0x45c59858 // ummla z24.s, z2.b, z5.b\n" + ".inst 0x45c89815 // ummla z21.s, z0.b, z8.b\n" + ".inst 0x45c8985d // ummla z29.s, z2.b, z8.b\n" "ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n" "addvl x28, x28, #16\n" - ".inst 0x45c6985c // ummla z28.s, z2.b, z6.b\n" "ld1b { z6.b }, p2/Z, [x28, #-8, MUL VL]\n" - ".inst 0x45c79859 // ummla z25.s, z2.b, z7.b\n" - "ld1b { z7.b }, p2/Z, [x28, #-7, MUL VL]\n" - ".inst 0x45c89815 // ummla z21.s, z0.b, z8.b\n" - ".inst 0x45c8985d // ummla z29.s, z2.b, z8.b\n" - "ld1b { z8.b }, p2/Z, [x28, #-6, MUL VL]\n" ".inst 0x45c99812 // ummla z18.s, z0.b, z9.b\n" ".inst 0x45c9985a // ummla z26.s, z2.b, z9.b\n" - "ld1b { z9.b }, p2/Z, [x28, #-5, MUL VL]\n" ".inst 0x45ca9816 // ummla z22.s, z0.b, z10.b\n" + "ld1b { z7.b }, p2/Z, [x28, #-7, MUL VL]\n" + "ld1b { z8.b }, p2/Z, [x28, #-6, MUL VL]\n" ".inst 0x45ca985e // ummla z30.s, z2.b, z10.b\n" - "ld1b { z10.b }, p2/Z, [x28, #-4, MUL VL]\n" ".inst 0x45c49813 // ummla z19.s, z0.b, z4.b\n" + "ld1b { z9.b }, p2/Z, [x28, #-5, MUL VL]\n" + "ld1b { z10.b }, p2/Z, [x28, #-4, MUL VL]\n" ".inst 0x45c4985b // ummla z27.s, z2.b, z4.b\n" - "ld1b { z4.b }, p2/Z, [x28, #-3, MUL VL]\n" ".inst 0x45c59817 // ummla z23.s, z0.b, z5.b\n" + "ld1b { z4.b }, p2/Z, [x28, #-3, MUL VL]\n" + "add x24, x24, #0x10\n" ".inst 0x45c5985f // ummla z31.s, z2.b, z5.b\n" - "ld1b { z5.b }, p2/Z, [x28, #-2, MUL VL]\n" ".inst 0x45c69830 // ummla z16.s, z1.b, z6.b\n" + "ld1b { z5.b }, p2/Z, [x28, #-2, MUL VL]\n" + "add x23, x23, #0x10\n" ".inst 0x45c69878 // ummla z24.s, z3.b, z6.b\n" "ld1b { z6.b }, p2/Z, [x28, #-1, MUL VL]\n" ".inst 0x45c79834 // ummla z20.s, z1.b, z7.b\n" + "add x22, x22, #0x10\n" ".inst 0x45c7987c // ummla z28.s, z3.b, z7.b\n" ".inst 0x45c89831 // ummla z17.s, z1.b, z8.b\n" + "add x21, x21, #0x10\n" ".inst 0x45c89879 // ummla z25.s, z3.b, z8.b\n" ".inst 0x45c99835 // ummla z21.s, z1.b, z9.b\n" ".inst 0x45c9987d // ummla z29.s, z3.b, z9.b\n" @@ -1084,38 +1084,38 @@ void sve_hybrid_u8qa_mmla_4x4VL ( "udot 
z11.s, z1.b, z15.b\n" "udot z13.s, z3.b, z15.b\n" "50:" // Height 4: Multiply loop: unique 7: skip row sum - "sub x24, x24, #0x10\n" - "cmp x24, #0x10\n" + "sub x25, x25, #0x10\n" + "cmp x25, #0x10\n" "bgt 49b\n" "51:" // Height 4: Multiply loop: Single iteration only - "ld1b { z5.b }, p2/Z, [x28]\n" - "whilelt p0.b, XZR, x24\n" - "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n" - "subs x24, x24, #0x8\n" - "ld1rqb { z1.b }, p0/Z, [x23]\n" - "ld1rqb { z2.b }, p0/Z, [x22]\n" + "whilelt p0.b, XZR, x25\n" + "ld1rqb { z1.b }, p0/Z, [x24]\n" + "ld1rqb { z2.b }, p0/Z, [x23]\n" "trn1 z0.d, z1.d, z2.d\n" - "ld1rqb { z3.b }, p0/Z, [x21]\n" + "ld1rqb { z3.b }, p0/Z, [x22]\n" + "ld1rqb { z4.b }, p0/Z, [x21]\n" "trn2 z1.d, z1.d, z2.d\n" - "ld1rqb { z4.b }, p0/Z, [x20]\n" - "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n" + "trn1 z2.d, z3.d, z4.d\n" + "ld1b { z5.b }, p2/Z, [x28]\n" + "trn2 z3.d, z3.d, z4.d\n" ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n" + ".inst 0x45c59858 // ummla z24.s, z2.b, z5.b\n" + "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n" + "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n" + "subs x25, x25, #0x8\n" ".inst 0x45c69814 // ummla z20.s, z0.b, z6.b\n" "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n" "ld1b { z9.b }, p2/Z, [x28, #4, MUL VL]\n" - "trn1 z2.d, z3.d, z4.d\n" - "trn2 z3.d, z3.d, z4.d\n" - "ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n" + ".inst 0x45c6985c // ummla z28.s, z2.b, z6.b\n" ".inst 0x45c79811 // ummla z17.s, z0.b, z7.b\n" + "ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n" "ld1b { z4.b }, p2/Z, [x28, #6, MUL VL]\n" - ".inst 0x45c59858 // ummla z24.s, z2.b, z5.b\n" - "ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n" - "addvl x28, x28, #8\n" - ".inst 0x45c6985c // ummla z28.s, z2.b, z6.b\n" ".inst 0x45c79859 // ummla z25.s, z2.b, z7.b\n" ".inst 0x45c89815 // ummla z21.s, z0.b, z8.b\n" + "ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n" ".inst 0x45c8985d // ummla z29.s, z2.b, z8.b\n" ".inst 0x45c99812 // ummla z18.s, z0.b, z9.b\n" + "addvl x28, x28, #8\n" ".inst 0x45c9985a // ummla z26.s, z2.b, z9.b\n" ".inst 0x45ca9816 // ummla z22.s, z0.b, z10.b\n" ".inst 0x45ca985e // ummla z30.s, z2.b, z10.b\n" @@ -1126,23 +1126,23 @@ void sve_hybrid_u8qa_mmla_4x4VL ( "ble 52f\n" "ld1b { z6.b }, p2/Z, [x28]\n" ".inst 0x45c69830 // ummla z16.s, z1.b, z6.b\n" - "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n" ".inst 0x45c69878 // ummla z24.s, z3.b, z6.b\n" + "ld1b { z7.b }, p2/Z, [x28, #1, MUL VL]\n" "ld1b { z8.b }, p2/Z, [x28, #2, MUL VL]\n" "ld1b { z9.b }, p2/Z, [x28, #3, MUL VL]\n" ".inst 0x45c79834 // ummla z20.s, z1.b, z7.b\n" - "ld1b { z10.b }, p2/Z, [x28, #4, MUL VL]\n" ".inst 0x45c7987c // ummla z28.s, z3.b, z7.b\n" + "ld1b { z10.b }, p2/Z, [x28, #4, MUL VL]\n" "ld1b { z4.b }, p2/Z, [x28, #5, MUL VL]\n" ".inst 0x45c89831 // ummla z17.s, z1.b, z8.b\n" - "ld1b { z5.b }, p2/Z, [x28, #6, MUL VL]\n" ".inst 0x45c89879 // ummla z25.s, z3.b, z8.b\n" + "ld1b { z5.b }, p2/Z, [x28, #6, MUL VL]\n" "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n" - "addvl x28, x28, #8\n" ".inst 0x45c99835 // ummla z21.s, z1.b, z9.b\n" ".inst 0x45c9987d // ummla z29.s, z3.b, z9.b\n" ".inst 0x45ca9832 // ummla z18.s, z1.b, z10.b\n" ".inst 0x45ca987a // ummla z26.s, z3.b, z10.b\n" + "addvl x28, x28, #8\n" ".inst 0x45c49836 // ummla z22.s, z1.b, z4.b\n" ".inst 0x45c4987e // ummla z30.s, z3.b, z4.b\n" ".inst 0x45c59833 // ummla z19.s, z1.b, z5.b\n" @@ -1156,19 +1156,19 @@ void sve_hybrid_u8qa_mmla_4x4VL ( "udot z11.s, z1.b, z15.b\n" "udot z13.s, z3.b, z15.b\n" "53:" // Height 4: Multiply loop: unique 8: skip row sum - "ldr w19, [%x[args_ptr], 
%[offsetof_num_strings]]\n" - "add x25, x25, #0x1\n" - "cmp x25, x19\n" + "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n" + "add x26, x26, #0x1\n" + "cmp x26, x20\n" "bne 46b\n" + "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n" "uzp1 z7.d, z16.d, z20.d\n" - "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n" + "add x22, x27, x20\n" + "add x21, x22, x20\n" "uzp2 z16.d, z16.d, z20.d\n" - "add x21, x26, x19\n" "uzp1 z20.d, z17.d, z21.d\n" + "add x20, x21, x20\n" "uzp2 z17.d, z17.d, z21.d\n" - "add x20, x21, x19\n" "uzp1 z21.d, z18.d, z22.d\n" - "add x19, x20, x19\n" "uzp2 z18.d, z18.d, z22.d\n" "uzp1 z22.d, z19.d, z23.d\n" "uzp2 z19.d, z19.d, z23.d\n" @@ -1182,39 +1182,39 @@ void sve_hybrid_u8qa_mmla_4x4VL ( "uzp2 z27.d, z27.d, z31.d\n" "mov z31.d, z7.d\n" "tbnz %x[flags], #31, 54f\n" + "add x23, %x[qp], %[b_offset]\n" + "ld1rw { z4.s }, p2/Z, [x23]\n" ".inst 0x4491a96b // addp z11.s, p2/m, z11.s, z11.s\n" - "add x22, %x[qp], %[b_offset]\n" - "ld1rw { z4.s }, p2/Z, [x22]\n" ".inst 0x4491a9ad // addp z13.s, p2/m, z13.s, z13.s\n" + "neg z4.s, p2/M, z4.s\n" "mov z12.s, z11.s[3]\n" "mov z11.s, z11.s[0]\n" - "neg z4.s, p2/M, z4.s\n" + "mul z11.s, p2/M, z11.s, z4.s\n" "mov z14.s, z13.s[3]\n" "mov z13.s, z13.s[0]\n" - "mul z11.s, p2/M, z11.s, z4.s\n" "mul z12.s, p2/M, z12.s, z4.s\n" "mul z13.s, p2/M, z13.s, z4.s\n" "mul z14.s, p2/M, z14.s, z4.s\n" "54:" // Height 4: skip row sum fixup "add z31.s, z31.s, z11.s\n" - "ld1w { z0.s }, p2/Z, [x27]\n" - "orr %x[flags], %x[flags], #0x80000000\n" "add z20.s, z20.s, z11.s\n" - "ld1w { z1.s }, p2/Z, [x27, #1, MUL VL]\n" - "add x23, %x[qp], %[per_layer_right_shift]\n" + "ld1w { z0.s }, p2/Z, [x10]\n" + "ld1w { z1.s }, p2/Z, [x10, #1, MUL VL]\n" "add z21.s, z21.s, z11.s\n" - "ld1w { z2.s }, p2/Z, [x27, #2, MUL VL]\n" - "add x22, %x[qp], %[per_layer_mul]\n" "add z22.s, z22.s, z11.s\n" - "ld1w { z3.s }, p2/Z, [x27, #3, MUL VL]\n" - "addvl x27, x27, #4\n" + "ld1w { z2.s }, p2/Z, [x10, #2, MUL VL]\n" + "ld1w { z3.s }, p2/Z, [x10, #3, MUL VL]\n" "add z16.s, z16.s, z12.s\n" - "ld1rw { z4.s }, p2/Z, [x22]\n" "add z17.s, z17.s, z12.s\n" + "add x23, %x[qp], %[per_layer_mul]\n" + "orr %x[flags], %x[flags], #0x80000000\n" "add z18.s, z18.s, z12.s\n" "add z19.s, z19.s, z12.s\n" + "ld1rw { z4.s }, p2/Z, [x23]\n" + "add x23, %x[qp], %[per_layer_right_shift]\n" "add z23.s, z23.s, z13.s\n" "add z28.s, z28.s, z13.s\n" + "addvl x10, x10, #4\n" "add z29.s, z29.s, z13.s\n" "add z30.s, z30.s, z13.s\n" "add z24.s, z24.s, z14.s\n" @@ -1234,8 +1234,8 @@ void sve_hybrid_u8qa_mmla_4x4VL ( "add z29.s, z29.s, z2.s\n" "add z30.s, z30.s, z3.s\n" "add z24.s, z24.s, z0.s\n" - "ld1rw { z0.s }, p2/Z, [x23]\n" "add z25.s, z25.s, z1.s\n" + "ld1rw { z0.s }, p2/Z, [x23]\n" "add z26.s, z26.s, z2.s\n" "add z27.s, z27.s, z3.s\n" ".inst 0x04a477ff // sqrdmulh z31.s, z31.s, z4.s\n" @@ -1257,160 +1257,160 @@ void sve_hybrid_u8qa_mmla_4x4VL ( "tbz %x[flags], #5, 55f\n" "and z4.d, z31.d, z0.d\n" "and z5.d, z20.d, z0.d\n" - "and z6.d, z21.d, z0.d\n" "asr z4.s, z4.s, #0x1f\n" "asr z5.s, z5.s, #0x1f\n" - "asr z6.s, z6.s, #0x1f\n" "sqadd z31.s, z31.s, z4.s\n" "sqadd z20.s, z20.s, z5.s\n" - "sqadd z21.s, z21.s, z6.s\n" + "and z6.d, z21.d, z0.d\n" "and z7.d, z22.d, z0.d\n" "and z8.d, z16.d, z0.d\n" "and z9.d, z17.d, z0.d\n" - "asr z7.s, z7.s, #0x1f\n" - "asr z8.s, z8.s, #0x1f\n" - "asr z9.s, z9.s, #0x1f\n" - "sqadd z22.s, z22.s, z7.s\n" - "sqadd z16.s, z16.s, z8.s\n" - "sqadd z17.s, z17.s, z9.s\n" "and z10.d, z18.d, z0.d\n" "and z4.d, z19.d, z0.d\n" "and z5.d, z23.d, z0.d\n" + "asr z6.s, z6.s, 
#0x1f\n" + "asr z7.s, z7.s, #0x1f\n" + "asr z8.s, z8.s, #0x1f\n" + "asr z9.s, z9.s, #0x1f\n" "asr z10.s, z10.s, #0x1f\n" "asr z4.s, z4.s, #0x1f\n" "asr z5.s, z5.s, #0x1f\n" + "sqadd z21.s, z21.s, z6.s\n" + "sqadd z22.s, z22.s, z7.s\n" + "sqadd z16.s, z16.s, z8.s\n" + "sqadd z17.s, z17.s, z9.s\n" "sqadd z18.s, z18.s, z10.s\n" "sqadd z19.s, z19.s, z4.s\n" "sqadd z23.s, z23.s, z5.s\n" "and z6.d, z28.d, z0.d\n" "and z7.d, z29.d, z0.d\n" "and z8.d, z30.d, z0.d\n" - "asr z6.s, z6.s, #0x1f\n" - "asr z7.s, z7.s, #0x1f\n" - "asr z8.s, z8.s, #0x1f\n" - "sqadd z28.s, z28.s, z6.s\n" - "sqadd z29.s, z29.s, z7.s\n" - "sqadd z30.s, z30.s, z8.s\n" "and z9.d, z24.d, z0.d\n" "and z10.d, z25.d, z0.d\n" "and z4.d, z26.d, z0.d\n" + "and z5.d, z27.d, z0.d\n" + "asr z6.s, z6.s, #0x1f\n" + "asr z7.s, z7.s, #0x1f\n" + "asr z8.s, z8.s, #0x1f\n" "asr z9.s, z9.s, #0x1f\n" "asr z10.s, z10.s, #0x1f\n" "asr z4.s, z4.s, #0x1f\n" + "asr z5.s, z5.s, #0x1f\n" + "sqadd z28.s, z28.s, z6.s\n" + "sqadd z29.s, z29.s, z7.s\n" + "sqadd z30.s, z30.s, z8.s\n" "sqadd z24.s, z24.s, z9.s\n" "sqadd z25.s, z25.s, z10.s\n" "sqadd z26.s, z26.s, z4.s\n" - "and z5.d, z27.d, z0.d\n" - "asr z5.s, z5.s, #0x1f\n" "sqadd z27.s, z27.s, z5.s\n" "55:" // Height 4: no shift correction + "add x23, %x[qp], %[c_offset]\n" + "ld1rw { z4.s }, p2/Z, [x23]\n" ".inst 0x4482881f // srshl z31.s, p2/M, z31.s, z0.s\n" - "add x22, %x[qp], %[c_offset]\n" - "ld1rw { z4.s }, p2/Z, [x22]\n" + "add z31.s, z31.s, z4.s\n" ".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n" - "add x22, %x[qp], %[minval]\n" ".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n" - "ld1rw { z5.s }, p2/Z, [x22]\n" - "add x22, %x[qp], %[maxval]\n" - ".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n" - "ld1rw { z6.s }, p2/Z, [x22]\n" - ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n" - "add z31.s, z31.s, z4.s\n" "add z20.s, z20.s, z4.s\n" "add z21.s, z21.s, z4.s\n" + ".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n" + ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n" "add z22.s, z22.s, z4.s\n" "add z16.s, z16.s, z4.s\n" + ".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n" + ".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n" + "add z17.s, z17.s, z4.s\n" + "add z18.s, z18.s, z4.s\n" + ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n" + ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n" + "add z19.s, z19.s, z4.s\n" + "add z23.s, z23.s, z4.s\n" + ".inst 0x4482881c // srshl z28.s, p2/M, z28.s, z0.s\n" + ".inst 0x4482881d // srshl z29.s, p2/M, z29.s, z0.s\n" + "add z28.s, z28.s, z4.s\n" + "add z29.s, z29.s, z4.s\n" + ".inst 0x4482881e // srshl z30.s, p2/M, z30.s, z0.s\n" + ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n" + "add z30.s, z30.s, z4.s\n" + "add z24.s, z24.s, z4.s\n" + ".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n" + ".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n" + "add z25.s, z25.s, z4.s\n" + "add z26.s, z26.s, z4.s\n" + ".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n" + "add x23, %x[qp], %[maxval]\n" + "ld1rw { z6.s }, p2/Z, [x23]\n" + "add z27.s, z27.s, z4.s\n" + "add x23, %x[qp], %[minval]\n" + "ld1rw { z5.s }, p2/Z, [x23]\n" "smin z31.s, p2/M, z31.s, z6.s\n" "smin z20.s, p2/M, z20.s, z6.s\n" "smin z21.s, p2/M, z21.s, z6.s\n" "smin z22.s, p2/M, z22.s, z6.s\n" + "smin z16.s, p2/M, z16.s, z6.s\n" + "smin z17.s, p2/M, z17.s, z6.s\n" + "smin z18.s, p2/M, z18.s, z6.s\n" + "smin z19.s, p2/M, z19.s, z6.s\n" + "smin z23.s, p2/M, z23.s, z6.s\n" + "smin z28.s, p2/M, z28.s, z6.s\n" + "smin z29.s, p2/M, z29.s, z6.s\n" + "smin 
z30.s, p2/M, z30.s, z6.s\n" + "smin z24.s, p2/M, z24.s, z6.s\n" + "smin z25.s, p2/M, z25.s, z6.s\n" + "smin z26.s, p2/M, z26.s, z6.s\n" + "smin z27.s, p2/M, z27.s, z6.s\n" "smax z31.s, p2/M, z31.s, z5.s\n" "smax z20.s, p2/M, z20.s, z5.s\n" "smax z21.s, p2/M, z21.s, z5.s\n" - "smax z22.s, p2/M, z22.s, z5.s\n" - "smin z16.s, p2/M, z16.s, z6.s\n" "uzp1 z31.h, z31.h, z20.h\n" - ".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n" - "uzp1 z20.h, z21.h, z22.h\n" + "smax z22.s, p2/M, z22.s, z5.s\n" "smax z16.s, p2/M, z16.s, z5.s\n" + "uzp1 z20.h, z21.h, z22.h\n" "uzp1 z31.b, z31.b, z20.b\n" - "st1b { z31.b }, p1, [x26]\n" - "add z17.s, z17.s, z4.s\n" - "addvl x26, x26, #1\n" - ".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n" - ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n" - ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n" - "smin z17.s, p2/M, z17.s, z6.s\n" - ".inst 0x4482881c // srshl z28.s, p2/M, z28.s, z0.s\n" - "add z18.s, z18.s, z4.s\n" - "add z19.s, z19.s, z4.s\n" - "add z23.s, z23.s, z4.s\n" - "add z28.s, z28.s, z4.s\n" "smax z17.s, p2/M, z17.s, z5.s\n" - "smin z18.s, p2/M, z18.s, z6.s\n" - "smin z19.s, p2/M, z19.s, z6.s\n" - "smin z23.s, p2/M, z23.s, z6.s\n" - "uzp1 z16.h, z16.h, z17.h\n" "smax z18.s, p2/M, z18.s, z5.s\n" + "uzp1 z16.h, z16.h, z17.h\n" + "st1b { z31.b }, p1, [x27]\n" "smax z19.s, p2/M, z19.s, z5.s\n" "smax z23.s, p2/M, z23.s, z5.s\n" - "smin z28.s, p2/M, z28.s, z6.s\n" - ".inst 0x4482881d // srshl z29.s, p2/M, z29.s, z0.s\n" "uzp1 z17.h, z18.h, z19.h\n" - ".inst 0x4482881e // srshl z30.s, p2/M, z30.s, z0.s\n" "uzp1 z16.b, z16.b, z17.b\n" - "st1b { z16.b }, p1, [x21]\n" - "add z29.s, z29.s, z4.s\n" "smax z28.s, p2/M, z28.s, z5.s\n" - "add z30.s, z30.s, z4.s\n" - ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n" - "smin z29.s, p2/M, z29.s, z6.s\n" - "uzp1 z23.h, z23.h, z28.h\n" - "smin z30.s, p2/M, z30.s, z6.s\n" - "add z24.s, z24.s, z4.s\n" "smax z29.s, p2/M, z29.s, z5.s\n" - ".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n" + "uzp1 z23.h, z23.h, z28.h\n" + "st1b { z16.b }, p1, [x22]\n" "smax z30.s, p2/M, z30.s, z5.s\n" - "smin z24.s, p2/M, z24.s, z6.s\n" - ".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n" - "add z25.s, z25.s, z4.s\n" - "uzp1 z28.h, z29.h, z30.h\n" "smax z24.s, p2/M, z24.s, z5.s\n" - "add z26.s, z26.s, z4.s\n" + "uzp1 z28.h, z29.h, z30.h\n" "uzp1 z23.b, z23.b, z28.b\n" - "st1b { z23.b }, p1, [x20]\n" - "smin z25.s, p2/M, z25.s, z6.s\n" - "smin z26.s, p2/M, z26.s, z6.s\n" - ".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n" "smax z25.s, p2/M, z25.s, z5.s\n" "smax z26.s, p2/M, z26.s, z5.s\n" - "add z27.s, z27.s, z4.s\n" "uzp1 z24.h, z24.h, z25.h\n" - "smin z27.s, p2/M, z27.s, z6.s\n" + "st1b { z23.b }, p1, [x21]\n" "smax z27.s, p2/M, z27.s, z5.s\n" "uzp1 z25.h, z26.h, z27.h\n" "uzp1 z24.b, z24.b, z25.b\n" - "st1b { z24.b }, p1, [x19]\n" + "st1b { z24.b }, p1, [x20]\n" + "addvl x27, x27, #1\n" "56:" // Height 4: Writeback done "decw x9, ALL, MUL #4\n" "cmp x9, XZR\n" "bgt 44b\n" "subs %x[M], %x[M], #0x4\n" "beq 58f\n" - "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n" + "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n" "tbz %x[flags], #3, 57f\n" - "add x20, x20, #0x4\n" - "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n" + "add x21, x21, #0x4\n" + "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n" "b 1b\n" "57:" // Update direct input - "mov x19, #0x4\n" - "madd %x[input_ptr], x19, x20, %x[input_ptr]\n" + "mov x20, #0x4\n" + "madd %x[input_ptr], x20, x21, %x[input_ptr]\n" "b 1b\n" "58:" // Exit : [M] 
"+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr) : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp) - : "cc", "memory", "p0", "p1", "p2", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" + : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" ); } -- cgit v1.2.1