Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_mmla_6x4VL/generic.cpp')
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_mmla_6x4VL/generic.cpp | 1878
1 file changed, 939 insertions(+), 939 deletions(-)
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_mmla_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_mmla_6x4VL/generic.cpp
index 6041794bdb..6aba002706 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_mmla_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_mmla_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -95,17 +95,17 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"cmp %x[M], #0x2\n"
"bgt 27f\n"
"beq 14f\n"
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x9, %x[output_ptr]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"2:" // Height 1: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z9.s, #0x0\n"
+ "whilelt p1.b, x19, x10\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
@@ -113,124 +113,124 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
"3:" // Height 1: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "cbnz x28, 6f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 6f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
"b 6f\n"
"5:" // Height 1: setup direct input
- "mov x26, %x[input_ptr]\n"
+ "mov x25, %x[input_ptr]\n"
"6:" // Height 1: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 8f\n"
"7:" // Height 1: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "cmp x26, #0x10\n"
+ "add x25, x25, #0x10\n"
".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
- "trn2 z1.d, z1.d, z2.d\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #16\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-8, MUL VL]\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-8, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-7, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-7, MUL VL]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-6, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-6, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-5, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-4, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-3, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-2, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-2, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-1, MUL VL]\n"
- "sub x27, x27, #0x10\n"
- "cmp x27, #0x10\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-1, MUL VL]\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x4506982f // smmla z15.s, z1.b, z6.b\n"
- "add x26, x26, #0x10\n"
"bgt 7b\n"
"8:" // Height 1: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "subs x26, x26, #0x8\n"
+ "trn2 z1.d, z1.d, z2.d\n"
".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
- "subs x27, x27, #0x8\n"
- "trn2 z1.d, z1.d, z2.d\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
- "addvl x9, x9, #8\n"
"ble 9f\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x4506982f // smmla z15.s, z1.b, z6.b\n"
- "addvl x9, x9, #8\n"
"9:" // Height 1: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 4b\n"
"uzp1 z8.d, z8.d, z12.d\n"
+ "ld1w { z0.s }, p2/Z, [x11]\n"
"uzp1 z9.d, z9.d, z13.d\n"
- "ld1w { z0.s }, p2/Z, [x14]\n"
- "ld1w { z1.s }, p2/Z, [x14, #1, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x11, #1, MUL VL]\n"
"uzp1 z10.d, z10.d, z14.d\n"
+ "ld1w { z2.s }, p2/Z, [x11, #2, MUL VL]\n"
"uzp1 z11.d, z11.d, z15.d\n"
- "ld1w { z2.s }, p2/Z, [x14, #2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x14, #3, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x11, #3, MUL VL]\n"
+ "addvl x11, x11, #4\n"
"mov z15.d, z8.d\n"
- "add z15.s, z15.s, z0.s\n"
- "addvl x14, x14, #4\n"
"add z9.s, z9.s, z1.s\n"
+ "add z15.s, z15.s, z0.s\n"
"add z10.s, z10.s, z2.s\n"
"add z11.s, z11.s, z3.s\n"
"tbz %x[flags], #4, 10f\n"
@@ -241,20 +241,20 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"ld1w { z2.s }, p2/Z, [x12, #2, MUL VL]\n"
"ld1w { z6.s }, p2/Z, [x13, #2, MUL VL]\n"
"ld1w { z3.s }, p2/Z, [x12, #3, MUL VL]\n"
- "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x12, x12, #4\n"
+ "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
"b 11f\n"
"10:" // Height 1: per layer parameters
- "add x26, %x[qp], %[per_layer_right_shift]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z0.s }, p2/Z, [x26]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1rw { z0.s }, p2/Z, [x24]\n"
"mov z1.d, z0.d\n"
- "mov z5.d, z4.d\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
"mov z2.d, z0.d\n"
- "mov z6.d, z4.d\n"
"mov z3.d, z0.d\n"
+ "mov z5.d, z4.d\n"
+ "mov z6.d, z4.d\n"
"mov z7.d, z4.d\n"
"11:" // Height 1: parameters loaded
".inst 0x04a475ef // sqrdmulh z15.s, z15.s, z4.s\n"
@@ -269,26 +269,26 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z15.s, z15.s, z4.s\n"
"sqadd z9.s, z9.s, z5.s\n"
"sqadd z10.s, z10.s, z6.s\n"
+ "asr z7.s, z7.s, #0x1f\n"
"sqadd z11.s, z11.s, z7.s\n"
"12:" // Height 1: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
".inst 0x4482880f // srshl z15.s, p2/M, z15.s, z0.s\n"
- "add z15.s, z15.s, z4.s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
+ "add x24, %x[qp], %[minval]\n"
".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
+ "ld1rw { z5.s }, p2/Z, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
+ ".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
+ "ld1rw { z6.s }, p2/Z, [x24]\n"
+ "add z15.s, z15.s, z4.s\n"
"add z9.s, z9.s, z4.s\n"
"add z10.s, z10.s, z4.s\n"
- ".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x25]\n"
"add z11.s, z11.s, z4.s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x25]\n"
"smin z15.s, p2/M, z15.s, z6.s\n"
"smin z9.s, p2/M, z9.s, z6.s\n"
"smin z10.s, p2/M, z10.s, z6.s\n"
@@ -296,29 +296,29 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"smax z15.s, p2/M, z15.s, z5.s\n"
"smax z9.s, p2/M, z9.s, z5.s\n"
"smax z10.s, p2/M, z10.s, z5.s\n"
- "uzp1 z15.h, z15.h, z9.h\n"
"smax z11.s, p2/M, z11.s, z5.s\n"
+ "uzp1 z15.h, z15.h, z9.h\n"
"uzp1 z9.h, z10.h, z11.h\n"
"uzp1 z15.b, z15.b, z9.b\n"
- "st1b { z15.b }, p1, [x11]\n"
- "addvl x11, x11, #1\n"
+ "st1b { z15.b }, p1, [x9]\n"
+ "addvl x9, x9, #1\n"
"13:" // Height 1: Writeback done
"decw x10, ALL, MUL #4\n"
"cmp x10, XZR\n"
"bgt 2b\n"
"b 80f\n"
"14:" // Height 2
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"15:" // Height 2: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z9.s, #0x0\n"
+ "whilelt p1.b, x19, x10\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
@@ -326,133 +326,133 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
"16:" // Height 2: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"17:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 18f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "cbnz x28, 19f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 19f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
"b 19f\n"
"18:" // Height 2: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
"19:" // Height 2: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 21f\n"
"20:" // Height 2: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "cmp x26, #0x10\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "add x25, x25, #0x10\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "add x24, x24, #0x10\n"
".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
- "trn2 z1.d, z1.d, z2.d\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #16\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-8, MUL VL]\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-8, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-7, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-7, MUL VL]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-6, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-6, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-5, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-4, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-3, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-2, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-2, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #-1, MUL VL]\n"
- "sub x27, x27, #0x10\n"
- "cmp x27, #0x10\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-1, MUL VL]\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x4506982f // smmla z15.s, z1.b, z6.b\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
"bgt 20b\n"
"21:" // Height 2: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x26, x26, #0x8\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "trn2 z1.d, z1.d, z2.d\n"
".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
- "subs x27, x27, #0x8\n"
- "trn2 z1.d, z1.d, z2.d\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
- "addvl x9, x9, #8\n"
"ble 22f\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x4506982f // smmla z15.s, z1.b, z6.b\n"
- "addvl x9, x9, #8\n"
"22:" // Height 2: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 17b\n"
"uzp1 z7.d, z8.d, z12.d\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z8.d, z8.d, z12.d\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "ld1w { z0.s }, p2/Z, [x14]\n"
+ "ld1w { z0.s }, p2/Z, [x11]\n"
+ "add x23, x9, x19\n"
"uzp1 z12.d, z9.d, z13.d\n"
+ "ld1w { z1.s }, p2/Z, [x11, #1, MUL VL]\n"
"uzp2 z9.d, z9.d, z13.d\n"
- "ld1w { z1.s }, p2/Z, [x14, #1, MUL VL]\n"
- "ld1w { z2.s }, p2/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z2.s }, p2/Z, [x11, #2, MUL VL]\n"
"uzp1 z13.d, z10.d, z14.d\n"
+ "ld1w { z3.s }, p2/Z, [x11, #3, MUL VL]\n"
+ "addvl x11, x11, #4\n"
"uzp2 z10.d, z10.d, z14.d\n"
- "ld1w { z3.s }, p2/Z, [x14, #3, MUL VL]\n"
- "add x24, x11, x20\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
- "addvl x14, x14, #4\n"
"mov z15.d, z7.d\n"
"add z15.s, z15.s, z0.s\n"
"add z12.s, z12.s, z1.s\n"
@@ -470,20 +470,20 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"ld1w { z2.s }, p2/Z, [x12, #2, MUL VL]\n"
"ld1w { z6.s }, p2/Z, [x13, #2, MUL VL]\n"
"ld1w { z3.s }, p2/Z, [x12, #3, MUL VL]\n"
- "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x12, x12, #4\n"
+ "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
"b 24f\n"
"23:" // Height 2: per layer parameters
- "add x26, %x[qp], %[per_layer_right_shift]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z0.s }, p2/Z, [x26]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1rw { z0.s }, p2/Z, [x24]\n"
"mov z1.d, z0.d\n"
- "mov z5.d, z4.d\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
"mov z2.d, z0.d\n"
- "mov z6.d, z4.d\n"
"mov z3.d, z0.d\n"
+ "mov z5.d, z4.d\n"
+ "mov z6.d, z4.d\n"
"mov z7.d, z4.d\n"
"24:" // Height 2: parameters loaded
".inst 0x04a475ef // sqrdmulh z15.s, z15.s, z4.s\n"
@@ -498,92 +498,92 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"and z4.d, z15.d, z0.d\n"
"and z5.d, z12.d, z1.d\n"
"and z6.d, z13.d, z2.d\n"
- "and z7.d, z14.d, z3.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z15.s, z15.s, z4.s\n"
"sqadd z12.s, z12.s, z5.s\n"
"sqadd z13.s, z13.s, z6.s\n"
- "sqadd z14.s, z14.s, z7.s\n"
+ "and z7.d, z14.d, z3.d\n"
"and z4.d, z8.d, z0.d\n"
"and z5.d, z9.d, z1.d\n"
- "and z6.d, z10.d, z2.d\n"
- "and z7.d, z11.d, z3.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
+ "sqadd z14.s, z14.s, z7.s\n"
"sqadd z8.s, z8.s, z4.s\n"
"sqadd z9.s, z9.s, z5.s\n"
+ "and z6.d, z10.d, z2.d\n"
+ "and z7.d, z11.d, z3.d\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z7.s, z7.s, #0x1f\n"
"sqadd z10.s, z10.s, z6.s\n"
"sqadd z11.s, z11.s, z7.s\n"
"25:" // Height 2: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
".inst 0x4482880f // srshl z15.s, p2/M, z15.s, z0.s\n"
- "add z15.s, z15.s, z4.s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
".inst 0x4482882c // srshl z12.s, p2/M, z12.s, z1.s\n"
+ "add x24, %x[qp], %[minval]\n"
".inst 0x4482884d // srshl z13.s, p2/M, z13.s, z2.s\n"
- "add z12.s, z12.s, z4.s\n"
- "add z13.s, z13.s, z4.s\n"
+ "ld1rw { z5.s }, p2/Z, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
".inst 0x4482886e // srshl z14.s, p2/M, z14.s, z3.s\n"
+ "ld1rw { z6.s }, p2/Z, [x24]\n"
".inst 0x44828808 // srshl z8.s, p2/M, z8.s, z0.s\n"
+ "add z15.s, z15.s, z4.s\n"
+ "add z12.s, z12.s, z4.s\n"
+ "add z13.s, z13.s, z4.s\n"
"add z14.s, z14.s, z4.s\n"
"add z8.s, z8.s, z4.s\n"
- ".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
- ".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
- "add z9.s, z9.s, z4.s\n"
- "add z10.s, z10.s, z4.s\n"
- ".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x25]\n"
- "add z11.s, z11.s, z4.s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x25]\n"
"smin z15.s, p2/M, z15.s, z6.s\n"
"smin z12.s, p2/M, z12.s, z6.s\n"
"smin z13.s, p2/M, z13.s, z6.s\n"
"smin z14.s, p2/M, z14.s, z6.s\n"
- "smin z8.s, p2/M, z8.s, z6.s\n"
- "smin z9.s, p2/M, z9.s, z6.s\n"
- "smin z10.s, p2/M, z10.s, z6.s\n"
- "smin z11.s, p2/M, z11.s, z6.s\n"
"smax z15.s, p2/M, z15.s, z5.s\n"
"smax z12.s, p2/M, z12.s, z5.s\n"
"smax z13.s, p2/M, z13.s, z5.s\n"
- "uzp1 z15.h, z15.h, z12.h\n"
"smax z14.s, p2/M, z14.s, z5.s\n"
- "smax z8.s, p2/M, z8.s, z5.s\n"
+ "smin z8.s, p2/M, z8.s, z6.s\n"
+ "uzp1 z15.h, z15.h, z12.h\n"
+ ".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
"uzp1 z12.h, z13.h, z14.h\n"
+ "smax z8.s, p2/M, z8.s, z5.s\n"
"uzp1 z15.b, z15.b, z12.b\n"
+ "st1b { z15.b }, p1, [x9]\n"
+ "add z9.s, z9.s, z4.s\n"
+ "addvl x9, x9, #1\n"
+ ".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
+ ".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
+ "smin z9.s, p2/M, z9.s, z6.s\n"
+ "add z10.s, z10.s, z4.s\n"
+ "add z11.s, z11.s, z4.s\n"
"smax z9.s, p2/M, z9.s, z5.s\n"
- "smax z10.s, p2/M, z10.s, z5.s\n"
+ "smin z10.s, p2/M, z10.s, z6.s\n"
+ "smin z11.s, p2/M, z11.s, z6.s\n"
"uzp1 z8.h, z8.h, z9.h\n"
- "st1b { z15.b }, p1, [x11]\n"
+ "smax z10.s, p2/M, z10.s, z5.s\n"
"smax z11.s, p2/M, z11.s, z5.s\n"
"uzp1 z9.h, z10.h, z11.h\n"
"uzp1 z8.b, z8.b, z9.b\n"
- "st1b { z8.b }, p1, [x24]\n"
- "addvl x11, x11, #1\n"
+ "st1b { z8.b }, p1, [x23]\n"
"26:" // Height 2: Writeback done
"decw x10, ALL, MUL #4\n"
"cmp x10, XZR\n"
"bgt 15b\n"
"b 80f\n"
"27:" // Height 3
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"28:" // Height 3: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z9.s, #0x0\n"
+ "whilelt p1.b, x19, x10\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
@@ -599,176 +599,176 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"29:" // Height 3: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"30:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 31f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "cbnz x28, 32f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 32f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
"b 32f\n"
"31:" // Height 3: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
"32:" // Height 3: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 34f\n"
"33:" // Height 3: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
+ "cmp x26, #0x10\n"
"trn2 z1.d, z1.d, z2.d\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "trn1 z2.d, z3.d, z4.d\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
- ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
+ "add x23, x23, #0x10\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x45069854 // smmla z20.s, z2.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
".inst 0x45079851 // smmla z17.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
- "trn2 z3.d, z3.d, z4.d\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
".inst 0x45069855 // smmla z21.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
- "sub x27, x27, #0x10\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
".inst 0x45079852 // smmla z18.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
- "cmp x27, #0x10\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
".inst 0x45069856 // smmla z22.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #16\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
".inst 0x45079853 // smmla z19.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-8, MUL VL]\n"
- "add x26, x26, #0x10\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-8, MUL VL]\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
".inst 0x45069857 // smmla z23.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-7, MUL VL]\n"
- "add x25, x25, #0x10\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-7, MUL VL]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
".inst 0x45079870 // smmla z16.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-6, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-6, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
".inst 0x45069874 // smmla z20.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-5, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
".inst 0x45079871 // smmla z17.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-4, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
".inst 0x45069875 // smmla z21.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-3, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
".inst 0x45079872 // smmla z18.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-2, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-2, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
".inst 0x45069876 // smmla z22.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-1, MUL VL]\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x45079873 // smmla z19.s, z3.b, z7.b\n"
".inst 0x4506982f // smmla z15.s, z1.b, z6.b\n"
".inst 0x45069877 // smmla z23.s, z3.b, z6.b\n"
"bgt 33b\n"
"34:" // Height 3: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "subs x26, x26, #0x8\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
"trn2 z1.d, z1.d, z2.d\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "trn1 z2.d, z3.d, z4.d\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
- ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x45069854 // smmla z20.s, z2.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
".inst 0x45079851 // smmla z17.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
- "subs x27, x27, #0x8\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
".inst 0x45069855 // smmla z21.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
- "trn2 z3.d, z3.d, z4.d\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
".inst 0x45079852 // smmla z18.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
".inst 0x45069856 // smmla z22.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #8\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
".inst 0x45079853 // smmla z19.s, z2.b, z7.b\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
".inst 0x45069857 // smmla z23.s, z2.b, z6.b\n"
"ble 35f\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
".inst 0x45079870 // smmla z16.s, z3.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
".inst 0x45069874 // smmla z20.s, z3.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
".inst 0x45079871 // smmla z17.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
".inst 0x45069875 // smmla z21.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
".inst 0x45079872 // smmla z18.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
".inst 0x45069876 // smmla z22.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #8\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x45079873 // smmla z19.s, z3.b, z7.b\n"
".inst 0x4506982f // smmla z15.s, z1.b, z6.b\n"
".inst 0x45069877 // smmla z23.s, z3.b, z6.b\n"
"35:" // Height 3: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 30b\n"
"uzp1 z7.d, z8.d, z12.d\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z8.d, z8.d, z12.d\n"
- "ld1w { z0.s }, p2/Z, [x14]\n"
+ "ld1w { z0.s }, p2/Z, [x11]\n"
+ "add x23, x9, x19\n"
"uzp1 z12.d, z9.d, z13.d\n"
+ "ld1w { z1.s }, p2/Z, [x11, #1, MUL VL]\n"
"uzp2 z9.d, z9.d, z13.d\n"
- "ld1w { z1.s }, p2/Z, [x14, #1, MUL VL]\n"
- "ld1w { z2.s }, p2/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z2.s }, p2/Z, [x11, #2, MUL VL]\n"
+ "add x22, x23, x19\n"
"uzp1 z13.d, z10.d, z14.d\n"
+ "ld1w { z3.s }, p2/Z, [x11, #3, MUL VL]\n"
+ "addvl x11, x11, #4\n"
"uzp2 z10.d, z10.d, z14.d\n"
- "ld1w { z3.s }, p2/Z, [x14, #3, MUL VL]\n"
- "add x24, x11, x20\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
- "add x23, x24, x20\n"
- "addvl x14, x14, #4\n"
"uzp1 z16.d, z16.d, z20.d\n"
"uzp1 z17.d, z17.d, z21.d\n"
"uzp1 z18.d, z18.d, z22.d\n"
@@ -794,20 +794,20 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"ld1w { z2.s }, p2/Z, [x12, #2, MUL VL]\n"
"ld1w { z6.s }, p2/Z, [x13, #2, MUL VL]\n"
"ld1w { z3.s }, p2/Z, [x12, #3, MUL VL]\n"
- "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x12, x12, #4\n"
+ "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
"b 37f\n"
"36:" // Height 3: per layer parameters
- "add x26, %x[qp], %[per_layer_right_shift]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z0.s }, p2/Z, [x26]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1rw { z0.s }, p2/Z, [x24]\n"
"mov z1.d, z0.d\n"
- "mov z5.d, z4.d\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
"mov z2.d, z0.d\n"
- "mov z6.d, z4.d\n"
"mov z3.d, z0.d\n"
+ "mov z5.d, z4.d\n"
+ "mov z6.d, z4.d\n"
"mov z7.d, z4.d\n"
"37:" // Height 3: parameters loaded
".inst 0x04a476f7 // sqrdmulh z23.s, z23.s, z4.s\n"
@@ -826,124 +826,124 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"and z4.d, z23.d, z0.d\n"
"and z5.d, z12.d, z1.d\n"
"and z6.d, z13.d, z2.d\n"
- "and z7.d, z14.d, z3.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z23.s, z23.s, z4.s\n"
"sqadd z12.s, z12.s, z5.s\n"
"sqadd z13.s, z13.s, z6.s\n"
- "sqadd z14.s, z14.s, z7.s\n"
+ "and z7.d, z14.d, z3.d\n"
"and z4.d, z8.d, z0.d\n"
"and z5.d, z9.d, z1.d\n"
- "and z6.d, z10.d, z2.d\n"
- "and z7.d, z11.d, z3.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
+ "sqadd z14.s, z14.s, z7.s\n"
"sqadd z8.s, z8.s, z4.s\n"
"sqadd z9.s, z9.s, z5.s\n"
+ "and z6.d, z10.d, z2.d\n"
+ "and z7.d, z11.d, z3.d\n"
+ "and z4.d, z16.d, z0.d\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
"sqadd z10.s, z10.s, z6.s\n"
"sqadd z11.s, z11.s, z7.s\n"
- "and z4.d, z16.d, z0.d\n"
+ "sqadd z16.s, z16.s, z4.s\n"
"and z5.d, z17.d, z1.d\n"
"and z6.d, z18.d, z2.d\n"
"and z7.d, z19.d, z3.d\n"
- "asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
"asr z7.s, z7.s, #0x1f\n"
- "sqadd z16.s, z16.s, z4.s\n"
"sqadd z17.s, z17.s, z5.s\n"
"sqadd z18.s, z18.s, z6.s\n"
"sqadd z19.s, z19.s, z7.s\n"
"38:" // Height 3: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
- "add z23.s, z23.s, z4.s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
".inst 0x4482882c // srshl z12.s, p2/M, z12.s, z1.s\n"
+ "add x24, %x[qp], %[minval]\n"
".inst 0x4482884d // srshl z13.s, p2/M, z13.s, z2.s\n"
- "add z12.s, z12.s, z4.s\n"
- "add z13.s, z13.s, z4.s\n"
+ "ld1rw { z5.s }, p2/Z, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
".inst 0x4482886e // srshl z14.s, p2/M, z14.s, z3.s\n"
+ "ld1rw { z6.s }, p2/Z, [x24]\n"
".inst 0x44828808 // srshl z8.s, p2/M, z8.s, z0.s\n"
+ "add z23.s, z23.s, z4.s\n"
+ "add z12.s, z12.s, z4.s\n"
+ "add z13.s, z13.s, z4.s\n"
"add z14.s, z14.s, z4.s\n"
"add z8.s, z8.s, z4.s\n"
- ".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
- ".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
- "add z9.s, z9.s, z4.s\n"
- "add z10.s, z10.s, z4.s\n"
- ".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
- ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z11.s, z11.s, z4.s\n"
- "add z16.s, z16.s, z4.s\n"
- ".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
- ".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
- "add z17.s, z17.s, z4.s\n"
- "add z18.s, z18.s, z4.s\n"
- ".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x25]\n"
- "add z19.s, z19.s, z4.s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x25]\n"
"smin z23.s, p2/M, z23.s, z6.s\n"
"smin z12.s, p2/M, z12.s, z6.s\n"
"smin z13.s, p2/M, z13.s, z6.s\n"
"smin z14.s, p2/M, z14.s, z6.s\n"
- "smin z8.s, p2/M, z8.s, z6.s\n"
- "smin z9.s, p2/M, z9.s, z6.s\n"
- "smin z10.s, p2/M, z10.s, z6.s\n"
- "smin z11.s, p2/M, z11.s, z6.s\n"
- "smin z16.s, p2/M, z16.s, z6.s\n"
- "smin z17.s, p2/M, z17.s, z6.s\n"
- "smin z18.s, p2/M, z18.s, z6.s\n"
- "smin z19.s, p2/M, z19.s, z6.s\n"
"smax z23.s, p2/M, z23.s, z5.s\n"
"smax z12.s, p2/M, z12.s, z5.s\n"
"smax z13.s, p2/M, z13.s, z5.s\n"
- "uzp1 z23.h, z23.h, z12.h\n"
"smax z14.s, p2/M, z14.s, z5.s\n"
- "smax z8.s, p2/M, z8.s, z5.s\n"
+ "smin z8.s, p2/M, z8.s, z6.s\n"
+ "uzp1 z23.h, z23.h, z12.h\n"
+ ".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
"uzp1 z12.h, z13.h, z14.h\n"
+ "smax z8.s, p2/M, z8.s, z5.s\n"
"uzp1 z23.b, z23.b, z12.b\n"
+ "st1b { z23.b }, p1, [x9]\n"
+ "add z9.s, z9.s, z4.s\n"
+ "addvl x9, x9, #1\n"
+ ".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
+ ".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
+ ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
+ "smin z9.s, p2/M, z9.s, z6.s\n"
+ ".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
+ "add z10.s, z10.s, z4.s\n"
+ "add z11.s, z11.s, z4.s\n"
+ "add z16.s, z16.s, z4.s\n"
+ "add z17.s, z17.s, z4.s\n"
"smax z9.s, p2/M, z9.s, z5.s\n"
- "smax z10.s, p2/M, z10.s, z5.s\n"
+ "smin z10.s, p2/M, z10.s, z6.s\n"
+ "smin z11.s, p2/M, z11.s, z6.s\n"
+ "smin z16.s, p2/M, z16.s, z6.s\n"
"uzp1 z8.h, z8.h, z9.h\n"
- "st1b { z23.b }, p1, [x11]\n"
+ "smax z10.s, p2/M, z10.s, z5.s\n"
"smax z11.s, p2/M, z11.s, z5.s\n"
"smax z16.s, p2/M, z16.s, z5.s\n"
+ "smin z17.s, p2/M, z17.s, z6.s\n"
+ ".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
"uzp1 z9.h, z10.h, z11.h\n"
+ ".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
"uzp1 z8.b, z8.b, z9.b\n"
+ "st1b { z8.b }, p1, [x23]\n"
+ "add z18.s, z18.s, z4.s\n"
"smax z17.s, p2/M, z17.s, z5.s\n"
- "smax z18.s, p2/M, z18.s, z5.s\n"
+ "add z19.s, z19.s, z4.s\n"
+ "smin z18.s, p2/M, z18.s, z6.s\n"
"uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z8.b }, p1, [x24]\n"
+ "smin z19.s, p2/M, z19.s, z6.s\n"
+ "smax z18.s, p2/M, z18.s, z5.s\n"
"smax z19.s, p2/M, z19.s, z5.s\n"
"uzp1 z17.h, z18.h, z19.h\n"
"uzp1 z16.b, z16.b, z17.b\n"
- "st1b { z16.b }, p1, [x23]\n"
- "addvl x11, x11, #1\n"
+ "st1b { z16.b }, p1, [x22]\n"
"39:" // Height 3: Writeback done
"decw x10, ALL, MUL #4\n"
"cmp x10, XZR\n"
"bgt 28b\n"
"b 80f\n"
"40:" // Height 4
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"41:" // Height 4: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z9.s, #0x0\n"
+ "whilelt p1.b, x19, x10\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
@@ -959,185 +959,185 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"42:" // Height 4: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"43:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 44f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "cbnz x28, 45f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 45f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
"b 45f\n"
"44:" // Height 4: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
"45:" // Height 4: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 47f\n"
"46:" // Height 4: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "cmp x26, #0x10\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z4.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
+ "add x25, x25, #0x10\n"
"trn2 z1.d, z1.d, z2.d\n"
- "trn1 z2.d, z3.d, z4.d\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1rqb { z4.b }, p0/Z, [x22]\n"
+ "add x24, x24, #0x10\n"
".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
- ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
+ "add x23, x23, #0x10\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
+ "add x22, x22, #0x10\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x45069854 // smmla z20.s, z2.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
".inst 0x45079851 // smmla z17.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
- "trn2 z3.d, z3.d, z4.d\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
".inst 0x45069855 // smmla z21.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
- "sub x27, x27, #0x10\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
".inst 0x45079852 // smmla z18.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
- "cmp x27, #0x10\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
".inst 0x45069856 // smmla z22.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #16\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
".inst 0x45079853 // smmla z19.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-8, MUL VL]\n"
- "add x26, x26, #0x10\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-8, MUL VL]\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
".inst 0x45069857 // smmla z23.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-7, MUL VL]\n"
- "add x25, x25, #0x10\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-7, MUL VL]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
".inst 0x45079870 // smmla z16.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-6, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-6, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
".inst 0x45069874 // smmla z20.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-5, MUL VL]\n"
- "add x23, x23, #0x10\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-5, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
".inst 0x45079871 // smmla z17.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-4, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
".inst 0x45069875 // smmla z21.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-3, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
".inst 0x45079872 // smmla z18.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-2, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-2, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
".inst 0x45069876 // smmla z22.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-1, MUL VL]\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x45079873 // smmla z19.s, z3.b, z7.b\n"
".inst 0x4506982f // smmla z15.s, z1.b, z6.b\n"
".inst 0x45069877 // smmla z23.s, z3.b, z6.b\n"
"bgt 46b\n"
"47:" // Height 4: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x26, x26, #0x8\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z4.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
"trn2 z1.d, z1.d, z2.d\n"
- "trn1 z2.d, z3.d, z4.d\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1rqb { z4.b }, p0/Z, [x22]\n"
".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
- ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x45069854 // smmla z20.s, z2.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
".inst 0x45079851 // smmla z17.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
- "subs x27, x27, #0x8\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
".inst 0x45069855 // smmla z21.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
- "trn2 z3.d, z3.d, z4.d\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
".inst 0x45079852 // smmla z18.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
".inst 0x45069856 // smmla z22.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #8\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
".inst 0x45079853 // smmla z19.s, z2.b, z7.b\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
".inst 0x45069857 // smmla z23.s, z2.b, z6.b\n"
"ble 48f\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
".inst 0x45079870 // smmla z16.s, z3.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
".inst 0x45069874 // smmla z20.s, z3.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
".inst 0x45079871 // smmla z17.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
".inst 0x45069875 // smmla z21.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
".inst 0x45079872 // smmla z18.s, z3.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
".inst 0x45069876 // smmla z22.s, z3.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #8\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x45079873 // smmla z19.s, z3.b, z7.b\n"
".inst 0x4506982f // smmla z15.s, z1.b, z6.b\n"
".inst 0x45069877 // smmla z23.s, z3.b, z6.b\n"
"48:" // Height 4: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 43b\n"
"uzp1 z7.d, z8.d, z12.d\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z8.d, z8.d, z12.d\n"
- "ld1w { z0.s }, p2/Z, [x14]\n"
+ "ld1w { z0.s }, p2/Z, [x11]\n"
+ "add x23, x9, x19\n"
"uzp1 z12.d, z9.d, z13.d\n"
+ "ld1w { z1.s }, p2/Z, [x11, #1, MUL VL]\n"
"uzp2 z9.d, z9.d, z13.d\n"
- "ld1w { z1.s }, p2/Z, [x14, #1, MUL VL]\n"
- "ld1w { z2.s }, p2/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z2.s }, p2/Z, [x11, #2, MUL VL]\n"
+ "add x22, x23, x19\n"
"uzp1 z13.d, z10.d, z14.d\n"
+ "ld1w { z3.s }, p2/Z, [x11, #3, MUL VL]\n"
+ "add x21, x22, x19\n"
"uzp2 z10.d, z10.d, z14.d\n"
- "ld1w { z3.s }, p2/Z, [x14, #3, MUL VL]\n"
- "add x24, x11, x20\n"
+ "addvl x11, x11, #4\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
"uzp1 z15.d, z16.d, z20.d\n"
"uzp2 z16.d, z16.d, z20.d\n"
- "addvl x14, x14, #4\n"
"uzp1 z20.d, z17.d, z21.d\n"
"uzp2 z17.d, z17.d, z21.d\n"
"uzp1 z21.d, z18.d, z22.d\n"
@@ -1169,20 +1169,20 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"ld1w { z2.s }, p2/Z, [x12, #2, MUL VL]\n"
"ld1w { z6.s }, p2/Z, [x13, #2, MUL VL]\n"
"ld1w { z3.s }, p2/Z, [x12, #3, MUL VL]\n"
- "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x12, x12, #4\n"
+ "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
"b 50f\n"
"49:" // Height 4: per layer parameters
- "add x26, %x[qp], %[per_layer_right_shift]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z0.s }, p2/Z, [x26]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1rw { z0.s }, p2/Z, [x24]\n"
"mov z1.d, z0.d\n"
- "mov z5.d, z4.d\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
"mov z2.d, z0.d\n"
- "mov z6.d, z4.d\n"
"mov z3.d, z0.d\n"
+ "mov z5.d, z4.d\n"
+ "mov z6.d, z4.d\n"
"mov z7.d, z4.d\n"
"50:" // Height 4: parameters loaded
".inst 0x04a476f7 // sqrdmulh z23.s, z23.s, z4.s\n"
@@ -1205,156 +1205,156 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"and z4.d, z23.d, z0.d\n"
"and z5.d, z12.d, z1.d\n"
"and z6.d, z13.d, z2.d\n"
- "and z7.d, z14.d, z3.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z23.s, z23.s, z4.s\n"
"sqadd z12.s, z12.s, z5.s\n"
"sqadd z13.s, z13.s, z6.s\n"
- "sqadd z14.s, z14.s, z7.s\n"
+ "and z7.d, z14.d, z3.d\n"
"and z4.d, z8.d, z0.d\n"
"and z5.d, z9.d, z1.d\n"
- "and z6.d, z10.d, z2.d\n"
- "and z7.d, z11.d, z3.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
+ "sqadd z14.s, z14.s, z7.s\n"
"sqadd z8.s, z8.s, z4.s\n"
"sqadd z9.s, z9.s, z5.s\n"
+ "and z6.d, z10.d, z2.d\n"
+ "and z7.d, z11.d, z3.d\n"
+ "and z4.d, z15.d, z0.d\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
"sqadd z10.s, z10.s, z6.s\n"
"sqadd z11.s, z11.s, z7.s\n"
- "and z4.d, z15.d, z0.d\n"
+ "sqadd z15.s, z15.s, z4.s\n"
"and z5.d, z20.d, z1.d\n"
"and z6.d, z21.d, z2.d\n"
"and z7.d, z22.d, z3.d\n"
- "asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
"asr z7.s, z7.s, #0x1f\n"
- "sqadd z15.s, z15.s, z4.s\n"
"sqadd z20.s, z20.s, z5.s\n"
"sqadd z21.s, z21.s, z6.s\n"
"sqadd z22.s, z22.s, z7.s\n"
"and z4.d, z16.d, z0.d\n"
"and z5.d, z17.d, z1.d\n"
"and z6.d, z18.d, z2.d\n"
- "and z7.d, z19.d, z3.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z16.s, z16.s, z4.s\n"
"sqadd z17.s, z17.s, z5.s\n"
"sqadd z18.s, z18.s, z6.s\n"
+ "and z7.d, z19.d, z3.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
"sqadd z19.s, z19.s, z7.s\n"
"51:" // Height 4: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
- "add z23.s, z23.s, z4.s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
".inst 0x4482882c // srshl z12.s, p2/M, z12.s, z1.s\n"
+ "add x24, %x[qp], %[minval]\n"
".inst 0x4482884d // srshl z13.s, p2/M, z13.s, z2.s\n"
- "add z12.s, z12.s, z4.s\n"
- "add z13.s, z13.s, z4.s\n"
+ "ld1rw { z5.s }, p2/Z, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
".inst 0x4482886e // srshl z14.s, p2/M, z14.s, z3.s\n"
+ "ld1rw { z6.s }, p2/Z, [x24]\n"
".inst 0x44828808 // srshl z8.s, p2/M, z8.s, z0.s\n"
+ "add z23.s, z23.s, z4.s\n"
+ "add z12.s, z12.s, z4.s\n"
+ "add z13.s, z13.s, z4.s\n"
"add z14.s, z14.s, z4.s\n"
"add z8.s, z8.s, z4.s\n"
- ".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
- ".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
- "add z9.s, z9.s, z4.s\n"
- "add z10.s, z10.s, z4.s\n"
- ".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
- ".inst 0x4482880f // srshl z15.s, p2/M, z15.s, z0.s\n"
- "add z11.s, z11.s, z4.s\n"
- "add z15.s, z15.s, z4.s\n"
- ".inst 0x44828834 // srshl z20.s, p2/M, z20.s, z1.s\n"
- ".inst 0x44828855 // srshl z21.s, p2/M, z21.s, z2.s\n"
- "add z20.s, z20.s, z4.s\n"
- "add z21.s, z21.s, z4.s\n"
- ".inst 0x44828876 // srshl z22.s, p2/M, z22.s, z3.s\n"
- ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z22.s, z22.s, z4.s\n"
- "add z16.s, z16.s, z4.s\n"
- ".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
- ".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
- "add z17.s, z17.s, z4.s\n"
- "add z18.s, z18.s, z4.s\n"
- ".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x25]\n"
- "add z19.s, z19.s, z4.s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x25]\n"
"smin z23.s, p2/M, z23.s, z6.s\n"
"smin z12.s, p2/M, z12.s, z6.s\n"
"smin z13.s, p2/M, z13.s, z6.s\n"
"smin z14.s, p2/M, z14.s, z6.s\n"
- "smin z8.s, p2/M, z8.s, z6.s\n"
- "smin z9.s, p2/M, z9.s, z6.s\n"
- "smin z10.s, p2/M, z10.s, z6.s\n"
- "smin z11.s, p2/M, z11.s, z6.s\n"
- "smin z15.s, p2/M, z15.s, z6.s\n"
- "smin z20.s, p2/M, z20.s, z6.s\n"
- "smin z21.s, p2/M, z21.s, z6.s\n"
- "smin z22.s, p2/M, z22.s, z6.s\n"
- "smin z16.s, p2/M, z16.s, z6.s\n"
- "smin z17.s, p2/M, z17.s, z6.s\n"
- "smin z18.s, p2/M, z18.s, z6.s\n"
- "smin z19.s, p2/M, z19.s, z6.s\n"
"smax z23.s, p2/M, z23.s, z5.s\n"
"smax z12.s, p2/M, z12.s, z5.s\n"
"smax z13.s, p2/M, z13.s, z5.s\n"
- "uzp1 z23.h, z23.h, z12.h\n"
"smax z14.s, p2/M, z14.s, z5.s\n"
- "smax z8.s, p2/M, z8.s, z5.s\n"
+ "smin z8.s, p2/M, z8.s, z6.s\n"
+ "uzp1 z23.h, z23.h, z12.h\n"
+ ".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
"uzp1 z12.h, z13.h, z14.h\n"
+ "smax z8.s, p2/M, z8.s, z5.s\n"
"uzp1 z23.b, z23.b, z12.b\n"
+ "st1b { z23.b }, p1, [x9]\n"
+ "add z9.s, z9.s, z4.s\n"
+ "addvl x9, x9, #1\n"
+ ".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
+ ".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
+ ".inst 0x4482880f // srshl z15.s, p2/M, z15.s, z0.s\n"
+ "smin z9.s, p2/M, z9.s, z6.s\n"
+ ".inst 0x44828834 // srshl z20.s, p2/M, z20.s, z1.s\n"
+ "add z10.s, z10.s, z4.s\n"
+ "add z11.s, z11.s, z4.s\n"
+ "add z15.s, z15.s, z4.s\n"
+ "add z20.s, z20.s, z4.s\n"
"smax z9.s, p2/M, z9.s, z5.s\n"
- "smax z10.s, p2/M, z10.s, z5.s\n"
+ "smin z10.s, p2/M, z10.s, z6.s\n"
+ "smin z11.s, p2/M, z11.s, z6.s\n"
+ "smin z15.s, p2/M, z15.s, z6.s\n"
"uzp1 z8.h, z8.h, z9.h\n"
- "st1b { z23.b }, p1, [x11]\n"
+ "smax z10.s, p2/M, z10.s, z5.s\n"
"smax z11.s, p2/M, z11.s, z5.s\n"
"smax z15.s, p2/M, z15.s, z5.s\n"
+ "smin z20.s, p2/M, z20.s, z6.s\n"
+ ".inst 0x44828855 // srshl z21.s, p2/M, z21.s, z2.s\n"
"uzp1 z9.h, z10.h, z11.h\n"
+ ".inst 0x44828876 // srshl z22.s, p2/M, z22.s, z3.s\n"
"uzp1 z8.b, z8.b, z9.b\n"
+ "st1b { z8.b }, p1, [x23]\n"
+ "add z21.s, z21.s, z4.s\n"
"smax z20.s, p2/M, z20.s, z5.s\n"
- "smax z21.s, p2/M, z21.s, z5.s\n"
+ "add z22.s, z22.s, z4.s\n"
+ ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
+ "smin z21.s, p2/M, z21.s, z6.s\n"
"uzp1 z15.h, z15.h, z20.h\n"
- "st1b { z8.b }, p1, [x24]\n"
+ "smin z22.s, p2/M, z22.s, z6.s\n"
+ "add z16.s, z16.s, z4.s\n"
+ "smax z21.s, p2/M, z21.s, z5.s\n"
+ ".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
"smax z22.s, p2/M, z22.s, z5.s\n"
- "smax z16.s, p2/M, z16.s, z5.s\n"
+ "smin z16.s, p2/M, z16.s, z6.s\n"
+ ".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
+ "add z17.s, z17.s, z4.s\n"
"uzp1 z20.h, z21.h, z22.h\n"
+ "smax z16.s, p2/M, z16.s, z5.s\n"
+ "add z18.s, z18.s, z4.s\n"
"uzp1 z15.b, z15.b, z20.b\n"
+ "st1b { z15.b }, p1, [x22]\n"
+ "smin z17.s, p2/M, z17.s, z6.s\n"
+ "smin z18.s, p2/M, z18.s, z6.s\n"
+ ".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
"smax z17.s, p2/M, z17.s, z5.s\n"
"smax z18.s, p2/M, z18.s, z5.s\n"
+ "add z19.s, z19.s, z4.s\n"
"uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z15.b }, p1, [x23]\n"
+ "smin z19.s, p2/M, z19.s, z6.s\n"
"smax z19.s, p2/M, z19.s, z5.s\n"
"uzp1 z17.h, z18.h, z19.h\n"
"uzp1 z16.b, z16.b, z17.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "addvl x11, x11, #1\n"
+ "st1b { z16.b }, p1, [x21]\n"
"52:" // Height 4: Writeback done
"decw x10, ALL, MUL #4\n"
"cmp x10, XZR\n"
"bgt 41b\n"
"b 80f\n"
"53:" // Height 5
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"54:" // Height 5: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z9.s, #0x0\n"
+ "whilelt p1.b, x19, x10\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
@@ -1378,115 +1378,115 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"mov z30.s, #0x0\n"
"mov z31.s, #0x0\n"
"55:" // Height 5: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"56:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 57f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "cbnz x28, 58f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 58f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
"b 58f\n"
"57:" // Height 5: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
"58:" // Height 5: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 60f\n"
"59:" // Height 5: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z4.b }, p0/Z, [x23]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
+ "sub x26, x26, #0x10\n"
"trn2 z1.d, z1.d, z2.d\n"
- "ld1rqb { z5.b }, p0/Z, [x22]\n"
+ "ld1rqb { z4.b }, p0/Z, [x22]\n"
+ "cmp x26, #0x10\n"
+ ".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1rqb { z5.b }, p0/Z, [x21]\n"
+ "add x25, x25, #0x10\n"
"trn1 z2.d, z3.d, z4.d\n"
+ "add x24, x24, #0x10\n"
"trn2 z3.d, z3.d, z4.d\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
+ "add x23, x23, #0x10\n"
"trn1 z4.d, z5.d, z6.d\n"
+ "add x22, x22, #0x10\n"
"trn2 z5.d, z5.d, z6.d\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
- ".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "add x21, x21, #0x10\n"
".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
".inst 0x45079898 // smmla z24.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "sub x27, x27, #0x10\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
".inst 0x45069854 // smmla z20.s, z2.b, z6.b\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
".inst 0x4506989c // smmla z28.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
- "add x25, x25, #0x10\n"
".inst 0x45079851 // smmla z17.s, z2.b, z7.b\n"
".inst 0x45079899 // smmla z25.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
".inst 0x45069855 // smmla z21.s, z2.b, z6.b\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
".inst 0x4506989d // smmla z29.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
".inst 0x45079852 // smmla z18.s, z2.b, z7.b\n"
".inst 0x4507989a // smmla z26.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
".inst 0x45069856 // smmla z22.s, z2.b, z6.b\n"
".inst 0x4506989e // smmla z30.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #16\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
".inst 0x45079853 // smmla z19.s, z2.b, z7.b\n"
".inst 0x4507989b // smmla z27.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-8, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-8, MUL VL]\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
".inst 0x45069857 // smmla z23.s, z2.b, z6.b\n"
".inst 0x4506989f // smmla z31.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-7, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-7, MUL VL]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
".inst 0x45079870 // smmla z16.s, z3.b, z7.b\n"
".inst 0x450798b8 // smmla z24.s, z5.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-6, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-6, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
".inst 0x45069874 // smmla z20.s, z3.b, z6.b\n"
".inst 0x450698bc // smmla z28.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-5, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
".inst 0x45079871 // smmla z17.s, z3.b, z7.b\n"
".inst 0x450798b9 // smmla z25.s, z5.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-4, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
".inst 0x45069875 // smmla z21.s, z3.b, z6.b\n"
".inst 0x450698bd // smmla z29.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-3, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
".inst 0x45079872 // smmla z18.s, z3.b, z7.b\n"
".inst 0x450798ba // smmla z26.s, z5.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-2, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-2, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
".inst 0x45069876 // smmla z22.s, z3.b, z6.b\n"
".inst 0x450698be // smmla z30.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-1, MUL VL]\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x45079873 // smmla z19.s, z3.b, z7.b\n"
".inst 0x450798bb // smmla z27.s, z5.b, z7.b\n"
@@ -1495,80 +1495,80 @@ void sve_hybrid_s8qs_mmla_6x4VL (
".inst 0x450698bf // smmla z31.s, z5.b, z6.b\n"
"bgt 59b\n"
"60:" // Height 5: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z4.b }, p0/Z, [x23]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
+ "subs x26, x26, #0x8\n"
"trn2 z1.d, z1.d, z2.d\n"
- "ld1rqb { z5.b }, p0/Z, [x22]\n"
+ "ld1rqb { z4.b }, p0/Z, [x22]\n"
+ "ld1rqb { z5.b }, p0/Z, [x21]\n"
+ ".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
"trn1 z2.d, z3.d, z4.d\n"
"trn2 z3.d, z3.d, z4.d\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
"trn1 z4.d, z5.d, z6.d\n"
"trn2 z5.d, z5.d, z6.d\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
- ".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
".inst 0x45079898 // smmla z24.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "subs x27, x27, #0x8\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
".inst 0x45069854 // smmla z20.s, z2.b, z6.b\n"
".inst 0x4506989c // smmla z28.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
".inst 0x45079851 // smmla z17.s, z2.b, z7.b\n"
".inst 0x45079899 // smmla z25.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
".inst 0x45069855 // smmla z21.s, z2.b, z6.b\n"
".inst 0x4506989d // smmla z29.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
".inst 0x45079852 // smmla z18.s, z2.b, z7.b\n"
".inst 0x4507989a // smmla z26.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
".inst 0x45069856 // smmla z22.s, z2.b, z6.b\n"
".inst 0x4506989e // smmla z30.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
- "addvl x9, x9, #8\n"
".inst 0x45079853 // smmla z19.s, z2.b, z7.b\n"
".inst 0x4507989b // smmla z27.s, z4.b, z7.b\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
".inst 0x45069857 // smmla z23.s, z2.b, z6.b\n"
".inst 0x4506989f // smmla z31.s, z4.b, z6.b\n"
"ble 61f\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
".inst 0x45079870 // smmla z16.s, z3.b, z7.b\n"
".inst 0x450798b8 // smmla z24.s, z5.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
".inst 0x45069874 // smmla z20.s, z3.b, z6.b\n"
".inst 0x450698bc // smmla z28.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
".inst 0x45079871 // smmla z17.s, z3.b, z7.b\n"
".inst 0x450798b9 // smmla z25.s, z5.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
".inst 0x45069875 // smmla z21.s, z3.b, z6.b\n"
".inst 0x450698bd // smmla z29.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
".inst 0x45079872 // smmla z18.s, z3.b, z7.b\n"
".inst 0x450798ba // smmla z26.s, z5.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
".inst 0x45069876 // smmla z22.s, z3.b, z6.b\n"
".inst 0x450698be // smmla z30.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #8\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x45079873 // smmla z19.s, z3.b, z7.b\n"
".inst 0x450798bb // smmla z27.s, z5.b, z7.b\n"
@@ -1576,30 +1576,30 @@ void sve_hybrid_s8qs_mmla_6x4VL (
".inst 0x45069877 // smmla z23.s, z3.b, z6.b\n"
".inst 0x450698bf // smmla z31.s, z5.b, z6.b\n"
"61:" // Height 5: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 56b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 z7.d, z8.d, z12.d\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z8.d, z8.d, z12.d\n"
- "add x24, x11, x20\n"
+ "ld1w { z0.s }, p2/Z, [x11]\n"
+ "add x23, x9, x19\n"
"uzp1 z12.d, z9.d, z13.d\n"
+ "ld1w { z1.s }, p2/Z, [x11, #1, MUL VL]\n"
"uzp2 z9.d, z9.d, z13.d\n"
- "ld1w { z0.s }, p2/Z, [x14]\n"
- "ld1w { z1.s }, p2/Z, [x14, #1, MUL VL]\n"
+ "ld1w { z2.s }, p2/Z, [x11, #2, MUL VL]\n"
+ "add x22, x23, x19\n"
"uzp1 z13.d, z10.d, z14.d\n"
+ "ld1w { z3.s }, p2/Z, [x11, #3, MUL VL]\n"
+ "add x21, x22, x19\n"
"uzp2 z10.d, z10.d, z14.d\n"
- "ld1w { z2.s }, p2/Z, [x14, #2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x14, #3, MUL VL]\n"
+ "add x20, x21, x19\n"
"uzp1 z14.d, z11.d, z15.d\n"
+ "addvl x11, x11, #4\n"
"uzp2 z11.d, z11.d, z15.d\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
"uzp1 z15.d, z16.d, z20.d\n"
"uzp2 z16.d, z16.d, z20.d\n"
- "add x21, x22, x20\n"
- "addvl x14, x14, #4\n"
"uzp1 z20.d, z17.d, z21.d\n"
"uzp2 z17.d, z17.d, z21.d\n"
"uzp1 z21.d, z18.d, z22.d\n"
@@ -1639,20 +1639,20 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"ld1w { z2.s }, p2/Z, [x12, #2, MUL VL]\n"
"ld1w { z6.s }, p2/Z, [x13, #2, MUL VL]\n"
"ld1w { z3.s }, p2/Z, [x12, #3, MUL VL]\n"
- "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x12, x12, #4\n"
+ "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
"b 63f\n"
"62:" // Height 5: per layer parameters
- "add x26, %x[qp], %[per_layer_right_shift]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z0.s }, p2/Z, [x26]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1rw { z0.s }, p2/Z, [x24]\n"
"mov z1.d, z0.d\n"
- "mov z5.d, z4.d\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
"mov z2.d, z0.d\n"
- "mov z6.d, z4.d\n"
"mov z3.d, z0.d\n"
+ "mov z5.d, z4.d\n"
+ "mov z6.d, z4.d\n"
"mov z7.d, z4.d\n"
"63:" // Height 5: parameters loaded
".inst 0x04a477ff // sqrdmulh z31.s, z31.s, z4.s\n"
@@ -1679,191 +1679,191 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"and z4.d, z31.d, z0.d\n"
"and z5.d, z12.d, z1.d\n"
"and z6.d, z13.d, z2.d\n"
- "and z7.d, z14.d, z3.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z31.s, z31.s, z4.s\n"
"sqadd z12.s, z12.s, z5.s\n"
"sqadd z13.s, z13.s, z6.s\n"
- "sqadd z14.s, z14.s, z7.s\n"
+ "and z7.d, z14.d, z3.d\n"
"and z4.d, z8.d, z0.d\n"
"and z5.d, z9.d, z1.d\n"
- "and z6.d, z10.d, z2.d\n"
- "and z7.d, z11.d, z3.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
+ "sqadd z14.s, z14.s, z7.s\n"
"sqadd z8.s, z8.s, z4.s\n"
"sqadd z9.s, z9.s, z5.s\n"
+ "and z6.d, z10.d, z2.d\n"
+ "and z7.d, z11.d, z3.d\n"
+ "and z4.d, z15.d, z0.d\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
"sqadd z10.s, z10.s, z6.s\n"
"sqadd z11.s, z11.s, z7.s\n"
- "and z4.d, z15.d, z0.d\n"
+ "sqadd z15.s, z15.s, z4.s\n"
"and z5.d, z20.d, z1.d\n"
"and z6.d, z21.d, z2.d\n"
"and z7.d, z22.d, z3.d\n"
- "asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
"asr z7.s, z7.s, #0x1f\n"
- "sqadd z15.s, z15.s, z4.s\n"
"sqadd z20.s, z20.s, z5.s\n"
"sqadd z21.s, z21.s, z6.s\n"
"sqadd z22.s, z22.s, z7.s\n"
"and z4.d, z16.d, z0.d\n"
"and z5.d, z17.d, z1.d\n"
"and z6.d, z18.d, z2.d\n"
- "and z7.d, z19.d, z3.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z16.s, z16.s, z4.s\n"
"sqadd z17.s, z17.s, z5.s\n"
"sqadd z18.s, z18.s, z6.s\n"
- "sqadd z19.s, z19.s, z7.s\n"
+ "and z7.d, z19.d, z3.d\n"
"and z4.d, z24.d, z0.d\n"
"and z5.d, z25.d, z1.d\n"
- "and z6.d, z26.d, z2.d\n"
- "and z7.d, z27.d, z3.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
+ "sqadd z19.s, z19.s, z7.s\n"
"sqadd z24.s, z24.s, z4.s\n"
"sqadd z25.s, z25.s, z5.s\n"
+ "and z6.d, z26.d, z2.d\n"
+ "and z7.d, z27.d, z3.d\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z7.s, z7.s, #0x1f\n"
"sqadd z26.s, z26.s, z6.s\n"
"sqadd z27.s, z27.s, z7.s\n"
"64:" // Height 5: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
".inst 0x4482881f // srshl z31.s, p2/M, z31.s, z0.s\n"
- "add z31.s, z31.s, z4.s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
".inst 0x4482882c // srshl z12.s, p2/M, z12.s, z1.s\n"
+ "add x24, %x[qp], %[minval]\n"
".inst 0x4482884d // srshl z13.s, p2/M, z13.s, z2.s\n"
- "add z12.s, z12.s, z4.s\n"
- "add z13.s, z13.s, z4.s\n"
+ "ld1rw { z5.s }, p2/Z, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
".inst 0x4482886e // srshl z14.s, p2/M, z14.s, z3.s\n"
+ "ld1rw { z6.s }, p2/Z, [x24]\n"
".inst 0x44828808 // srshl z8.s, p2/M, z8.s, z0.s\n"
+ "add z31.s, z31.s, z4.s\n"
+ "add z12.s, z12.s, z4.s\n"
+ "add z13.s, z13.s, z4.s\n"
"add z14.s, z14.s, z4.s\n"
"add z8.s, z8.s, z4.s\n"
- ".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
- ".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
- "add z9.s, z9.s, z4.s\n"
- "add z10.s, z10.s, z4.s\n"
- ".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
- ".inst 0x4482880f // srshl z15.s, p2/M, z15.s, z0.s\n"
- "add z11.s, z11.s, z4.s\n"
- "add z15.s, z15.s, z4.s\n"
- ".inst 0x44828834 // srshl z20.s, p2/M, z20.s, z1.s\n"
- ".inst 0x44828855 // srshl z21.s, p2/M, z21.s, z2.s\n"
- "add z20.s, z20.s, z4.s\n"
- "add z21.s, z21.s, z4.s\n"
- ".inst 0x44828876 // srshl z22.s, p2/M, z22.s, z3.s\n"
- ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z22.s, z22.s, z4.s\n"
- "add z16.s, z16.s, z4.s\n"
- ".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
- ".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
- "add z17.s, z17.s, z4.s\n"
- "add z18.s, z18.s, z4.s\n"
- ".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
- ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
- "add z19.s, z19.s, z4.s\n"
- "add z24.s, z24.s, z4.s\n"
- ".inst 0x44828839 // srshl z25.s, p2/M, z25.s, z1.s\n"
- ".inst 0x4482885a // srshl z26.s, p2/M, z26.s, z2.s\n"
- "add z25.s, z25.s, z4.s\n"
- "add z26.s, z26.s, z4.s\n"
- ".inst 0x4482887b // srshl z27.s, p2/M, z27.s, z3.s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x25]\n"
- "add z27.s, z27.s, z4.s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x25]\n"
"smin z31.s, p2/M, z31.s, z6.s\n"
"smin z12.s, p2/M, z12.s, z6.s\n"
"smin z13.s, p2/M, z13.s, z6.s\n"
"smin z14.s, p2/M, z14.s, z6.s\n"
- "smin z8.s, p2/M, z8.s, z6.s\n"
- "smin z9.s, p2/M, z9.s, z6.s\n"
- "smin z10.s, p2/M, z10.s, z6.s\n"
- "smin z11.s, p2/M, z11.s, z6.s\n"
- "smin z15.s, p2/M, z15.s, z6.s\n"
- "smin z20.s, p2/M, z20.s, z6.s\n"
- "smin z21.s, p2/M, z21.s, z6.s\n"
- "smin z22.s, p2/M, z22.s, z6.s\n"
- "smin z16.s, p2/M, z16.s, z6.s\n"
- "smin z17.s, p2/M, z17.s, z6.s\n"
- "smin z18.s, p2/M, z18.s, z6.s\n"
- "smin z19.s, p2/M, z19.s, z6.s\n"
- "smin z24.s, p2/M, z24.s, z6.s\n"
- "smin z25.s, p2/M, z25.s, z6.s\n"
- "smin z26.s, p2/M, z26.s, z6.s\n"
- "smin z27.s, p2/M, z27.s, z6.s\n"
"smax z31.s, p2/M, z31.s, z5.s\n"
"smax z12.s, p2/M, z12.s, z5.s\n"
"smax z13.s, p2/M, z13.s, z5.s\n"
- "uzp1 z31.h, z31.h, z12.h\n"
"smax z14.s, p2/M, z14.s, z5.s\n"
- "smax z8.s, p2/M, z8.s, z5.s\n"
+ "smin z8.s, p2/M, z8.s, z6.s\n"
+ "uzp1 z31.h, z31.h, z12.h\n"
+ ".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
"uzp1 z12.h, z13.h, z14.h\n"
+ "smax z8.s, p2/M, z8.s, z5.s\n"
"uzp1 z31.b, z31.b, z12.b\n"
+ "st1b { z31.b }, p1, [x9]\n"
+ "add z9.s, z9.s, z4.s\n"
+ "addvl x9, x9, #1\n"
+ ".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
+ ".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
+ ".inst 0x4482880f // srshl z15.s, p2/M, z15.s, z0.s\n"
+ "smin z9.s, p2/M, z9.s, z6.s\n"
+ ".inst 0x44828834 // srshl z20.s, p2/M, z20.s, z1.s\n"
+ "add z10.s, z10.s, z4.s\n"
+ "add z11.s, z11.s, z4.s\n"
+ "add z15.s, z15.s, z4.s\n"
+ "add z20.s, z20.s, z4.s\n"
"smax z9.s, p2/M, z9.s, z5.s\n"
- "smax z10.s, p2/M, z10.s, z5.s\n"
+ "smin z10.s, p2/M, z10.s, z6.s\n"
+ "smin z11.s, p2/M, z11.s, z6.s\n"
+ "smin z15.s, p2/M, z15.s, z6.s\n"
"uzp1 z8.h, z8.h, z9.h\n"
- "st1b { z31.b }, p1, [x11]\n"
+ "smax z10.s, p2/M, z10.s, z5.s\n"
"smax z11.s, p2/M, z11.s, z5.s\n"
"smax z15.s, p2/M, z15.s, z5.s\n"
+ "smin z20.s, p2/M, z20.s, z6.s\n"
+ ".inst 0x44828855 // srshl z21.s, p2/M, z21.s, z2.s\n"
"uzp1 z9.h, z10.h, z11.h\n"
+ ".inst 0x44828876 // srshl z22.s, p2/M, z22.s, z3.s\n"
"uzp1 z8.b, z8.b, z9.b\n"
+ "st1b { z8.b }, p1, [x23]\n"
+ "add z21.s, z21.s, z4.s\n"
"smax z20.s, p2/M, z20.s, z5.s\n"
- "smax z21.s, p2/M, z21.s, z5.s\n"
+ "add z22.s, z22.s, z4.s\n"
+ ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
+ "smin z21.s, p2/M, z21.s, z6.s\n"
"uzp1 z15.h, z15.h, z20.h\n"
- "st1b { z8.b }, p1, [x24]\n"
+ "smin z22.s, p2/M, z22.s, z6.s\n"
+ "add z16.s, z16.s, z4.s\n"
+ "smax z21.s, p2/M, z21.s, z5.s\n"
+ ".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
"smax z22.s, p2/M, z22.s, z5.s\n"
- "smax z16.s, p2/M, z16.s, z5.s\n"
+ "smin z16.s, p2/M, z16.s, z6.s\n"
+ ".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
+ "add z17.s, z17.s, z4.s\n"
"uzp1 z20.h, z21.h, z22.h\n"
+ "smax z16.s, p2/M, z16.s, z5.s\n"
+ "add z18.s, z18.s, z4.s\n"
"uzp1 z15.b, z15.b, z20.b\n"
+ "st1b { z15.b }, p1, [x22]\n"
+ "smin z17.s, p2/M, z17.s, z6.s\n"
+ "smin z18.s, p2/M, z18.s, z6.s\n"
+ ".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
+ ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
+ ".inst 0x44828839 // srshl z25.s, p2/M, z25.s, z1.s\n"
"smax z17.s, p2/M, z17.s, z5.s\n"
- "smax z18.s, p2/M, z18.s, z5.s\n"
+ "add z19.s, z19.s, z4.s\n"
+ "add z24.s, z24.s, z4.s\n"
+ "add z25.s, z25.s, z4.s\n"
"uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z15.b }, p1, [x23]\n"
+ "smax z18.s, p2/M, z18.s, z5.s\n"
+ "smin z19.s, p2/M, z19.s, z6.s\n"
+ "smin z24.s, p2/M, z24.s, z6.s\n"
+ "smin z25.s, p2/M, z25.s, z6.s\n"
+ ".inst 0x4482885a // srshl z26.s, p2/M, z26.s, z2.s\n"
"smax z19.s, p2/M, z19.s, z5.s\n"
"smax z24.s, p2/M, z24.s, z5.s\n"
+ "smax z25.s, p2/M, z25.s, z5.s\n"
+ "add z26.s, z26.s, z4.s\n"
"uzp1 z17.h, z18.h, z19.h\n"
+ ".inst 0x4482887b // srshl z27.s, p2/M, z27.s, z3.s\n"
+ "uzp1 z24.h, z24.h, z25.h\n"
"uzp1 z16.b, z16.b, z17.b\n"
- "smax z25.s, p2/M, z25.s, z5.s\n"
+ "st1b { z16.b }, p1, [x21]\n"
+ "add z27.s, z27.s, z4.s\n"
+ "smin z26.s, p2/M, z26.s, z6.s\n"
+ "smin z27.s, p2/M, z27.s, z6.s\n"
"smax z26.s, p2/M, z26.s, z5.s\n"
- "uzp1 z24.h, z24.h, z25.h\n"
- "st1b { z16.b }, p1, [x22]\n"
"smax z27.s, p2/M, z27.s, z5.s\n"
"uzp1 z25.h, z26.h, z27.h\n"
"uzp1 z24.b, z24.b, z25.b\n"
- "st1b { z24.b }, p1, [x21]\n"
- "addvl x11, x11, #1\n"
+ "st1b { z24.b }, p1, [x20]\n"
"65:" // Height 5: Writeback done
"decw x10, ALL, MUL #4\n"
"cmp x10, XZR\n"
"bgt 54b\n"
"b 80f\n"
"66:" // Height 6
- "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x20, #0x6\n"
- "mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
+ "mov x11, %x[col_bias]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
+ "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x11, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x20, #0x6\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
"67:" // Height 6: Column loop
- "mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
+ "mov x19, #0x0\n"
"mov z9.s, #0x0\n"
+ "whilelt p1.b, x19, x10\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
@@ -1887,120 +1887,120 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"mov z30.s, #0x0\n"
"mov z31.s, #0x0\n"
"68:" // Height 6: setup done
- "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
"69:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w26, [x20, x27, LSL #0x2]\n"
"tbz %x[flags], #3, 70f\n"
- "ldr x21, [%x[input_ptr], x28, LSL #0x3]\n"
- "add x21, x21, x20, LSL #3\n"
- "ldr x26, [x21, #0x0]\n"
- "ldr x25, [x21, #0x8]\n"
- "ldr x24, [x21, #0x10]\n"
- "ldr x23, [x21, #0x18]\n"
- "ldr x22, [x21, #0x20]\n"
- "ldr x21, [x21, #0x28]\n"
- "cbnz x28, 71f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x26, x26, x20\n"
- "add x25, x25, x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add x22, x22, x20\n"
- "add x21, x21, x20\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 71f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19\n"
+ "add x24, x24, x19\n"
+ "add x23, x23, x19\n"
+ "add x22, x22, x19\n"
+ "add x21, x21, x19\n"
+ "add x20, x20, x19\n"
"b 71f\n"
"70:" // Height 6: setup direct input
- "mov x26, %x[input_ptr]\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19\n"
+ "add x23, x24, x19\n"
+ "add x22, x23, x19\n"
+ "add x21, x22, x19\n"
+ "add x20, x21, x19\n"
"71:" // Height 6: input setup done
- "cmp x27, #0x10\n"
+ "cmp x26, #0x10\n"
"ble 73f\n"
"72:" // Height 6: Multiply loop: Main loop head
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "sub x26, x26, #0x10\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z4.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
+ "cmp x26, #0x10\n"
"trn2 z1.d, z1.d, z2.d\n"
+ "ld1rqb { z4.b }, p0/Z, [x22]\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1rqb { z5.b }, p0/Z, [x21]\n"
+ "add x24, x24, #0x10\n"
"trn1 z2.d, z3.d, z4.d\n"
- "ld1rqb { z5.b }, p0/Z, [x22]\n"
- "ld1rqb { z6.b }, p0/Z, [x21]\n"
+ "ld1rqb { z6.b }, p0/Z, [x20]\n"
+ "add x23, x23, #0x10\n"
"trn2 z3.d, z3.d, z4.d\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
+ "add x20, x20, #0x10\n"
"trn1 z4.d, z5.d, z6.d\n"
"trn2 z5.d, z5.d, z6.d\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
- ".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
- ".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
".inst 0x45079898 // smmla z24.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "sub x27, x27, #0x10\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
".inst 0x45069854 // smmla z20.s, z2.b, z6.b\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
".inst 0x4506989c // smmla z28.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
- "add x25, x25, #0x10\n"
".inst 0x45079851 // smmla z17.s, z2.b, z7.b\n"
".inst 0x45079899 // smmla z25.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
".inst 0x45069855 // smmla z21.s, z2.b, z6.b\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
".inst 0x4506989d // smmla z29.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
- "add x21, x21, #0x10\n"
".inst 0x45079852 // smmla z18.s, z2.b, z7.b\n"
".inst 0x4507989a // smmla z26.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
".inst 0x45069856 // smmla z22.s, z2.b, z6.b\n"
".inst 0x4506989e // smmla z30.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #16\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
".inst 0x45079853 // smmla z19.s, z2.b, z7.b\n"
".inst 0x4507989b // smmla z27.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-8, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-8, MUL VL]\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
".inst 0x45069857 // smmla z23.s, z2.b, z6.b\n"
".inst 0x4506989f // smmla z31.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-7, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-7, MUL VL]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
".inst 0x45079870 // smmla z16.s, z3.b, z7.b\n"
".inst 0x450798b8 // smmla z24.s, z5.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-6, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-6, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
".inst 0x45069874 // smmla z20.s, z3.b, z6.b\n"
".inst 0x450698bc // smmla z28.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-5, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
".inst 0x45079871 // smmla z17.s, z3.b, z7.b\n"
".inst 0x450798b9 // smmla z25.s, z5.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-4, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
".inst 0x45069875 // smmla z21.s, z3.b, z6.b\n"
".inst 0x450698bd // smmla z29.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-3, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
".inst 0x45079872 // smmla z18.s, z3.b, z7.b\n"
".inst 0x450798ba // smmla z26.s, z5.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #-2, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-2, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
".inst 0x45069876 // smmla z22.s, z3.b, z6.b\n"
".inst 0x450698be // smmla z30.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #-1, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-1, MUL VL]\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x45079873 // smmla z19.s, z3.b, z7.b\n"
".inst 0x450798bb // smmla z27.s, z5.b, z7.b\n"
@@ -2009,81 +2009,81 @@ void sve_hybrid_s8qs_mmla_6x4VL (
".inst 0x450698bf // smmla z31.s, z5.b, z6.b\n"
"bgt 72b\n"
"73:" // Height 6: Multiply loop: Single iteration only
- "whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z2.b }, p0/Z, [x25]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "whilelt p0.b, XZR, x26\n"
+ "subs x26, x26, #0x8\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "ld1rqb { z2.b }, p0/Z, [x24]\n"
"trn1 z0.d, z1.d, z2.d\n"
- "ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z4.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x23]\n"
"trn2 z1.d, z1.d, z2.d\n"
+ "ld1rqb { z4.b }, p0/Z, [x22]\n"
+ "ld1rqb { z5.b }, p0/Z, [x21]\n"
+ ".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1rqb { z6.b }, p0/Z, [x20]\n"
"trn1 z2.d, z3.d, z4.d\n"
- "ld1rqb { z5.b }, p0/Z, [x22]\n"
- "ld1rqb { z6.b }, p0/Z, [x21]\n"
"trn2 z3.d, z3.d, z4.d\n"
"trn1 z4.d, z5.d, z6.d\n"
"trn2 z5.d, z5.d, z6.d\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
- ".inst 0x45079808 // smmla z8.s, z0.b, z7.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
".inst 0x45079850 // smmla z16.s, z2.b, z7.b\n"
".inst 0x45079898 // smmla z24.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "subs x27, x27, #0x8\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x4506980c // smmla z12.s, z0.b, z6.b\n"
".inst 0x45069854 // smmla z20.s, z2.b, z6.b\n"
".inst 0x4506989c // smmla z28.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
".inst 0x45079851 // smmla z17.s, z2.b, z7.b\n"
".inst 0x45079899 // smmla z25.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506980d // smmla z13.s, z0.b, z6.b\n"
".inst 0x45069855 // smmla z21.s, z2.b, z6.b\n"
".inst 0x4506989d // smmla z29.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507980a // smmla z10.s, z0.b, z7.b\n"
".inst 0x45079852 // smmla z18.s, z2.b, z7.b\n"
".inst 0x4507989a // smmla z26.s, z4.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506980e // smmla z14.s, z0.b, z6.b\n"
".inst 0x45069856 // smmla z22.s, z2.b, z6.b\n"
".inst 0x4506989e // smmla z30.s, z4.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
- "addvl x9, x9, #8\n"
".inst 0x45079853 // smmla z19.s, z2.b, z7.b\n"
".inst 0x4507989b // smmla z27.s, z4.b, z7.b\n"
".inst 0x4506980f // smmla z15.s, z0.b, z6.b\n"
".inst 0x45069857 // smmla z23.s, z2.b, z6.b\n"
".inst 0x4506989f // smmla z31.s, z4.b, z6.b\n"
"ble 74f\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
".inst 0x45079828 // smmla z8.s, z1.b, z7.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
".inst 0x45079870 // smmla z16.s, z3.b, z7.b\n"
".inst 0x450798b8 // smmla z24.s, z5.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
".inst 0x4506982c // smmla z12.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
".inst 0x45069874 // smmla z20.s, z3.b, z6.b\n"
".inst 0x450698bc // smmla z28.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #3, MUL VL]\n"
".inst 0x45079829 // smmla z9.s, z1.b, z7.b\n"
".inst 0x45079871 // smmla z17.s, z3.b, z7.b\n"
".inst 0x450798b9 // smmla z25.s, z5.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
".inst 0x4506982d // smmla z13.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #4, MUL VL]\n"
".inst 0x45069875 // smmla z21.s, z3.b, z6.b\n"
".inst 0x450698bd // smmla z29.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x4507982a // smmla z10.s, z1.b, z7.b\n"
".inst 0x45079872 // smmla z18.s, z3.b, z7.b\n"
".inst 0x450798ba // smmla z26.s, z5.b, z7.b\n"
+ "ld1b { z7.b }, p2/Z, [x28, #6, MUL VL]\n"
".inst 0x4506982e // smmla z14.s, z1.b, z6.b\n"
- "ld1b { z7.b }, p2/Z, [x9, #6, MUL VL]\n"
".inst 0x45069876 // smmla z22.s, z3.b, z6.b\n"
".inst 0x450698be // smmla z30.s, z5.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #8\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
".inst 0x4507982b // smmla z11.s, z1.b, z7.b\n"
".inst 0x45079873 // smmla z19.s, z3.b, z7.b\n"
".inst 0x450798bb // smmla z27.s, z5.b, z7.b\n"
@@ -2091,33 +2091,33 @@ void sve_hybrid_s8qs_mmla_6x4VL (
".inst 0x45069877 // smmla z23.s, z3.b, z6.b\n"
".inst 0x450698bf // smmla z31.s, z5.b, z6.b\n"
"74:" // Height 6: Multiply loop: multiply skip
- "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x28, x28, #0x1\n"
- "cmp x28, x20\n"
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
"bne 69b\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 z7.d, z8.d, z12.d\n"
- "add x24, x11, x20\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z8.d, z8.d, z12.d\n"
+ "ld1w { z0.s }, p2/Z, [x11]\n"
+ "add x23, x9, x19\n"
"uzp1 z12.d, z9.d, z13.d\n"
+ "ld1w { z1.s }, p2/Z, [x11, #1, MUL VL]\n"
"uzp2 z9.d, z9.d, z13.d\n"
- "add x23, x24, x20\n"
- "ld1w { z0.s }, p2/Z, [x14]\n"
+ "ld1w { z2.s }, p2/Z, [x11, #2, MUL VL]\n"
+ "add x22, x23, x19\n"
"uzp1 z13.d, z10.d, z14.d\n"
+ "ld1w { z3.s }, p2/Z, [x11, #3, MUL VL]\n"
+ "add x21, x22, x19\n"
"uzp2 z10.d, z10.d, z14.d\n"
- "ld1w { z1.s }, p2/Z, [x14, #1, MUL VL]\n"
- "ld1w { z2.s }, p2/Z, [x14, #2, MUL VL]\n"
+ "add x20, x21, x19\n"
"uzp1 z14.d, z11.d, z15.d\n"
+ "add x19, x20, x19\n"
"uzp2 z11.d, z11.d, z15.d\n"
- "ld1w { z3.s }, p2/Z, [x14, #3, MUL VL]\n"
- "add x22, x23, x20\n"
+ "addvl x11, x11, #4\n"
"uzp1 z15.d, z16.d, z20.d\n"
"uzp2 z16.d, z16.d, z20.d\n"
- "add x21, x22, x20\n"
- "add x20, x21, x20\n"
"uzp1 z20.d, z17.d, z21.d\n"
"uzp2 z17.d, z17.d, z21.d\n"
- "addvl x14, x14, #4\n"
"uzp1 z21.d, z18.d, z22.d\n"
"uzp2 z18.d, z18.d, z22.d\n"
"uzp1 z22.d, z19.d, z23.d\n"
@@ -2163,20 +2163,20 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"ld1w { z2.s }, p2/Z, [x12, #2, MUL VL]\n"
"ld1w { z6.s }, p2/Z, [x13, #2, MUL VL]\n"
"ld1w { z3.s }, p2/Z, [x12, #3, MUL VL]\n"
- "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x12, x12, #4\n"
+ "ld1w { z7.s }, p2/Z, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
"b 76f\n"
"75:" // Height 6: per layer parameters
- "add x26, %x[qp], %[per_layer_right_shift]\n"
- "add x25, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z0.s }, p2/Z, [x26]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
+ "add x24, %x[qp], %[per_layer_right_shift]\n"
+ "ld1rw { z0.s }, p2/Z, [x24]\n"
"mov z1.d, z0.d\n"
- "mov z5.d, z4.d\n"
+ "add x24, %x[qp], %[per_layer_mul]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
"mov z2.d, z0.d\n"
- "mov z6.d, z4.d\n"
"mov z3.d, z0.d\n"
+ "mov z5.d, z4.d\n"
+ "mov z6.d, z4.d\n"
"mov z7.d, z4.d\n"
"76:" // Height 6: parameters loaded
".inst 0x04a477ff // sqrdmulh z31.s, z31.s, z4.s\n"
@@ -2207,223 +2207,223 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"and z4.d, z31.d, z0.d\n"
"and z5.d, z12.d, z1.d\n"
"and z6.d, z13.d, z2.d\n"
- "and z7.d, z14.d, z3.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z31.s, z31.s, z4.s\n"
"sqadd z12.s, z12.s, z5.s\n"
"sqadd z13.s, z13.s, z6.s\n"
- "sqadd z14.s, z14.s, z7.s\n"
+ "and z7.d, z14.d, z3.d\n"
"and z4.d, z8.d, z0.d\n"
"and z5.d, z9.d, z1.d\n"
- "and z6.d, z10.d, z2.d\n"
- "and z7.d, z11.d, z3.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
+ "sqadd z14.s, z14.s, z7.s\n"
"sqadd z8.s, z8.s, z4.s\n"
"sqadd z9.s, z9.s, z5.s\n"
+ "and z6.d, z10.d, z2.d\n"
+ "and z7.d, z11.d, z3.d\n"
+ "and z4.d, z15.d, z0.d\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
"sqadd z10.s, z10.s, z6.s\n"
"sqadd z11.s, z11.s, z7.s\n"
- "and z4.d, z15.d, z0.d\n"
+ "sqadd z15.s, z15.s, z4.s\n"
"and z5.d, z20.d, z1.d\n"
"and z6.d, z21.d, z2.d\n"
"and z7.d, z22.d, z3.d\n"
- "asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
"asr z7.s, z7.s, #0x1f\n"
- "sqadd z15.s, z15.s, z4.s\n"
"sqadd z20.s, z20.s, z5.s\n"
"sqadd z21.s, z21.s, z6.s\n"
"sqadd z22.s, z22.s, z7.s\n"
"and z4.d, z16.d, z0.d\n"
"and z5.d, z17.d, z1.d\n"
"and z6.d, z18.d, z2.d\n"
- "and z7.d, z19.d, z3.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z16.s, z16.s, z4.s\n"
"sqadd z17.s, z17.s, z5.s\n"
"sqadd z18.s, z18.s, z6.s\n"
- "sqadd z19.s, z19.s, z7.s\n"
+ "and z7.d, z19.d, z3.d\n"
"and z4.d, z23.d, z0.d\n"
"and z5.d, z28.d, z1.d\n"
- "and z6.d, z29.d, z2.d\n"
- "and z7.d, z30.d, z3.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
- "asr z6.s, z6.s, #0x1f\n"
- "asr z7.s, z7.s, #0x1f\n"
+ "sqadd z19.s, z19.s, z7.s\n"
"sqadd z23.s, z23.s, z4.s\n"
"sqadd z28.s, z28.s, z5.s\n"
+ "and z6.d, z29.d, z2.d\n"
+ "and z7.d, z30.d, z3.d\n"
+ "and z4.d, z24.d, z0.d\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
"sqadd z29.s, z29.s, z6.s\n"
"sqadd z30.s, z30.s, z7.s\n"
- "and z4.d, z24.d, z0.d\n"
+ "sqadd z24.s, z24.s, z4.s\n"
"and z5.d, z25.d, z1.d\n"
"and z6.d, z26.d, z2.d\n"
"and z7.d, z27.d, z3.d\n"
- "asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
"asr z7.s, z7.s, #0x1f\n"
- "sqadd z24.s, z24.s, z4.s\n"
"sqadd z25.s, z25.s, z5.s\n"
"sqadd z26.s, z26.s, z6.s\n"
"sqadd z27.s, z27.s, z7.s\n"
"77:" // Height 6: no shift correction
- "add x25, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x25]\n"
".inst 0x4482881f // srshl z31.s, p2/M, z31.s, z0.s\n"
- "add z31.s, z31.s, z4.s\n"
+ "add x24, %x[qp], %[c_offset]\n"
+ "ld1rw { z4.s }, p2/Z, [x24]\n"
".inst 0x4482882c // srshl z12.s, p2/M, z12.s, z1.s\n"
+ "add x24, %x[qp], %[minval]\n"
".inst 0x4482884d // srshl z13.s, p2/M, z13.s, z2.s\n"
- "add z12.s, z12.s, z4.s\n"
- "add z13.s, z13.s, z4.s\n"
+ "ld1rw { z5.s }, p2/Z, [x24]\n"
+ "add x24, %x[qp], %[maxval]\n"
".inst 0x4482886e // srshl z14.s, p2/M, z14.s, z3.s\n"
+ "ld1rw { z6.s }, p2/Z, [x24]\n"
".inst 0x44828808 // srshl z8.s, p2/M, z8.s, z0.s\n"
+ "add z31.s, z31.s, z4.s\n"
+ "add z12.s, z12.s, z4.s\n"
+ "add z13.s, z13.s, z4.s\n"
"add z14.s, z14.s, z4.s\n"
"add z8.s, z8.s, z4.s\n"
- ".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
- ".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
- "add z9.s, z9.s, z4.s\n"
- "add z10.s, z10.s, z4.s\n"
- ".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
- ".inst 0x4482880f // srshl z15.s, p2/M, z15.s, z0.s\n"
- "add z11.s, z11.s, z4.s\n"
- "add z15.s, z15.s, z4.s\n"
- ".inst 0x44828834 // srshl z20.s, p2/M, z20.s, z1.s\n"
- ".inst 0x44828855 // srshl z21.s, p2/M, z21.s, z2.s\n"
- "add z20.s, z20.s, z4.s\n"
- "add z21.s, z21.s, z4.s\n"
- ".inst 0x44828876 // srshl z22.s, p2/M, z22.s, z3.s\n"
- ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z22.s, z22.s, z4.s\n"
- "add z16.s, z16.s, z4.s\n"
- ".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
- ".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
- "add z17.s, z17.s, z4.s\n"
- "add z18.s, z18.s, z4.s\n"
- ".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
- ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
- "add z19.s, z19.s, z4.s\n"
- "add z23.s, z23.s, z4.s\n"
- ".inst 0x4482883c // srshl z28.s, p2/M, z28.s, z1.s\n"
- ".inst 0x4482885d // srshl z29.s, p2/M, z29.s, z2.s\n"
- "add z28.s, z28.s, z4.s\n"
- "add z29.s, z29.s, z4.s\n"
- ".inst 0x4482887e // srshl z30.s, p2/M, z30.s, z3.s\n"
- ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
- "add z30.s, z30.s, z4.s\n"
- "add z24.s, z24.s, z4.s\n"
- ".inst 0x44828839 // srshl z25.s, p2/M, z25.s, z1.s\n"
- ".inst 0x4482885a // srshl z26.s, p2/M, z26.s, z2.s\n"
- "add z25.s, z25.s, z4.s\n"
- "add z26.s, z26.s, z4.s\n"
- ".inst 0x4482887b // srshl z27.s, p2/M, z27.s, z3.s\n"
- "add x25, %x[qp], %[maxval]\n"
- "ld1rw { z6.s }, p2/Z, [x25]\n"
- "add z27.s, z27.s, z4.s\n"
- "add x25, %x[qp], %[minval]\n"
- "ld1rw { z5.s }, p2/Z, [x25]\n"
"smin z31.s, p2/M, z31.s, z6.s\n"
"smin z12.s, p2/M, z12.s, z6.s\n"
"smin z13.s, p2/M, z13.s, z6.s\n"
"smin z14.s, p2/M, z14.s, z6.s\n"
- "smin z8.s, p2/M, z8.s, z6.s\n"
- "smin z9.s, p2/M, z9.s, z6.s\n"
- "smin z10.s, p2/M, z10.s, z6.s\n"
- "smin z11.s, p2/M, z11.s, z6.s\n"
- "smin z15.s, p2/M, z15.s, z6.s\n"
- "smin z20.s, p2/M, z20.s, z6.s\n"
- "smin z21.s, p2/M, z21.s, z6.s\n"
- "smin z22.s, p2/M, z22.s, z6.s\n"
- "smin z16.s, p2/M, z16.s, z6.s\n"
- "smin z17.s, p2/M, z17.s, z6.s\n"
- "smin z18.s, p2/M, z18.s, z6.s\n"
- "smin z19.s, p2/M, z19.s, z6.s\n"
- "smin z23.s, p2/M, z23.s, z6.s\n"
- "smin z28.s, p2/M, z28.s, z6.s\n"
- "smin z29.s, p2/M, z29.s, z6.s\n"
- "smin z30.s, p2/M, z30.s, z6.s\n"
- "smin z24.s, p2/M, z24.s, z6.s\n"
- "smin z25.s, p2/M, z25.s, z6.s\n"
- "smin z26.s, p2/M, z26.s, z6.s\n"
- "smin z27.s, p2/M, z27.s, z6.s\n"
"smax z31.s, p2/M, z31.s, z5.s\n"
"smax z12.s, p2/M, z12.s, z5.s\n"
"smax z13.s, p2/M, z13.s, z5.s\n"
- "uzp1 z31.h, z31.h, z12.h\n"
"smax z14.s, p2/M, z14.s, z5.s\n"
- "smax z8.s, p2/M, z8.s, z5.s\n"
+ "smin z8.s, p2/M, z8.s, z6.s\n"
+ "uzp1 z31.h, z31.h, z12.h\n"
+ ".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
"uzp1 z12.h, z13.h, z14.h\n"
+ "smax z8.s, p2/M, z8.s, z5.s\n"
"uzp1 z31.b, z31.b, z12.b\n"
+ "st1b { z31.b }, p1, [x9]\n"
+ "add z9.s, z9.s, z4.s\n"
+ "addvl x9, x9, #1\n"
+ ".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
+ ".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
+ ".inst 0x4482880f // srshl z15.s, p2/M, z15.s, z0.s\n"
+ "smin z9.s, p2/M, z9.s, z6.s\n"
+ ".inst 0x44828834 // srshl z20.s, p2/M, z20.s, z1.s\n"
+ "add z10.s, z10.s, z4.s\n"
+ "add z11.s, z11.s, z4.s\n"
+ "add z15.s, z15.s, z4.s\n"
+ "add z20.s, z20.s, z4.s\n"
"smax z9.s, p2/M, z9.s, z5.s\n"
- "smax z10.s, p2/M, z10.s, z5.s\n"
+ "smin z10.s, p2/M, z10.s, z6.s\n"
+ "smin z11.s, p2/M, z11.s, z6.s\n"
+ "smin z15.s, p2/M, z15.s, z6.s\n"
"uzp1 z8.h, z8.h, z9.h\n"
- "st1b { z31.b }, p1, [x11]\n"
+ "smax z10.s, p2/M, z10.s, z5.s\n"
"smax z11.s, p2/M, z11.s, z5.s\n"
"smax z15.s, p2/M, z15.s, z5.s\n"
+ "smin z20.s, p2/M, z20.s, z6.s\n"
+ ".inst 0x44828855 // srshl z21.s, p2/M, z21.s, z2.s\n"
"uzp1 z9.h, z10.h, z11.h\n"
+ ".inst 0x44828876 // srshl z22.s, p2/M, z22.s, z3.s\n"
"uzp1 z8.b, z8.b, z9.b\n"
+ "st1b { z8.b }, p1, [x23]\n"
+ "add z21.s, z21.s, z4.s\n"
"smax z20.s, p2/M, z20.s, z5.s\n"
- "smax z21.s, p2/M, z21.s, z5.s\n"
+ "add z22.s, z22.s, z4.s\n"
+ ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
+ "smin z21.s, p2/M, z21.s, z6.s\n"
"uzp1 z15.h, z15.h, z20.h\n"
- "st1b { z8.b }, p1, [x24]\n"
+ "smin z22.s, p2/M, z22.s, z6.s\n"
+ "add z16.s, z16.s, z4.s\n"
+ "smax z21.s, p2/M, z21.s, z5.s\n"
+ ".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
"smax z22.s, p2/M, z22.s, z5.s\n"
- "smax z16.s, p2/M, z16.s, z5.s\n"
+ "smin z16.s, p2/M, z16.s, z6.s\n"
+ ".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
+ "add z17.s, z17.s, z4.s\n"
"uzp1 z20.h, z21.h, z22.h\n"
+ "smax z16.s, p2/M, z16.s, z5.s\n"
+ "add z18.s, z18.s, z4.s\n"
"uzp1 z15.b, z15.b, z20.b\n"
+ "st1b { z15.b }, p1, [x22]\n"
+ "smin z17.s, p2/M, z17.s, z6.s\n"
+ "smin z18.s, p2/M, z18.s, z6.s\n"
+ ".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
+ ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
+ ".inst 0x4482883c // srshl z28.s, p2/M, z28.s, z1.s\n"
"smax z17.s, p2/M, z17.s, z5.s\n"
- "smax z18.s, p2/M, z18.s, z5.s\n"
+ "add z19.s, z19.s, z4.s\n"
+ "add z23.s, z23.s, z4.s\n"
+ "add z28.s, z28.s, z4.s\n"
"uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z15.b }, p1, [x23]\n"
+ "smax z18.s, p2/M, z18.s, z5.s\n"
+ "smin z19.s, p2/M, z19.s, z6.s\n"
+ "smin z23.s, p2/M, z23.s, z6.s\n"
+ "smin z28.s, p2/M, z28.s, z6.s\n"
+ ".inst 0x4482885d // srshl z29.s, p2/M, z29.s, z2.s\n"
"smax z19.s, p2/M, z19.s, z5.s\n"
"smax z23.s, p2/M, z23.s, z5.s\n"
+ "smax z28.s, p2/M, z28.s, z5.s\n"
+ "add z29.s, z29.s, z4.s\n"
"uzp1 z17.h, z18.h, z19.h\n"
+ ".inst 0x4482887e // srshl z30.s, p2/M, z30.s, z3.s\n"
+ "uzp1 z23.h, z23.h, z28.h\n"
"uzp1 z16.b, z16.b, z17.b\n"
- "smax z28.s, p2/M, z28.s, z5.s\n"
+ "st1b { z16.b }, p1, [x21]\n"
+ "add z30.s, z30.s, z4.s\n"
+ "smin z29.s, p2/M, z29.s, z6.s\n"
+ ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
+ ".inst 0x44828839 // srshl z25.s, p2/M, z25.s, z1.s\n"
+ "smin z30.s, p2/M, z30.s, z6.s\n"
"smax z29.s, p2/M, z29.s, z5.s\n"
- "uzp1 z23.h, z23.h, z28.h\n"
- "st1b { z16.b }, p1, [x22]\n"
+ "add z24.s, z24.s, z4.s\n"
+ "add z25.s, z25.s, z4.s\n"
"smax z30.s, p2/M, z30.s, z5.s\n"
- "smax z24.s, p2/M, z24.s, z5.s\n"
+ "smin z24.s, p2/M, z24.s, z6.s\n"
+ "smin z25.s, p2/M, z25.s, z6.s\n"
+ ".inst 0x4482885a // srshl z26.s, p2/M, z26.s, z2.s\n"
"uzp1 z28.h, z29.h, z30.h\n"
+ "smax z24.s, p2/M, z24.s, z5.s\n"
"uzp1 z23.b, z23.b, z28.b\n"
+ "st1b { z23.b }, p1, [x20]\n"
+ "add z26.s, z26.s, z4.s\n"
"smax z25.s, p2/M, z25.s, z5.s\n"
- "smax z26.s, p2/M, z26.s, z5.s\n"
+ ".inst 0x4482887b // srshl z27.s, p2/M, z27.s, z3.s\n"
+ "smin z26.s, p2/M, z26.s, z6.s\n"
"uzp1 z24.h, z24.h, z25.h\n"
- "st1b { z23.b }, p1, [x21]\n"
+ "add z27.s, z27.s, z4.s\n"
+ "smax z26.s, p2/M, z26.s, z5.s\n"
+ "smin z27.s, p2/M, z27.s, z6.s\n"
"smax z27.s, p2/M, z27.s, z5.s\n"
"uzp1 z25.h, z26.h, z27.h\n"
"uzp1 z24.b, z24.b, z25.b\n"
- "st1b { z24.b }, p1, [x20]\n"
- "addvl x11, x11, #1\n"
+ "st1b { z24.b }, p1, [x19]\n"
"78:" // Height 6: Writeback done
"decw x10, ALL, MUL #4\n"
"cmp x10, XZR\n"
"bgt 67b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 80f\n"
- "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 79f\n"
- "add x21, x21, #0x6\n"
- "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"79:" // Update direct input
- "mov x20, #0x6\n"
- "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "mov x19, #0x6\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
"b 1b\n"
"80:" // Exit
: [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
: [args_ptr] "r" (&ka), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [flags] "r" (flags), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_multiplier_ptr] "I" (offsetof(KernelArgs, multiplier_ptr)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_shift_ptr] "I" (offsetof(KernelArgs, shift_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
- : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
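
For readers tracing the epilogue hunks above: each 32-bit accumulator is requantized with sqrdmulh against the multiplier, a rounding right shift (srshl with a non-positive shift), the c_offset add, an smin/smax clamp, and a uzp1/st1b narrowing store. The following is a minimal scalar sketch of that sequence, not the library's API; the function name is illustrative, the shift is assumed to be stored as the negative amount consumed directly by srshl, and the sqrdmulh saturation corner case plus the and/asr/sqadd fixup are left out.

// requantize_sketch.cpp -- scalar illustration only, not part of the generated kernel.
#include <algorithm>
#include <cstdint>

// One accumulator through the epilogue: sqrdmulh, rounding right shift,
// c_offset add, clamp to [minval, maxval], narrow to int8.
static int8_t requantize_one(int32_t acc, int32_t multiplier, int32_t shift,
                             int32_t c_offset, int32_t minval, int32_t maxval)
{
    // sqrdmulh: rounding high half of the doubled product acc * multiplier
    // (saturation at the INT32_MIN * INT32_MIN corner is omitted here).
    int64_t prod = static_cast<int64_t>(acc) * multiplier;
    int32_t high = static_cast<int32_t>((prod + (1LL << 30)) >> 31);

    // srshl by a negative amount acts as a rounding arithmetic shift right.
    int32_t rshift = -shift;
    if (rshift > 0) {
        high = static_cast<int32_t>((static_cast<int64_t>(high) + (1LL << (rshift - 1))) >> rshift);
    }

    int32_t out = high + c_offset;                  // "add z*.s, z*.s, z4.s"
    out = std::min(maxval, std::max(minval, out));  // smin / smax clamp
    return static_cast<int8_t>(out);                // uzp1 + st1b narrow and store
}

The "no shift correction" branches in the diff skip the and/asr/sqadd blocks, which appear to adjust negative accumulators before the rounding shift; that refinement is deliberately absent from the sketch above.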