From 8a164884dddf769643cf3b9f7f94e43cb4f3c20b Mon Sep 17 00:00:00 2001 From: ramelg01 Date: Thu, 7 Apr 2022 02:42:52 +0100 Subject: =?UTF-8?q?Update=20Neon=E2=84=A2=20depthwise=20kernel?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Reduce duplication and simplify overall structure. - Improve multi-threaded performance by sharing more data in lower-level caches. Partially Resolves: COMPMID-5054 Signed-off-by: Ramy Elgammal Change-Id: Iac747f39b21c540122fa75218762631c4d787911 Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/7449 Tested-by: Arm Jenkins Reviewed-by: Andrew Mundy Reviewed-by: Sheri Zhang Comments-Addressed: Arm Jenkins --- .../generic.cpp | 1740 ++++++++++---------- 1 file changed, 857 insertions(+), 883 deletions(-) (limited to 'src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp') diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp index 8d22836e5b..872f665b40 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -46,7 +46,7 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( struct Params { long unsigned int n_channels; - const uint8_t *weights; + const void *weights; const int32_t *bias; const arm_gemm::Requantize32 *requant; const int32_t *const requant_muls; @@ -57,7 +57,7 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( Params( long unsigned int n_channels, const uint8_t *const *inptrs_raw, - const uint8_t *const weights, + const void *const weights, const int32_t *const bias, const arm_gemm::Requantize32 &qp, const int32_t *const requant_muls, @@ -100,75 +100,75 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( requant_muls, requant_shifts, outptrs); __asm__ __volatile__( - "ldr x4, [%x[params], %[offsetof_Params_n_channels]]\n" - "mov x5, #0x0\n" - "ldr x6, [%x[params], %[offsetof_Params_weights]]\n" - "mov x7, #0x0\n" - "ldr x22, [%x[params], %[offsetof_Params_requant]]\n" - "add x8, %x[params], %[offsetof_Params_inptrs]\n" - "ldr x17, [%x[params], %[offsetof_Params_requant_muls]]\n" - "lsr x16, x4, #0x3\n" - "ldr x15, [%x[params], %[offsetof_Params_requant_shifts]]\n" - "add x19, x22, %[offsetof_Requantize32_a_offset]\n" - "ldr x21, [%x[params], %[offsetof_Params_outptrs]]\n" - "add x20, x22, %[offsetof_Requantize32_b_offset]\n" - "ld1r { v12.16b }, [x19]\n" - "add x19, x22, %[offsetof_Requantize32_c_offset]\n" - "ld1r { v13.16b }, [x20]\n" - "add x20, x22, %[offsetof_Requantize32_minval]\n" - "ld1r { v11.4s }, [x19]\n" - "add x19, x22, %[offsetof_Requantize32_maxval]\n" - "ld1r { v19.4s }, [x20]\n" - "ld1r { v14.4s }, [x19]\n" - "ldp x14, x13, [x21, #0x0]\n" - "ldp x12, x11, [x21, #0x10]\n" + "ldr x19, [%x[params], %[offsetof_Params_requant]]\n" + "ldr x8, [%x[params], %[offsetof_Params_n_channels]]\n" + "add x24, x19, %[offsetof_Requantize32_a_offset]\n" + "add x23, x19, %[offsetof_Requantize32_b_offset]\n" + "ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n" + "add x21, x19, %[offsetof_Requantize32_c_offset]\n" + "add x20, x19, 
%[offsetof_Requantize32_minval]\n" + "ldr x17, [%x[params], %[offsetof_Params_weights]]\n" + "add x19, x19, %[offsetof_Requantize32_maxval]\n" + "ld1r { v12.16b }, [x24]\n" + "ld1r { v13.16b }, [x23]\n" + "lsr x16, x8, #0x3\n" + "ld1r { v11.8h }, [x21]\n" + "ld1r { v17.8h }, [x20]\n" + "mov x15, #0x0\n" + "mov x14, #0x0\n" + "ld1r { v14.8h }, [x19]\n" + "ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n" + "add x12, %x[params], %[offsetof_Params_inptrs]\n" + "ldr x11, [%x[params], %[offsetof_Params_requant_shifts]]\n" + "ldp x10, x9, [x22, #0x0]\n" + "ldp x28, x27, [x22, #0x10]\n" "cbz x16, 3f\n" - "subs x16, x16, #0x1\n" "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" "ldr q15, [x19, #0x0]\n" - "mov v20.16b, v15.16b\n" + "subs x16, x16, #0x1\n" + "mov v9.16b, v15.16b\n" "ldr q10, [x19, #0x10]\n" "add x19, x19, #0x20\n" - "mov v16.16b, v15.16b\n" "str x19, [%x[params], %[offsetof_Params_bias]]\n" - "mov v17.16b, v15.16b\n" - "ldr d0, [x6, #0x0]\n" + "ldr d0, [x17, #0x0]\n" + "ldr d1, [x17, #0x8]\n" + "ldr d2, [x17, #0x10]\n" + "mov v16.16b, v10.16b\n" + "mov v22.16b, v15.16b\n" + "ldr d3, [x17, #0x18]\n" + "ldr d4, [x17, #0x20]\n" + "mov v21.16b, v10.16b\n" + "mov v23.16b, v15.16b\n" + "ldr d5, [x17, #0x28]\n" + "ldr d6, [x17, #0x30]\n" + "mov v18.16b, v10.16b\n" "usubl v0.8h, v0.8b, v13.8b\n" - "mov v23.16b, v10.16b\n" - "ldr d1, [x6, #0x8]\n" - "mov v22.16b, v10.16b\n" - "ldr d2, [x6, #0x10]\n" + "ldr d7, [x17, #0x38]\n" + "ldr d8, [x17, #0x40]\n" "usubl v1.8h, v1.8b, v13.8b\n" - "mov v18.16b, v10.16b\n" - "ldr d3, [x6, #0x18]\n" - "ldr d4, [x6, #0x20]\n" "usubl v2.8h, v2.8b, v13.8b\n" - "ldr d5, [x6, #0x28]\n" + "ldp x26, x25, [x12, #0x0]\n" + "ldp x24, x23, [x12, #0x10]\n" "usubl v3.8h, v3.8b, v13.8b\n" - "ldr d6, [x6, #0x30]\n" - "ldr d7, [x6, #0x38]\n" "usubl v4.8h, v4.8b, v13.8b\n" - "ldr d8, [x6, #0x40]\n" + "ldp x22, x21, [x12, #0x20]\n" + "ldp x20, x19, [x12, #0x30]\n" "usubl v5.8h, v5.8b, v13.8b\n" - "ldp x26, x25, [x8, #0x0]\n" "usubl v6.8h, v6.8b, v13.8b\n" - "ldp x24, x23, [x8, #0x10]\n" + "ldr d31, [x26, x15]\n" + "ldr d30, [x25, x15]\n" "usubl v7.8h, v7.8b, v13.8b\n" "usubl v8.8h, v8.8b, v13.8b\n" - "ldp x22, x21, [x8, #0x20]\n" - "ldp x20, x19, [x8, #0x30]\n" - "ldr d31, [x26, x5]\n" + "ldr d29, [x24, x15]\n" + "ldr d28, [x23, x15]\n" "usubl v31.8h, v31.8b, v12.8b\n" - "ldr d30, [x25, x5]\n" - "ldr d29, [x24, x5]\n" "usubl v30.8h, v30.8b, v12.8b\n" - "ldr d28, [x23, x5]\n" - "ldr d27, [x22, x5]\n" + "ldr d27, [x22, x15]\n" + "ldr d26, [x21, x15]\n" "usubl v29.8h, v29.8b, v12.8b\n" - "ldr d26, [x21, x5]\n" "usubl v28.8h, v28.8b, v12.8b\n" - "ldr d25, [x20, x5]\n" - "ldr d24, [x19, x5]\n" + "ldr d25, [x20, x15]\n" + "ldr d24, [x19, x15]\n" "usubl v27.8h, v27.8b, v12.8b\n" "usubl v26.8h, v26.8b, v12.8b\n" "usubl v25.8h, v25.8b, v12.8b\n" @@ -176,259 +176,251 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "beq 2f\n" "1:" // Loop "smlal v15.4s, v31.4h, v8.4h\n" - "ldr x23, [x8, #0x40]\n" - "add x6, x6, #0x48\n" "smlal2 v10.4s, v31.8h, v8.8h\n" - "ldr x22, [x8, #0x48]\n" - "subs x16, x16, #0x1\n" - "smlal v20.4s, v31.4h, v6.4h\n" - "ldr x21, [x8, #0x50]\n" - "smlal2 v23.4s, v31.8h, v6.8h\n" - "ldr x20, [x8, #0x58]\n" - "smlal v16.4s, v31.4h, v2.4h\n" - "ldr x19, [x8, #0x60]\n" - "smlal2 v22.4s, v31.8h, v2.8h\n" - "ldr x10, [x8, #0x68]\n" - "smlal v17.4s, v31.4h, v0.4h\n" - "ldr x9, [x8, #0x70]\n" - "smlal2 v18.4s, v31.8h, v0.8h\n" - "ldr x28, [x8, #0x78]\n" + "ldr x24, [x12, #0x40]\n" + "ldr x23, [x12, #0x48]\n" + "smlal v9.4s, v31.4h, v6.4h\n" + "smlal2 
v16.4s, v31.8h, v6.8h\n" + "ldr x21, [x12, #0x50]\n" + "ldr x19, [x12, #0x58]\n" "smlal v15.4s, v30.4h, v0.4h\n" - "ldr x27, [x8, #0x80]\n" "smlal2 v10.4s, v30.8h, v0.8h\n" - "ldr x26, [x8, #0x88]\n" - "smlal v20.4s, v28.4h, v1.4h\n" - "ldr x25, [x8, #0x90]\n" - "smlal2 v23.4s, v28.8h, v1.8h\n" - "ldr d28, [x22, x5]\n" + "ldr x22, [x12, #0x78]\n" + "ldr x20, [x12, #0x60]\n" + "smlal v9.4s, v28.4h, v1.4h\n" + "smlal2 v16.4s, v28.8h, v1.8h\n" + "ldr d28, [x23, x15]\n" "usubl v28.8h, v28.8b, v12.8b\n" "smlal v15.4s, v29.4h, v1.4h\n" - "ldr x24, [x8, #0x98]\n" "smlal2 v10.4s, v29.8h, v1.8h\n" - "ldr d29, [x23, x5]\n" + "ldr d29, [x24, x15]\n" "usubl v29.8h, v29.8b, v12.8b\n" - "smlal v20.4s, v27.4h, v2.4h\n" - "ldr x23, [x8, #0xa0]\n" - "smlal2 v23.4s, v27.8h, v2.8h\n" - "ldr d27, [x21, x5]\n" + "smlal v9.4s, v27.4h, v2.4h\n" + "smlal2 v16.4s, v27.8h, v2.8h\n" + "ldr d27, [x21, x15]\n" "usubl v27.8h, v27.8b, v12.8b\n" "smlal v15.4s, v26.4h, v3.4h\n" - "ldr x22, [x8, #0xa8]\n" "smlal2 v10.4s, v26.8h, v3.8h\n" - "ldr d26, [x20, x5]\n" + "ldr d26, [x19, x15]\n" "usubl v26.8h, v26.8b, v12.8b\n" + "smlal v9.4s, v24.4h, v0.4h\n" + "smlal2 v16.4s, v24.8h, v0.8h\n" + "ldr x21, [x12, #0x80]\n" + "ldr x19, [x12, #0x68]\n" "smlal v15.4s, v25.4h, v4.4h\n" - "ldr x21, [x8, #0xb0]\n" "smlal2 v10.4s, v25.8h, v4.8h\n" - "ldr d25, [x19, x5]\n" + "ldr d25, [x20, x15]\n" "usubl v25.8h, v25.8b, v12.8b\n" + "smlal v9.4s, v29.4h, v4.4h\n" + "smlal2 v16.4s, v29.8h, v4.8h\n" + "ldr x20, [x12, #0x88]\n" + "ldr d29, [x19, x15]\n" "smlal v15.4s, v24.4h, v2.4h\n" - "ldr x20, [x8, #0xb8]\n" "smlal2 v10.4s, v24.8h, v2.8h\n" - "ldr x19, [x8, #0xc0]\n" - "smlal v20.4s, v24.4h, v0.4h\n" - "ldr q21, [x17, #0x0]\n" - "smlal2 v23.4s, v24.8h, v0.8h\n" - "ldr d24, [x9, x5]\n" - "usubl v24.8h, v24.8b, v12.8b\n" - "smlal v20.4s, v29.4h, v4.4h\n" - "ldr q30, [x15, #0x0]\n" - "smlal2 v23.4s, v29.8h, v4.8h\n" - "ldr d29, [x10, x5]\n" + "ldr x19, [x12, #0x70]\n" "usubl v29.8h, v29.8b, v12.8b\n" - "smlal v20.4s, v28.4h, v5.4h\n" - "ldr q31, [x17, #0x10]\n" - "smlal2 v23.4s, v28.8h, v5.8h\n" - "ldr d28, [x27, x5]\n" - "add x17, x17, #0x20\n" + "smlal v9.4s, v28.4h, v5.4h\n" + "smlal2 v16.4s, v28.8h, v5.8h\n" + "ldr d28, [x21, x15]\n" + "usubl v28.8h, v28.8b, v12.8b\n" + "smlal v22.4s, v31.4h, v2.4h\n" + "smlal2 v21.4s, v31.8h, v2.8h\n" + "ldr x24, [x12, #0x98]\n" + "ldr d24, [x19, x15]\n" "smlal v15.4s, v27.4h, v5.4h\n" - "ldr q9, [x15, #0x10]\n" - "add x15, x15, #0x20\n" "smlal2 v10.4s, v27.8h, v5.8h\n" - "usubl v28.8h, v28.8b, v12.8b\n" - "smlal v20.4s, v27.4h, v3.4h\n" - "smlal2 v23.4s, v27.8h, v3.8h\n" - "ldr d27, [x28, x5]\n" + "usubl v24.8h, v24.8b, v12.8b\n" + "ldr x23, [x12, #0x90]\n" + "smlal v9.4s, v27.4h, v3.4h\n" + "smlal2 v16.4s, v27.8h, v3.8h\n" + "ldr d27, [x22, x15]\n" "usubl v27.8h, v27.8b, v12.8b\n" - "smlal v16.4s, v26.4h, v3.4h\n" - "smlal2 v22.4s, v26.8h, v3.8h\n" - "ldr d26, [x26, x5]\n" + "smlal v23.4s, v31.4h, v0.4h\n" + "smlal v22.4s, v26.4h, v3.4h\n" + "ldr x22, [x12, #0xa8]\n" + "ldr x19, [x12, #0xa0]\n" + "smlal2 v21.4s, v26.8h, v3.8h\n" + "smlal2 v18.4s, v31.8h, v0.8h\n" + "ldr d26, [x20, x15]\n" "usubl v26.8h, v26.8b, v12.8b\n" + "smlal v23.4s, v27.4h, v4.4h\n" + "smlal v22.4s, v25.4h, v0.4h\n" + "ldr x21, [x12, #0xb0]\n" + "ldr x20, [x12, #0xb8]\n" + "smlal2 v21.4s, v25.8h, v0.8h\n" + "smlal2 v18.4s, v27.8h, v4.8h\n" + "ldr d27, [x19, x15]\n" + "usubl v27.8h, v27.8b, v12.8b\n" + "smlal v23.4s, v28.4h, v1.4h\n" "smlal v15.4s, v25.4h, v6.4h\n" + "ldr x19, [x12, #0xc0]\n" + "ldr q19, [x13, #0x0]\n" "smlal2 
v10.4s, v25.8h, v6.8h\n" - "smlal v16.4s, v25.4h, v0.4h\n" - "smlal2 v22.4s, v25.8h, v0.8h\n" - "ldr d25, [x25, x5]\n" + "smlal v22.4s, v29.4h, v4.4h\n" + "ldr d25, [x23, x15]\n" "usubl v25.8h, v25.8b, v12.8b\n" - "smlal v16.4s, v29.4h, v4.4h\n" - "smlal2 v22.4s, v29.8h, v4.8h\n" - "ldr d29, [x24, x5]\n" + "smlal2 v21.4s, v29.8h, v4.8h\n" + "ldr d29, [x24, x15]\n" + "smlal2 v18.4s, v28.8h, v1.8h\n" "usubl v29.8h, v29.8b, v12.8b\n" + "smlal v23.4s, v26.4h, v5.4h\n" "smlal v15.4s, v24.4h, v7.4h\n" + "ldr q0, [x11, #0x0]\n" + "ldr q4, [x13, #0x10]\n" "smlal2 v10.4s, v24.8h, v7.8h\n" - "smlal v16.4s, v24.4h, v1.4h\n" - "smlal2 v22.4s, v24.8h, v1.8h\n" - "ldr d24, [x22, x5]\n" - "usubl v24.8h, v24.8b, v12.8b\n" - "smlal v17.4s, v27.4h, v4.4h\n" - "smlal2 v18.4s, v27.8h, v4.8h\n" - "ldr d27, [x23, x5]\n" - "usubl v27.8h, v27.8b, v12.8b\n" - "smlal v20.4s, v28.4h, v7.4h\n" - "smlal2 v23.4s, v28.8h, v7.8h\n" - "smlal v17.4s, v28.4h, v1.4h\n" - "smlal2 v18.4s, v28.8h, v1.8h\n" - "smlal v16.4s, v25.4h, v6.4h\n" - "smlal2 v22.4s, v25.8h, v6.8h\n" - "ldr d25, [x20, x5]\n" - "usubl v25.8h, v25.8b, v12.8b\n" - "smlal v17.4s, v26.4h, v5.4h\n" + "smlal v22.4s, v24.4h, v1.4h\n" + "sqdmulh v15.4s, v15.4s, v19.4s\n" + "ldr q31, [x11, #0x10]\n" + "smlal2 v21.4s, v24.8h, v1.8h\n" + "ldr d24, [x22, x15]\n" "smlal2 v18.4s, v26.8h, v5.8h\n" - "ldr d26, [x21, x5]\n" - "usubl v26.8h, v26.8b, v12.8b\n" - "smlal v20.4s, v29.4h, v8.4h\n" - "smlal2 v23.4s, v29.8h, v8.8h\n" - "smlal v17.4s, v29.4h, v2.4h\n" + "usubl v24.8h, v24.8b, v12.8b\n" + "smlal v23.4s, v29.4h, v2.4h\n" + "ldr d26, [x21, x15]\n" "smlal2 v18.4s, v29.8h, v2.8h\n" - "ldr d29, [x19, x5]\n" - "add x5, x5, #0x8\n" - "smlal v16.4s, v27.4h, v7.4h\n" - "usubl v29.8h, v29.8b, v12.8b\n" - "smlal2 v22.4s, v27.8h, v7.8h\n" - "smlal v17.4s, v24.4h, v3.4h\n" - "smlal v16.4s, v24.4h, v5.4h\n" + "usubl v26.8h, v26.8b, v12.8b\n" + "smlal v22.4s, v25.4h, v6.4h\n" + "smlal v23.4s, v24.4h, v3.4h\n" + "and v30.16b, v15.16b, v0.16b\n" + "add x17, x17, #0x48\n" + "smlal v9.4s, v28.4h, v7.4h\n" + "smlal2 v16.4s, v28.8h, v7.8h\n" + "sqdmulh v10.4s, v10.4s, v4.4s\n" + "subs x16, x16, #0x1\n" + "smlal2 v21.4s, v25.8h, v6.8h\n" + "ldr d25, [x20, x15]\n" "smlal2 v18.4s, v24.8h, v3.8h\n" - "sqrdmulh v15.4s, v15.4s, v21.4s\n" - "smlal2 v22.4s, v24.8h, v5.8h\n" - "smlal v17.4s, v26.4h, v7.4h\n" + "usubl v25.8h, v25.8b, v12.8b\n" + "smlal v22.4s, v27.4h, v7.4h\n" + "smlal v23.4s, v26.4h, v7.4h\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "add x13, x13, #0x20\n" + "smlal v9.4s, v29.4h, v8.4h\n" + "smlal2 v16.4s, v29.8h, v8.8h\n" + "ldr d29, [x19, x15]\n" + "usubl v29.8h, v29.8b, v12.8b\n" + "smlal2 v21.4s, v27.8h, v7.8h\n" "smlal2 v18.4s, v26.8h, v7.8h\n" - "smlal v16.4s, v25.4h, v8.4h\n" - "smlal2 v22.4s, v25.8h, v8.8h\n" - "smlal v17.4s, v25.4h, v6.4h\n" + "sqdmulh v9.4s, v9.4s, v19.4s\n" + "add x15, x15, #0x8\n" + "smlal v22.4s, v24.4h, v5.4h\n" + "smlal v23.4s, v25.4h, v6.4h\n" + "and v28.16b, v9.16b, v0.16b\n" + "add x11, x11, #0x20\n" + "smlal2 v21.4s, v24.8h, v5.8h\n" "smlal2 v18.4s, v25.8h, v6.8h\n" - "and v26.16b, v15.16b, v30.16b\n" - "sshr v26.4s, v26.4s, #0x1f\n" - "smlal v17.4s, v29.4h, v8.4h\n" + "sqdmulh v16.4s, v16.4s, v4.4s\n" + "smlal v22.4s, v25.4h, v8.4h\n" + "smlal v23.4s, v29.4h, v8.4h\n" + "sqdmulh v22.4s, v22.4s, v19.4s\n" + "smlal2 v21.4s, v25.8h, v8.8h\n" "smlal2 v18.4s, v29.8h, v8.8h\n" - "sqrdmulh v10.4s, v10.4s, v31.4s\n" - "sqrdmulh v20.4s, v20.4s, v21.4s\n" - "sqrdmulh v23.4s, v23.4s, v31.4s\n" - "sqrdmulh v16.4s, v16.4s, v21.4s\n" - "sqadd v15.4s, v15.4s, 
v26.4s\n" - "and v8.16b, v10.16b, v9.16b\n" - "sshr v8.4s, v8.4s, #0x1f\n" - "srshl v15.4s, v15.4s, v30.4s\n" - "and v4.16b, v20.16b, v30.16b\n" + "sqdmulh v23.4s, v23.4s, v19.4s\n" + "and v29.16b, v22.16b, v0.16b\n" + "sqdmulh v21.4s, v21.4s, v4.4s\n" + "and v20.16b, v23.16b, v0.16b\n" + "sqdmulh v18.4s, v18.4s, v4.4s\n" + "and v19.16b, v10.16b, v31.16b\n" + "sshr v28.4s, v28.4s, #0x1f\n" + "and v4.16b, v16.16b, v31.16b\n" + "sshr v29.4s, v29.4s, #0x1f\n" + "and v5.16b, v21.16b, v31.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v26.16b, v18.16b, v31.16b\n" + "sqadd v15.4s, v15.4s, v30.4s\n" + "sshr v19.4s, v19.4s, #0x1f\n" + "sqadd v9.4s, v9.4s, v28.4s\n" "sshr v4.4s, v4.4s, #0x1f\n" - "and v2.16b, v23.16b, v9.16b\n" - "and v1.16b, v16.16b, v30.16b\n" - "sshr v2.4s, v2.4s, #0x1f\n" - "add v15.4s, v15.4s, v11.4s\n" - "sqadd v10.4s, v10.4s, v8.4s\n" - "sshr v1.4s, v1.4s, #0x1f\n" - "sqrdmulh v22.4s, v22.4s, v31.4s\n" - "sqadd v20.4s, v20.4s, v4.4s\n" - "smin v15.4s, v15.4s, v14.4s\n" - "srshl v10.4s, v10.4s, v9.4s\n" - "sqadd v23.4s, v23.4s, v2.4s\n" - "smax v15.4s, v15.4s, v19.4s\n" - "srshl v20.4s, v20.4s, v30.4s\n" - "add v10.4s, v10.4s, v11.4s\n" - "srshl v23.4s, v23.4s, v9.4s\n" - "sqadd v16.4s, v16.4s, v1.4s\n" - "smin v10.4s, v10.4s, v14.4s\n" - "add v20.4s, v20.4s, v11.4s\n" - "add v23.4s, v23.4s, v11.4s\n" - "smax v10.4s, v10.4s, v19.4s\n" - "smin v20.4s, v20.4s, v14.4s\n" - "smin v23.4s, v23.4s, v14.4s\n" - "uzp1 v15.16b, v15.16b, v10.16b\n" - "smax v20.4s, v20.4s, v19.4s\n" + "sqadd v22.4s, v22.4s, v29.4s\n" + "sshr v5.4s, v5.4s, #0x1f\n" + "sqadd v23.4s, v23.4s, v20.4s\n" + "sshr v26.4s, v26.4s, #0x1f\n" + "srshl v15.4s, v15.4s, v0.4s\n" + "sqadd v10.4s, v10.4s, v19.4s\n" + "srshl v9.4s, v9.4s, v0.4s\n" + "sqadd v16.4s, v16.4s, v4.4s\n" + "srshl v22.4s, v22.4s, v0.4s\n" + "sqadd v21.4s, v21.4s, v5.4s\n" + "srshl v23.4s, v23.4s, v0.4s\n" + "sqadd v18.4s, v18.4s, v26.4s\n" + "srshl v10.4s, v10.4s, v31.4s\n" + "sqxtn v15.4h, v15.4s\n" + "srshl v16.4s, v16.4s, v31.4s\n" + "sqxtn v9.4h, v9.4s\n" + "srshl v21.4s, v21.4s, v31.4s\n" + "sqxtn v22.4h, v22.4s\n" + "srshl v18.4s, v18.4s, v31.4s\n" + "sqxtn v23.4h, v23.4s\n" + "sqxtn2 v15.8h, v10.4s\n" + "sqxtn2 v9.8h, v16.4s\n" + "sqxtn2 v22.8h, v21.4s\n" + "sqxtn2 v23.8h, v18.4s\n" + "sqadd v15.8h, v15.8h, v11.8h\n" + "sqadd v9.8h, v9.8h, v11.8h\n" + "sqadd v22.8h, v22.8h, v11.8h\n" + "sqadd v23.8h, v23.8h, v11.8h\n" + "smax v15.8h, v15.8h, v17.8h\n" + "smax v9.8h, v9.8h, v17.8h\n" + "smax v22.8h, v22.8h, v17.8h\n" + "smax v23.8h, v23.8h, v17.8h\n" + "smin v15.8h, v15.8h, v14.8h\n" + "smin v9.8h, v9.8h, v14.8h\n" + "smin v22.8h, v22.8h, v14.8h\n" + "smin v23.8h, v23.8h, v14.8h\n" "uzp1 v15.16b, v15.16b, v15.16b\n" - "str d15, [x14, x7]\n" - "smax v23.4s, v23.4s, v19.4s\n" - "srshl v16.4s, v16.4s, v30.4s\n" - "and v24.16b, v22.16b, v9.16b\n" - "sshr v24.4s, v24.4s, #0x1f\n" - "uzp1 v20.16b, v20.16b, v23.16b\n" - "add v16.4s, v16.4s, v11.4s\n" - "sqrdmulh v17.4s, v17.4s, v21.4s\n" - "uzp1 v20.16b, v20.16b, v20.16b\n" - "str d20, [x13, x7]\n" - "smin v16.4s, v16.4s, v14.4s\n" - "sqrdmulh v18.4s, v18.4s, v31.4s\n" - "sqadd v22.4s, v22.4s, v24.4s\n" - "and v2.16b, v17.16b, v30.16b\n" - "sshr v2.4s, v2.4s, #0x1f\n" - "smax v16.4s, v16.4s, v19.4s\n" - "srshl v22.4s, v22.4s, v9.4s\n" - "and v31.16b, v18.16b, v9.16b\n" - "sshr v31.4s, v31.4s, #0x1f\n" - "add v22.4s, v22.4s, v11.4s\n" - "sqadd v17.4s, v17.4s, v2.4s\n" - "smin v22.4s, v22.4s, v14.4s\n" - "srshl v17.4s, v17.4s, v30.4s\n" - "sqadd v18.4s, v18.4s, v31.4s\n" - "smax v22.4s, v22.4s, 
v19.4s\n" - "uzp1 v16.16b, v16.16b, v22.16b\n" - "add v17.4s, v17.4s, v11.4s\n" - "srshl v18.4s, v18.4s, v9.4s\n" - "uzp1 v16.16b, v16.16b, v16.16b\n" - "str d16, [x12, x7]\n" - "smin v17.4s, v17.4s, v14.4s\n" - "add v18.4s, v18.4s, v11.4s\n" - "smax v17.4s, v17.4s, v19.4s\n" - "smin v18.4s, v18.4s, v14.4s\n" - "smax v18.4s, v18.4s, v19.4s\n" - "uzp1 v17.16b, v17.16b, v18.16b\n" - "uzp1 v17.16b, v17.16b, v17.16b\n" - "str d17, [x11, x7]\n" - "add x7, x7, #0x8\n" + "str d15, [x10, x14]\n" + "uzp1 v9.16b, v9.16b, v9.16b\n" + "uzp1 v22.16b, v22.16b, v22.16b\n" + "str d9, [x9, x14]\n" + "uzp1 v23.16b, v23.16b, v23.16b\n" + "str d22, [x28, x14]\n" + "str d23, [x27, x14]\n" "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" "ldr q15, [x19, #0x0]\n" - "mov v20.16b, v15.16b\n" + "add x14, x14, #0x8\n" "ldr q10, [x19, #0x10]\n" "add x19, x19, #0x20\n" - "mov v16.16b, v15.16b\n" "str x19, [%x[params], %[offsetof_Params_bias]]\n" - "mov v17.16b, v15.16b\n" - "ldr d0, [x6, #0x0]\n" + "ldr d0, [x17, #0x0]\n" + "ldr d1, [x17, #0x8]\n" + "ldr d2, [x17, #0x10]\n" + "mov v9.16b, v15.16b\n" + "mov v16.16b, v10.16b\n" + "ldr d3, [x17, #0x18]\n" + "ldr d4, [x17, #0x20]\n" + "mov v22.16b, v15.16b\n" + "mov v21.16b, v10.16b\n" + "ldr d5, [x17, #0x28]\n" + "ldr d6, [x17, #0x30]\n" + "mov v23.16b, v15.16b\n" + "mov v18.16b, v10.16b\n" + "ldr d7, [x17, #0x38]\n" + "ldr d8, [x17, #0x40]\n" "usubl v0.8h, v0.8b, v13.8b\n" - "mov v23.16b, v10.16b\n" - "ldr d1, [x6, #0x8]\n" - "mov v22.16b, v10.16b\n" - "ldr d2, [x6, #0x10]\n" "usubl v1.8h, v1.8b, v13.8b\n" - "mov v18.16b, v10.16b\n" - "ldr d3, [x6, #0x18]\n" - "ldr d4, [x6, #0x20]\n" + "ldp x26, x25, [x12, #0x0]\n" + "ldp x24, x23, [x12, #0x10]\n" "usubl v2.8h, v2.8b, v13.8b\n" - "ldr d5, [x6, #0x28]\n" "usubl v3.8h, v3.8b, v13.8b\n" - "ldr d6, [x6, #0x30]\n" - "ldr d7, [x6, #0x38]\n" + "ldp x22, x21, [x12, #0x20]\n" + "ldp x20, x19, [x12, #0x30]\n" "usubl v4.8h, v4.8b, v13.8b\n" - "ldr d8, [x6, #0x40]\n" "usubl v5.8h, v5.8b, v13.8b\n" - "ldp x26, x25, [x8, #0x0]\n" + "ldr d31, [x26, x15]\n" + "ldr d30, [x25, x15]\n" "usubl v6.8h, v6.8b, v13.8b\n" - "ldp x24, x23, [x8, #0x10]\n" "usubl v7.8h, v7.8b, v13.8b\n" + "ldr d29, [x24, x15]\n" + "ldr d28, [x23, x15]\n" "usubl v8.8h, v8.8b, v13.8b\n" - "ldp x22, x21, [x8, #0x20]\n" - "ldp x20, x19, [x8, #0x30]\n" - "ldr d31, [x26, x5]\n" "usubl v31.8h, v31.8b, v12.8b\n" - "ldr d30, [x25, x5]\n" - "ldr d29, [x24, x5]\n" + "ldr d27, [x22, x15]\n" + "ldr d26, [x21, x15]\n" "usubl v30.8h, v30.8b, v12.8b\n" - "ldr d28, [x23, x5]\n" - "ldr d27, [x22, x5]\n" "usubl v29.8h, v29.8b, v12.8b\n" - "ldr d26, [x21, x5]\n" + "ldr d25, [x20, x15]\n" + "ldr d24, [x19, x15]\n" "usubl v28.8h, v28.8b, v12.8b\n" - "ldr d25, [x20, x5]\n" - "ldr d24, [x19, x5]\n" "usubl v27.8h, v27.8b, v12.8b\n" "usubl v26.8h, v26.8b, v12.8b\n" "usubl v25.8h, v25.8b, v12.8b\n" @@ -436,275 +428,267 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "bgt 1b\n" "2:" // Tail "smlal v15.4s, v31.4h, v8.4h\n" - "ldr x23, [x8, #0x40]\n" - "tst x4, #0x7\n" "smlal2 v10.4s, v31.8h, v8.8h\n" - "ldr x22, [x8, #0x48]\n" - "smlal v20.4s, v31.4h, v6.4h\n" - "ldr x21, [x8, #0x50]\n" - "smlal2 v23.4s, v31.8h, v6.8h\n" - "ldr x20, [x8, #0x58]\n" - "smlal v16.4s, v31.4h, v2.4h\n" - "ldr x19, [x8, #0x60]\n" - "smlal2 v22.4s, v31.8h, v2.8h\n" - "ldr x10, [x8, #0x68]\n" - "smlal v17.4s, v31.4h, v0.4h\n" - "ldr x9, [x8, #0x70]\n" - "smlal2 v18.4s, v31.8h, v0.8h\n" - "ldr x28, [x8, #0x78]\n" + "ldr x24, [x12, #0x40]\n" + "ldr x23, [x12, #0x48]\n" + "smlal v9.4s, v31.4h, v6.4h\n" + 
"smlal2 v16.4s, v31.8h, v6.8h\n" + "ldr x21, [x12, #0x50]\n" + "ldr x19, [x12, #0x58]\n" "smlal v15.4s, v30.4h, v0.4h\n" - "ldr x27, [x8, #0x80]\n" "smlal2 v10.4s, v30.8h, v0.8h\n" - "ldr x26, [x8, #0x88]\n" - "smlal v20.4s, v28.4h, v1.4h\n" - "ldr x25, [x8, #0x90]\n" - "smlal2 v23.4s, v28.8h, v1.8h\n" - "ldr d28, [x22, x5]\n" + "ldr x22, [x12, #0x78]\n" + "ldr x20, [x12, #0x60]\n" + "smlal v9.4s, v28.4h, v1.4h\n" + "smlal2 v16.4s, v28.8h, v1.8h\n" + "ldr d28, [x23, x15]\n" "usubl v28.8h, v28.8b, v12.8b\n" "smlal v15.4s, v29.4h, v1.4h\n" - "ldr x24, [x8, #0x98]\n" "smlal2 v10.4s, v29.8h, v1.8h\n" - "ldr d29, [x23, x5]\n" + "ldr d29, [x24, x15]\n" "usubl v29.8h, v29.8b, v12.8b\n" - "smlal v20.4s, v27.4h, v2.4h\n" - "ldr x23, [x8, #0xa0]\n" - "smlal2 v23.4s, v27.8h, v2.8h\n" - "ldr d27, [x21, x5]\n" + "smlal v9.4s, v27.4h, v2.4h\n" + "smlal2 v16.4s, v27.8h, v2.8h\n" + "ldr d27, [x21, x15]\n" "usubl v27.8h, v27.8b, v12.8b\n" "smlal v15.4s, v26.4h, v3.4h\n" - "ldr x22, [x8, #0xa8]\n" "smlal2 v10.4s, v26.8h, v3.8h\n" - "ldr d26, [x20, x5]\n" + "ldr d26, [x19, x15]\n" "usubl v26.8h, v26.8b, v12.8b\n" + "smlal v9.4s, v24.4h, v0.4h\n" + "smlal2 v16.4s, v24.8h, v0.8h\n" + "ldr x21, [x12, #0x80]\n" + "ldr x19, [x12, #0x68]\n" "smlal v15.4s, v25.4h, v4.4h\n" - "ldr x21, [x8, #0xb0]\n" "smlal2 v10.4s, v25.8h, v4.8h\n" - "ldr d25, [x19, x5]\n" + "ldr d25, [x20, x15]\n" "usubl v25.8h, v25.8b, v12.8b\n" + "smlal v9.4s, v29.4h, v4.4h\n" + "smlal2 v16.4s, v29.8h, v4.8h\n" + "ldr x20, [x12, #0x88]\n" + "ldr d29, [x19, x15]\n" "smlal v15.4s, v24.4h, v2.4h\n" - "ldr x20, [x8, #0xb8]\n" "smlal2 v10.4s, v24.8h, v2.8h\n" - "ldr x19, [x8, #0xc0]\n" - "smlal v20.4s, v24.4h, v0.4h\n" - "ldr q21, [x17, #0x0]\n" - "smlal2 v23.4s, v24.8h, v0.8h\n" - "ldr d24, [x9, x5]\n" - "usubl v24.8h, v24.8b, v12.8b\n" - "smlal v20.4s, v29.4h, v4.4h\n" - "ldr q30, [x15, #0x0]\n" - "smlal2 v23.4s, v29.8h, v4.8h\n" - "ldr d29, [x10, x5]\n" + "ldr x19, [x12, #0x70]\n" "usubl v29.8h, v29.8b, v12.8b\n" - "smlal v20.4s, v28.4h, v5.4h\n" - "ldr q31, [x17, #0x10]\n" - "smlal2 v23.4s, v28.8h, v5.8h\n" - "ldr d28, [x27, x5]\n" - "add x17, x17, #0x20\n" + "smlal v9.4s, v28.4h, v5.4h\n" + "smlal2 v16.4s, v28.8h, v5.8h\n" + "ldr d28, [x21, x15]\n" + "usubl v28.8h, v28.8b, v12.8b\n" + "smlal v22.4s, v31.4h, v2.4h\n" + "smlal2 v21.4s, v31.8h, v2.8h\n" + "ldr x24, [x12, #0x98]\n" + "ldr d24, [x19, x15]\n" "smlal v15.4s, v27.4h, v5.4h\n" - "ldr q9, [x15, #0x10]\n" - "add x15, x15, #0x20\n" "smlal2 v10.4s, v27.8h, v5.8h\n" - "usubl v28.8h, v28.8b, v12.8b\n" - "smlal v20.4s, v27.4h, v3.4h\n" - "smlal2 v23.4s, v27.8h, v3.8h\n" - "ldr d27, [x28, x5]\n" + "usubl v24.8h, v24.8b, v12.8b\n" + "ldr x23, [x12, #0x90]\n" + "smlal v9.4s, v27.4h, v3.4h\n" + "smlal2 v16.4s, v27.8h, v3.8h\n" + "ldr d27, [x22, x15]\n" "usubl v27.8h, v27.8b, v12.8b\n" - "smlal v16.4s, v26.4h, v3.4h\n" - "smlal2 v22.4s, v26.8h, v3.8h\n" - "ldr d26, [x26, x5]\n" + "smlal v23.4s, v31.4h, v0.4h\n" + "smlal v22.4s, v26.4h, v3.4h\n" + "ldr x22, [x12, #0xa8]\n" + "ldr x19, [x12, #0xa0]\n" + "smlal2 v21.4s, v26.8h, v3.8h\n" + "smlal2 v18.4s, v31.8h, v0.8h\n" + "ldr d26, [x20, x15]\n" "usubl v26.8h, v26.8b, v12.8b\n" + "smlal v23.4s, v27.4h, v4.4h\n" + "smlal v22.4s, v25.4h, v0.4h\n" + "ldr x21, [x12, #0xb0]\n" + "ldr x20, [x12, #0xb8]\n" + "smlal2 v21.4s, v25.8h, v0.8h\n" + "smlal2 v18.4s, v27.8h, v4.8h\n" + "ldr d27, [x19, x15]\n" + "usubl v27.8h, v27.8b, v12.8b\n" + "smlal v23.4s, v28.4h, v1.4h\n" "smlal v15.4s, v25.4h, v6.4h\n" + "ldr x19, [x12, #0xc0]\n" + "ldr q19, [x13, #0x0]\n" 
"smlal2 v10.4s, v25.8h, v6.8h\n" - "smlal v16.4s, v25.4h, v0.4h\n" - "smlal2 v22.4s, v25.8h, v0.8h\n" - "ldr d25, [x25, x5]\n" + "smlal v22.4s, v29.4h, v4.4h\n" + "ldr d25, [x23, x15]\n" "usubl v25.8h, v25.8b, v12.8b\n" - "smlal v16.4s, v29.4h, v4.4h\n" - "smlal2 v22.4s, v29.8h, v4.8h\n" - "ldr d29, [x24, x5]\n" + "smlal2 v21.4s, v29.8h, v4.8h\n" + "ldr d29, [x24, x15]\n" + "smlal2 v18.4s, v28.8h, v1.8h\n" "usubl v29.8h, v29.8b, v12.8b\n" + "smlal v23.4s, v26.4h, v5.4h\n" "smlal v15.4s, v24.4h, v7.4h\n" + "ldr q0, [x11, #0x0]\n" + "ldr q4, [x13, #0x10]\n" "smlal2 v10.4s, v24.8h, v7.8h\n" - "smlal v16.4s, v24.4h, v1.4h\n" - "smlal2 v22.4s, v24.8h, v1.8h\n" - "ldr d24, [x22, x5]\n" - "usubl v24.8h, v24.8b, v12.8b\n" - "smlal v17.4s, v27.4h, v4.4h\n" - "smlal2 v18.4s, v27.8h, v4.8h\n" - "ldr d27, [x23, x5]\n" - "usubl v27.8h, v27.8b, v12.8b\n" - "smlal v20.4s, v28.4h, v7.4h\n" - "smlal2 v23.4s, v28.8h, v7.8h\n" - "smlal v17.4s, v28.4h, v1.4h\n" - "smlal2 v18.4s, v28.8h, v1.8h\n" - "smlal v16.4s, v25.4h, v6.4h\n" - "smlal2 v22.4s, v25.8h, v6.8h\n" - "ldr d25, [x20, x5]\n" - "usubl v25.8h, v25.8b, v12.8b\n" - "smlal v17.4s, v26.4h, v5.4h\n" + "smlal v22.4s, v24.4h, v1.4h\n" + "sqdmulh v15.4s, v15.4s, v19.4s\n" + "ldr q31, [x11, #0x10]\n" + "smlal2 v21.4s, v24.8h, v1.8h\n" + "ldr d24, [x22, x15]\n" "smlal2 v18.4s, v26.8h, v5.8h\n" - "ldr d26, [x21, x5]\n" - "usubl v26.8h, v26.8b, v12.8b\n" - "smlal v20.4s, v29.4h, v8.4h\n" - "smlal2 v23.4s, v29.8h, v8.8h\n" - "smlal v17.4s, v29.4h, v2.4h\n" + "usubl v24.8h, v24.8b, v12.8b\n" + "smlal v23.4s, v29.4h, v2.4h\n" + "ldr d26, [x21, x15]\n" "smlal2 v18.4s, v29.8h, v2.8h\n" - "ldr d29, [x19, x5]\n" - "add x5, x5, #0x8\n" - "smlal v16.4s, v27.4h, v7.4h\n" - "usubl v29.8h, v29.8b, v12.8b\n" - "smlal2 v22.4s, v27.8h, v7.8h\n" - "smlal v17.4s, v24.4h, v3.4h\n" - "smlal v16.4s, v24.4h, v5.4h\n" + "usubl v26.8h, v26.8b, v12.8b\n" + "smlal v22.4s, v25.4h, v6.4h\n" + "smlal v23.4s, v24.4h, v3.4h\n" + "and v30.16b, v15.16b, v0.16b\n" + "tst x8, #0x7\n" + "smlal v9.4s, v28.4h, v7.4h\n" + "smlal2 v16.4s, v28.8h, v7.8h\n" + "sqdmulh v10.4s, v10.4s, v4.4s\n" + "add x13, x13, #0x20\n" + "smlal2 v21.4s, v25.8h, v6.8h\n" + "ldr d25, [x20, x15]\n" "smlal2 v18.4s, v24.8h, v3.8h\n" - "sqrdmulh v15.4s, v15.4s, v21.4s\n" - "smlal2 v22.4s, v24.8h, v5.8h\n" - "smlal v17.4s, v26.4h, v7.4h\n" + "usubl v25.8h, v25.8b, v12.8b\n" + "smlal v22.4s, v27.4h, v7.4h\n" + "smlal v23.4s, v26.4h, v7.4h\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "add x11, x11, #0x20\n" + "smlal v9.4s, v29.4h, v8.4h\n" + "smlal2 v16.4s, v29.8h, v8.8h\n" + "ldr d29, [x19, x15]\n" + "usubl v29.8h, v29.8b, v12.8b\n" + "smlal2 v21.4s, v27.8h, v7.8h\n" "smlal2 v18.4s, v26.8h, v7.8h\n" - "smlal v16.4s, v25.4h, v8.4h\n" - "smlal2 v22.4s, v25.8h, v8.8h\n" - "smlal v17.4s, v25.4h, v6.4h\n" + "sqdmulh v9.4s, v9.4s, v19.4s\n" + "add x15, x15, #0x8\n" + "smlal v22.4s, v24.4h, v5.4h\n" + "smlal v23.4s, v25.4h, v6.4h\n" + "and v28.16b, v9.16b, v0.16b\n" + "smlal2 v21.4s, v24.8h, v5.8h\n" "smlal2 v18.4s, v25.8h, v6.8h\n" - "and v26.16b, v15.16b, v30.16b\n" - "sshr v26.4s, v26.4s, #0x1f\n" - "smlal v17.4s, v29.4h, v8.4h\n" + "sqdmulh v16.4s, v16.4s, v4.4s\n" + "smlal v22.4s, v25.4h, v8.4h\n" + "smlal v23.4s, v29.4h, v8.4h\n" + "sqdmulh v22.4s, v22.4s, v19.4s\n" + "smlal2 v21.4s, v25.8h, v8.8h\n" "smlal2 v18.4s, v29.8h, v8.8h\n" - "sqrdmulh v10.4s, v10.4s, v31.4s\n" - "sqrdmulh v20.4s, v20.4s, v21.4s\n" - "sqrdmulh v23.4s, v23.4s, v31.4s\n" - "sqrdmulh v16.4s, v16.4s, v21.4s\n" - "sqadd v15.4s, v15.4s, v26.4s\n" - "and v8.16b, 
v10.16b, v9.16b\n" - "sshr v8.4s, v8.4s, #0x1f\n" - "srshl v15.4s, v15.4s, v30.4s\n" - "and v4.16b, v20.16b, v30.16b\n" + "sqdmulh v23.4s, v23.4s, v19.4s\n" + "and v29.16b, v22.16b, v0.16b\n" + "sqdmulh v21.4s, v21.4s, v4.4s\n" + "and v20.16b, v23.16b, v0.16b\n" + "sqdmulh v18.4s, v18.4s, v4.4s\n" + "and v19.16b, v10.16b, v31.16b\n" + "sshr v28.4s, v28.4s, #0x1f\n" + "and v4.16b, v16.16b, v31.16b\n" + "sshr v29.4s, v29.4s, #0x1f\n" + "and v5.16b, v21.16b, v31.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v26.16b, v18.16b, v31.16b\n" + "sqadd v15.4s, v15.4s, v30.4s\n" + "sshr v19.4s, v19.4s, #0x1f\n" + "sqadd v9.4s, v9.4s, v28.4s\n" "sshr v4.4s, v4.4s, #0x1f\n" - "and v2.16b, v23.16b, v9.16b\n" - "and v1.16b, v16.16b, v30.16b\n" - "sshr v2.4s, v2.4s, #0x1f\n" - "add v15.4s, v15.4s, v11.4s\n" - "sqadd v10.4s, v10.4s, v8.4s\n" - "sshr v1.4s, v1.4s, #0x1f\n" - "sqrdmulh v22.4s, v22.4s, v31.4s\n" - "sqadd v20.4s, v20.4s, v4.4s\n" - "smin v15.4s, v15.4s, v14.4s\n" - "srshl v10.4s, v10.4s, v9.4s\n" - "sqadd v23.4s, v23.4s, v2.4s\n" - "smax v15.4s, v15.4s, v19.4s\n" - "srshl v20.4s, v20.4s, v30.4s\n" - "add v10.4s, v10.4s, v11.4s\n" - "srshl v23.4s, v23.4s, v9.4s\n" - "sqadd v16.4s, v16.4s, v1.4s\n" - "smin v10.4s, v10.4s, v14.4s\n" - "add v20.4s, v20.4s, v11.4s\n" - "add v23.4s, v23.4s, v11.4s\n" - "smax v10.4s, v10.4s, v19.4s\n" - "smin v20.4s, v20.4s, v14.4s\n" - "smin v23.4s, v23.4s, v14.4s\n" - "uzp1 v15.16b, v15.16b, v10.16b\n" - "smax v20.4s, v20.4s, v19.4s\n" + "sqadd v22.4s, v22.4s, v29.4s\n" + "sshr v5.4s, v5.4s, #0x1f\n" + "sqadd v23.4s, v23.4s, v20.4s\n" + "sshr v26.4s, v26.4s, #0x1f\n" + "srshl v15.4s, v15.4s, v0.4s\n" + "sqadd v10.4s, v10.4s, v19.4s\n" + "srshl v9.4s, v9.4s, v0.4s\n" + "sqadd v16.4s, v16.4s, v4.4s\n" + "srshl v22.4s, v22.4s, v0.4s\n" + "sqadd v21.4s, v21.4s, v5.4s\n" + "srshl v23.4s, v23.4s, v0.4s\n" + "sqadd v18.4s, v18.4s, v26.4s\n" + "srshl v10.4s, v10.4s, v31.4s\n" + "sqxtn v15.4h, v15.4s\n" + "srshl v16.4s, v16.4s, v31.4s\n" + "sqxtn v9.4h, v9.4s\n" + "srshl v21.4s, v21.4s, v31.4s\n" + "sqxtn v22.4h, v22.4s\n" + "srshl v18.4s, v18.4s, v31.4s\n" + "sqxtn v23.4h, v23.4s\n" + "sqxtn2 v15.8h, v10.4s\n" + "sqxtn2 v9.8h, v16.4s\n" + "sqxtn2 v22.8h, v21.4s\n" + "sqxtn2 v23.8h, v18.4s\n" + "sqadd v15.8h, v15.8h, v11.8h\n" + "sqadd v9.8h, v9.8h, v11.8h\n" + "sqadd v22.8h, v22.8h, v11.8h\n" + "sqadd v23.8h, v23.8h, v11.8h\n" + "smax v15.8h, v15.8h, v17.8h\n" + "smax v9.8h, v9.8h, v17.8h\n" + "smax v22.8h, v22.8h, v17.8h\n" + "smax v23.8h, v23.8h, v17.8h\n" + "smin v15.8h, v15.8h, v14.8h\n" + "smin v9.8h, v9.8h, v14.8h\n" + "smin v22.8h, v22.8h, v14.8h\n" + "smin v23.8h, v23.8h, v14.8h\n" "uzp1 v15.16b, v15.16b, v15.16b\n" - "str d15, [x14, x7]\n" - "smax v23.4s, v23.4s, v19.4s\n" - "srshl v16.4s, v16.4s, v30.4s\n" - "and v24.16b, v22.16b, v9.16b\n" - "sshr v24.4s, v24.4s, #0x1f\n" - "uzp1 v20.16b, v20.16b, v23.16b\n" - "add v16.4s, v16.4s, v11.4s\n" - "sqrdmulh v17.4s, v17.4s, v21.4s\n" - "uzp1 v20.16b, v20.16b, v20.16b\n" - "str d20, [x13, x7]\n" - "smin v16.4s, v16.4s, v14.4s\n" - "sqrdmulh v18.4s, v18.4s, v31.4s\n" - "sqadd v22.4s, v22.4s, v24.4s\n" - "and v2.16b, v17.16b, v30.16b\n" - "sshr v2.4s, v2.4s, #0x1f\n" - "smax v16.4s, v16.4s, v19.4s\n" - "srshl v22.4s, v22.4s, v9.4s\n" - "and v31.16b, v18.16b, v9.16b\n" - "sshr v31.4s, v31.4s, #0x1f\n" - "add v22.4s, v22.4s, v11.4s\n" - "sqadd v17.4s, v17.4s, v2.4s\n" - "smin v22.4s, v22.4s, v14.4s\n" - "srshl v17.4s, v17.4s, v30.4s\n" - "sqadd v18.4s, v18.4s, v31.4s\n" - "smax v22.4s, v22.4s, v19.4s\n" - "uzp1 v16.16b, 
v16.16b, v22.16b\n" - "add v17.4s, v17.4s, v11.4s\n" - "srshl v18.4s, v18.4s, v9.4s\n" - "uzp1 v16.16b, v16.16b, v16.16b\n" - "str d16, [x12, x7]\n" - "smin v17.4s, v17.4s, v14.4s\n" - "add v18.4s, v18.4s, v11.4s\n" - "smax v17.4s, v17.4s, v19.4s\n" - "smin v18.4s, v18.4s, v14.4s\n" - "smax v18.4s, v18.4s, v19.4s\n" - "uzp1 v17.16b, v17.16b, v18.16b\n" - "uzp1 v17.16b, v17.16b, v17.16b\n" - "str d17, [x11, x7]\n" - "add x7, x7, #0x8\n" + "str d15, [x10, x14]\n" + "uzp1 v9.16b, v9.16b, v9.16b\n" + "uzp1 v22.16b, v22.16b, v22.16b\n" + "str d9, [x9, x14]\n" + "uzp1 v23.16b, v23.16b, v23.16b\n" + "str d22, [x28, x14]\n" + "str d23, [x27, x14]\n" + "add x14, x14, #0x8\n" "beq 88f\n" - "add x6, x6, #0x48\n" + "add x17, x17, #0x48\n" "3:" // Oddments "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" - "tbz x4, #2, 5f\n" + "tbz x8, #2, 5f\n" "ld1 { v15.4s }, [x19], #0x10\n" - "tbz x4, #1, 4f\n" + "tbz x8, #1, 4f\n" "ld1 { v10.d }[0], [x19], #0x8\n" - "tbz x4, #0, 7f\n" + "tbz x8, #0, 7f\n" "ld1 { v10.s }[2], [x19]\n" "b 7f\n" "4:" // Oddments: Load bias: Bit 2: Bit 1: Unset - "tbz x4, #0, 7f\n" + "tbz x8, #0, 7f\n" "ld1 { v10.s }[0], [x19]\n" "b 7f\n" "5:" // Oddments: Load bias: Bit 2: Unset - "tbz x4, #1, 6f\n" + "tbz x8, #1, 6f\n" "ld1 { v15.d }[0], [x19], #0x8\n" - "tbz x4, #0, 7f\n" + "tbz x8, #0, 7f\n" "ld1 { v15.s }[2], [x19]\n" "b 7f\n" "6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 7f\n" + "tbz x8, #0, 7f\n" "ld1 { v15.s }[0], [x19]\n" "7:" // Oddments: Load bias: Bit 2: End - "mov v20.16b, v15.16b\n" - "ldr d0, [x6, #0x0]\n" - "mov v23.16b, v10.16b\n" - "ldr d1, [x6, #0x8]\n" - "mov v16.16b, v15.16b\n" - "ldr d2, [x6, #0x10]\n" - "mov v22.16b, v10.16b\n" - "ldr d3, [x6, #0x18]\n" - "mov v17.16b, v15.16b\n" - "ldr d4, [x6, #0x20]\n" - "usubl v0.8h, v0.8b, v13.8b\n" + "ldr d0, [x17, #0x0]\n" + "ldr d1, [x17, #0x8]\n" + "mov v9.16b, v15.16b\n" + "mov v16.16b, v10.16b\n" + "ldr d2, [x17, #0x10]\n" + "ldr d3, [x17, #0x18]\n" + "mov v22.16b, v15.16b\n" + "mov v21.16b, v10.16b\n" + "ldr d4, [x17, #0x20]\n" + "ldr d5, [x17, #0x28]\n" + "mov v23.16b, v15.16b\n" "mov v18.16b, v10.16b\n" - "ldr d5, [x6, #0x28]\n" + "ldr d6, [x17, #0x30]\n" + "ldr d7, [x17, #0x38]\n" + "usubl v0.8h, v0.8b, v13.8b\n" "usubl v1.8h, v1.8b, v13.8b\n" - "ldr d6, [x6, #0x30]\n" + "ldr d8, [x17, #0x40]\n" + "ldp x26, x25, [x12, #0x0]\n" "usubl v2.8h, v2.8b, v13.8b\n" - "ldr d7, [x6, #0x38]\n" "usubl v3.8h, v3.8b, v13.8b\n" - "ldr d8, [x6, #0x40]\n" + "ldp x24, x23, [x12, #0x10]\n" + "ldp x22, x21, [x12, #0x20]\n" "usubl v4.8h, v4.8b, v13.8b\n" - "ldp x26, x25, [x8, #0x0]\n" "usubl v5.8h, v5.8b, v13.8b\n" - "ldp x24, x23, [x8, #0x10]\n" + "ldp x20, x19, [x12, #0x30]\n" "usubl v6.8h, v6.8b, v13.8b\n" "usubl v7.8h, v7.8b, v13.8b\n" - "ldp x22, x21, [x8, #0x20]\n" "usubl v8.8h, v8.8b, v13.8b\n" - "ldp x20, x19, [x8, #0x30]\n" - "add x26, x26, x5\n" - "add x25, x25, x5\n" - "add x24, x24, x5\n" - "add x23, x23, x5\n" - "add x22, x22, x5\n" - "add x21, x21, x5\n" - "add x20, x20, x5\n" - "add x19, x19, x5\n" - "tbz x4, #2, 9f\n" + "add x26, x26, x15\n" + "add x25, x25, x15\n" + "add x24, x24, x15\n" + "add x23, x23, x15\n" + "add x22, x22, x15\n" + "add x21, x21, x15\n" + "add x20, x20, x15\n" + "add x19, x19, x15\n" + "tbz x8, #2, 9f\n" "ld1 { v31.s }[0], [x26], #0x4\n" "ld1 { v30.s }[0], [x25], #0x4\n" "ld1 { v29.s }[0], [x24], #0x4\n" @@ -713,7 +697,7 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "ld1 { v26.s }[0], [x21], #0x4\n" "ld1 { v25.s }[0], [x20], #0x4\n" "ld1 { v24.s }[0], [x19], 
#0x4\n" - "tbz x4, #1, 8f\n" + "tbz x8, #1, 8f\n" "ld1 { v31.h }[2], [x26], #0x2\n" "ld1 { v30.h }[2], [x25], #0x2\n" "ld1 { v29.h }[2], [x24], #0x2\n" @@ -722,7 +706,7 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "ld1 { v26.h }[2], [x21], #0x2\n" "ld1 { v25.h }[2], [x20], #0x2\n" "ld1 { v24.h }[2], [x19], #0x2\n" - "tbz x4, #0, 11f\n" + "tbz x8, #0, 11f\n" "ld1 { v31.b }[6], [x26]\n" "ld1 { v30.b }[6], [x25]\n" "ld1 { v29.b }[6], [x24]\n" @@ -733,7 +717,7 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "ld1 { v24.b }[6], [x19]\n" "b 11f\n" "8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset - "tbz x4, #0, 11f\n" + "tbz x8, #0, 11f\n" "ld1 { v31.b }[4], [x26]\n" "ld1 { v30.b }[4], [x25]\n" "ld1 { v29.b }[4], [x24]\n" @@ -744,7 +728,7 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "ld1 { v24.b }[4], [x19]\n" "b 11f\n" "9:" // Oddments: Initial loads: Bit 2: Unset - "tbz x4, #1, 10f\n" + "tbz x8, #1, 10f\n" "ld1 { v31.h }[0], [x26], #0x2\n" "ld1 { v30.h }[0], [x25], #0x2\n" "ld1 { v29.h }[0], [x24], #0x2\n" @@ -753,7 +737,7 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "ld1 { v26.h }[0], [x21], #0x2\n" "ld1 { v25.h }[0], [x20], #0x2\n" "ld1 { v24.h }[0], [x19], #0x2\n" - "tbz x4, #0, 11f\n" + "tbz x8, #0, 11f\n" "ld1 { v31.b }[2], [x26]\n" "ld1 { v30.b }[2], [x25]\n" "ld1 { v29.b }[2], [x24]\n" @@ -764,7 +748,7 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "ld1 { v24.b }[2], [x19]\n" "b 11f\n" "10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 11f\n" + "tbz x8, #0, 11f\n" "ld1 { v31.b }[0], [x26]\n" "ld1 { v30.b }[0], [x25]\n" "ld1 { v29.b }[0], [x24]\n" @@ -774,646 +758,636 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "ld1 { v25.b }[0], [x20]\n" "ld1 { v24.b }[0], [x19]\n" "11:" // Oddments: Initial loads: Bit 2: End - "ldr x23, [x8, #0x40]\n" "usubl v31.8h, v31.8b, v12.8b\n" "smlal v15.4s, v31.4h, v8.4h\n" - "usubl v30.8h, v30.8b, v12.8b\n" "smlal2 v10.4s, v31.8h, v8.8h\n" - "usubl v29.8h, v29.8b, v12.8b\n" - "smlal v20.4s, v31.4h, v6.4h\n" - "usubl v28.8h, v28.8b, v12.8b\n" - "smlal2 v23.4s, v31.8h, v6.8h\n" - "usubl v27.8h, v27.8b, v12.8b\n" - "smlal v16.4s, v31.4h, v2.4h\n" - "usubl v26.8h, v26.8b, v12.8b\n" - "smlal2 v22.4s, v31.8h, v2.8h\n" - "usubl v25.8h, v25.8b, v12.8b\n" - "smlal v17.4s, v31.4h, v0.4h\n" - "usubl v24.8h, v24.8b, v12.8b\n" - "smlal2 v18.4s, v31.8h, v0.8h\n" - "add x23, x23, x5\n" + "ldr x24, [x12, #0x40]\n" + "usubl v30.8h, v30.8b, v12.8b\n" "smlal v15.4s, v30.4h, v0.4h\n" "smlal2 v10.4s, v30.8h, v0.8h\n" - "smlal v20.4s, v28.4h, v1.4h\n" - "smlal2 v23.4s, v28.8h, v1.8h\n" + "add x24, x24, x15\n" + "usubl v29.8h, v29.8b, v12.8b\n" + "smlal v9.4s, v31.4h, v6.4h\n" + "smlal2 v16.4s, v31.8h, v6.8h\n" "smlal v15.4s, v29.4h, v1.4h\n" "smlal2 v10.4s, v29.8h, v1.8h\n" - "smlal v20.4s, v27.4h, v2.4h\n" - "smlal2 v23.4s, v27.8h, v2.8h\n" + "usubl v28.8h, v28.8b, v12.8b\n" + "usubl v26.8h, v26.8b, v12.8b\n" + "smlal v9.4s, v28.4h, v1.4h\n" + "smlal2 v16.4s, v28.8h, v1.8h\n" "smlal v15.4s, v26.4h, v3.4h\n" "smlal2 v10.4s, v26.8h, v3.8h\n" - "smlal v20.4s, v24.4h, v0.4h\n" - "smlal2 v23.4s, v24.8h, v0.8h\n" + "usubl v27.8h, v27.8b, v12.8b\n" + "usubl v25.8h, v25.8b, v12.8b\n" + "smlal v9.4s, v27.4h, v2.4h\n" + "smlal2 v16.4s, v27.8h, v2.8h\n" "smlal v15.4s, v25.4h, v4.4h\n" "smlal2 v10.4s, v25.8h, v4.8h\n" + "usubl v24.8h, v24.8b, v12.8b\n" + "smlal v22.4s, v31.4h, v2.4h\n" + "smlal2 v21.4s, v31.8h, v2.8h\n" + "smlal v23.4s, v31.4h, v0.4h\n" + "smlal2 v18.4s, v31.8h, v0.8h\n" 
"smlal v15.4s, v24.4h, v2.4h\n" "smlal2 v10.4s, v24.8h, v2.8h\n" - "tbz x4, #2, 13f\n" - "ld1 { v29.s }[0], [x23], #0x4\n" - "tbz x4, #1, 12f\n" - "ld1 { v29.h }[2], [x23], #0x2\n" - "tbz x4, #0, 15f\n" - "ld1 { v29.b }[6], [x23]\n" + "smlal v9.4s, v24.4h, v0.4h\n" + "smlal2 v16.4s, v24.8h, v0.8h\n" + "tbz x8, #2, 13f\n" + "ld1 { v29.s }[0], [x24], #0x4\n" + "tbz x8, #1, 12f\n" + "ld1 { v29.h }[2], [x24], #0x2\n" + "tbz x8, #0, 15f\n" + "ld1 { v29.b }[6], [x24]\n" "b 15f\n" "12:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset - "tbz x4, #0, 15f\n" - "ld1 { v29.b }[4], [x23]\n" + "tbz x8, #0, 15f\n" + "ld1 { v29.b }[4], [x24]\n" "b 15f\n" "13:" // Oddments: Load (1, 3): Bit 2: Unset - "tbz x4, #1, 14f\n" - "ld1 { v29.h }[0], [x23], #0x2\n" - "tbz x4, #0, 15f\n" - "ld1 { v29.b }[2], [x23]\n" + "tbz x8, #1, 14f\n" + "ld1 { v29.h }[0], [x24], #0x2\n" + "tbz x8, #0, 15f\n" + "ld1 { v29.b }[2], [x24]\n" "b 15f\n" "14:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 15f\n" - "ld1 { v29.b }[0], [x23]\n" + "tbz x8, #0, 15f\n" + "ld1 { v29.b }[0], [x24]\n" "15:" // Oddments: Load (1, 3): Bit 2: End - "ldr x22, [x8, #0x48]\n" "usubl v29.8h, v29.8b, v12.8b\n" - "smlal v20.4s, v29.4h, v4.4h\n" - "smlal2 v23.4s, v29.8h, v4.8h\n" - "add x22, x22, x5\n" - "tbz x4, #2, 17f\n" - "ld1 { v28.s }[0], [x22], #0x4\n" - "tbz x4, #1, 16f\n" - "ld1 { v28.h }[2], [x22], #0x2\n" - "tbz x4, #0, 19f\n" - "ld1 { v28.b }[6], [x22]\n" + "ldr x23, [x12, #0x48]\n" + "smlal v9.4s, v29.4h, v4.4h\n" + "smlal2 v16.4s, v29.8h, v4.8h\n" + "add x23, x23, x15\n" + "tbz x8, #2, 17f\n" + "ld1 { v28.s }[0], [x23], #0x4\n" + "tbz x8, #1, 16f\n" + "ld1 { v28.h }[2], [x23], #0x2\n" + "tbz x8, #0, 19f\n" + "ld1 { v28.b }[6], [x23]\n" "b 19f\n" "16:" // Oddments: Load (1, 4): Bit 2: Bit 1: Unset - "tbz x4, #0, 19f\n" - "ld1 { v28.b }[4], [x22]\n" + "tbz x8, #0, 19f\n" + "ld1 { v28.b }[4], [x23]\n" "b 19f\n" "17:" // Oddments: Load (1, 4): Bit 2: Unset - "tbz x4, #1, 18f\n" - "ld1 { v28.h }[0], [x22], #0x2\n" - "tbz x4, #0, 19f\n" - "ld1 { v28.b }[2], [x22]\n" + "tbz x8, #1, 18f\n" + "ld1 { v28.h }[0], [x23], #0x2\n" + "tbz x8, #0, 19f\n" + "ld1 { v28.b }[2], [x23]\n" "b 19f\n" "18:" // Oddments: Load (1, 4): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 19f\n" - "ld1 { v28.b }[0], [x22]\n" + "tbz x8, #0, 19f\n" + "ld1 { v28.b }[0], [x23]\n" "19:" // Oddments: Load (1, 4): Bit 2: End - "ldr x21, [x8, #0x50]\n" "usubl v28.8h, v28.8b, v12.8b\n" - "smlal v20.4s, v28.4h, v5.4h\n" - "smlal2 v23.4s, v28.8h, v5.8h\n" - "add x21, x21, x5\n" - "tbz x4, #2, 21f\n" + "ldr x21, [x12, #0x50]\n" + "smlal v9.4s, v28.4h, v5.4h\n" + "smlal2 v16.4s, v28.8h, v5.8h\n" + "add x21, x21, x15\n" + "tbz x8, #2, 21f\n" "ld1 { v27.s }[0], [x21], #0x4\n" - "tbz x4, #1, 20f\n" + "tbz x8, #1, 20f\n" "ld1 { v27.h }[2], [x21], #0x2\n" - "tbz x4, #0, 23f\n" + "tbz x8, #0, 23f\n" "ld1 { v27.b }[6], [x21]\n" "b 23f\n" "20:" // Oddments: Load (1, 2): Bit 2: Bit 1: Unset - "tbz x4, #0, 23f\n" + "tbz x8, #0, 23f\n" "ld1 { v27.b }[4], [x21]\n" "b 23f\n" "21:" // Oddments: Load (1, 2): Bit 2: Unset - "tbz x4, #1, 22f\n" + "tbz x8, #1, 22f\n" "ld1 { v27.h }[0], [x21], #0x2\n" - "tbz x4, #0, 23f\n" + "tbz x8, #0, 23f\n" "ld1 { v27.b }[2], [x21]\n" "b 23f\n" "22:" // Oddments: Load (1, 2): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 23f\n" + "tbz x8, #0, 23f\n" "ld1 { v27.b }[0], [x21]\n" "23:" // Oddments: Load (1, 2): Bit 2: End - "ldr x20, [x8, #0x58]\n" "usubl v27.8h, v27.8b, v12.8b\n" + "ldr x19, [x12, #0x58]\n" "smlal v15.4s, v27.4h, v5.4h\n" "smlal2 v10.4s, 
v27.8h, v5.8h\n" - "add x20, x20, x5\n" - "smlal v20.4s, v27.4h, v3.4h\n" - "smlal2 v23.4s, v27.8h, v3.8h\n" - "tbz x4, #2, 25f\n" - "ld1 { v26.s }[0], [x20], #0x4\n" - "tbz x4, #1, 24f\n" - "ld1 { v26.h }[2], [x20], #0x2\n" - "tbz x4, #0, 27f\n" - "ld1 { v26.b }[6], [x20]\n" + "smlal v9.4s, v27.4h, v3.4h\n" + "smlal2 v16.4s, v27.8h, v3.8h\n" + "add x19, x19, x15\n" + "tbz x8, #2, 25f\n" + "ld1 { v26.s }[0], [x19], #0x4\n" + "tbz x8, #1, 24f\n" + "ld1 { v26.h }[2], [x19], #0x2\n" + "tbz x8, #0, 27f\n" + "ld1 { v26.b }[6], [x19]\n" "b 27f\n" "24:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset - "tbz x4, #0, 27f\n" - "ld1 { v26.b }[4], [x20]\n" + "tbz x8, #0, 27f\n" + "ld1 { v26.b }[4], [x19]\n" "b 27f\n" "25:" // Oddments: Load (3, 0): Bit 2: Unset - "tbz x4, #1, 26f\n" - "ld1 { v26.h }[0], [x20], #0x2\n" - "tbz x4, #0, 27f\n" - "ld1 { v26.b }[2], [x20]\n" + "tbz x8, #1, 26f\n" + "ld1 { v26.h }[0], [x19], #0x2\n" + "tbz x8, #0, 27f\n" + "ld1 { v26.b }[2], [x19]\n" "b 27f\n" "26:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 27f\n" - "ld1 { v26.b }[0], [x20]\n" + "tbz x8, #0, 27f\n" + "ld1 { v26.b }[0], [x19]\n" "27:" // Oddments: Load (3, 0): Bit 2: End - "ldr x19, [x8, #0x60]\n" "usubl v26.8h, v26.8b, v12.8b\n" - "smlal v16.4s, v26.4h, v3.4h\n" - "smlal2 v22.4s, v26.8h, v3.8h\n" - "add x19, x19, x5\n" - "tbz x4, #2, 29f\n" - "ld1 { v25.s }[0], [x19], #0x4\n" - "tbz x4, #1, 28f\n" - "ld1 { v25.h }[2], [x19], #0x2\n" - "tbz x4, #0, 31f\n" - "ld1 { v25.b }[6], [x19]\n" + "ldr x20, [x12, #0x60]\n" + "smlal v22.4s, v26.4h, v3.4h\n" + "smlal2 v21.4s, v26.8h, v3.8h\n" + "add x20, x20, x15\n" + "tbz x8, #2, 29f\n" + "ld1 { v25.s }[0], [x20], #0x4\n" + "tbz x8, #1, 28f\n" + "ld1 { v25.h }[2], [x20], #0x2\n" + "tbz x8, #0, 31f\n" + "ld1 { v25.b }[6], [x20]\n" "b 31f\n" "28:" // Oddments: Load (2, 0): Bit 2: Bit 1: Unset - "tbz x4, #0, 31f\n" - "ld1 { v25.b }[4], [x19]\n" + "tbz x8, #0, 31f\n" + "ld1 { v25.b }[4], [x20]\n" "b 31f\n" "29:" // Oddments: Load (2, 0): Bit 2: Unset - "tbz x4, #1, 30f\n" - "ld1 { v25.h }[0], [x19], #0x2\n" - "tbz x4, #0, 31f\n" - "ld1 { v25.b }[2], [x19]\n" + "tbz x8, #1, 30f\n" + "ld1 { v25.h }[0], [x20], #0x2\n" + "tbz x8, #0, 31f\n" + "ld1 { v25.b }[2], [x20]\n" "b 31f\n" "30:" // Oddments: Load (2, 0): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 31f\n" - "ld1 { v25.b }[0], [x19]\n" + "tbz x8, #0, 31f\n" + "ld1 { v25.b }[0], [x20]\n" "31:" // Oddments: Load (2, 0): Bit 2: End - "ldr x10, [x8, #0x68]\n" "usubl v25.8h, v25.8b, v12.8b\n" + "ldr x19, [x12, #0x68]\n" "smlal v15.4s, v25.4h, v6.4h\n" "smlal2 v10.4s, v25.8h, v6.8h\n" - "add x10, x10, x5\n" - "smlal v16.4s, v25.4h, v0.4h\n" - "smlal2 v22.4s, v25.8h, v0.8h\n" - "tbz x4, #2, 33f\n" - "ld1 { v29.s }[0], [x10], #0x4\n" - "tbz x4, #1, 32f\n" - "ld1 { v29.h }[2], [x10], #0x2\n" - "tbz x4, #0, 35f\n" - "ld1 { v29.b }[6], [x10]\n" + "smlal v22.4s, v25.4h, v0.4h\n" + "smlal2 v21.4s, v25.8h, v0.8h\n" + "add x19, x19, x15\n" + "tbz x8, #2, 33f\n" + "ld1 { v29.s }[0], [x19], #0x4\n" + "tbz x8, #1, 32f\n" + "ld1 { v29.h }[2], [x19], #0x2\n" + "tbz x8, #0, 35f\n" + "ld1 { v29.b }[6], [x19]\n" "b 35f\n" "32:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset - "tbz x4, #0, 35f\n" - "ld1 { v29.b }[4], [x10]\n" + "tbz x8, #0, 35f\n" + "ld1 { v29.b }[4], [x19]\n" "b 35f\n" "33:" // Oddments: Load (3, 1): Bit 2: Unset - "tbz x4, #1, 34f\n" - "ld1 { v29.h }[0], [x10], #0x2\n" - "tbz x4, #0, 35f\n" - "ld1 { v29.b }[2], [x10]\n" + "tbz x8, #1, 34f\n" + "ld1 { v29.h }[0], [x19], #0x2\n" + "tbz x8, #0, 35f\n" + "ld1 { 
v29.b }[2], [x19]\n" "b 35f\n" "34:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 35f\n" - "ld1 { v29.b }[0], [x10]\n" + "tbz x8, #0, 35f\n" + "ld1 { v29.b }[0], [x19]\n" "35:" // Oddments: Load (3, 1): Bit 2: End - "ldr x9, [x8, #0x70]\n" "usubl v29.8h, v29.8b, v12.8b\n" - "smlal v16.4s, v29.4h, v4.4h\n" - "smlal2 v22.4s, v29.8h, v4.8h\n" - "add x9, x9, x5\n" - "tbz x4, #2, 37f\n" - "ld1 { v24.s }[0], [x9], #0x4\n" - "tbz x4, #1, 36f\n" - "ld1 { v24.h }[2], [x9], #0x2\n" - "tbz x4, #0, 39f\n" - "ld1 { v24.b }[6], [x9]\n" + "ldr x19, [x12, #0x70]\n" + "smlal v22.4s, v29.4h, v4.4h\n" + "smlal2 v21.4s, v29.8h, v4.8h\n" + "add x19, x19, x15\n" + "tbz x8, #2, 37f\n" + "ld1 { v24.s }[0], [x19], #0x4\n" + "tbz x8, #1, 36f\n" + "ld1 { v24.h }[2], [x19], #0x2\n" + "tbz x8, #0, 39f\n" + "ld1 { v24.b }[6], [x19]\n" "b 39f\n" "36:" // Oddments: Load (2, 1): Bit 2: Bit 1: Unset - "tbz x4, #0, 39f\n" - "ld1 { v24.b }[4], [x9]\n" + "tbz x8, #0, 39f\n" + "ld1 { v24.b }[4], [x19]\n" "b 39f\n" "37:" // Oddments: Load (2, 1): Bit 2: Unset - "tbz x4, #1, 38f\n" - "ld1 { v24.h }[0], [x9], #0x2\n" - "tbz x4, #0, 39f\n" - "ld1 { v24.b }[2], [x9]\n" + "tbz x8, #1, 38f\n" + "ld1 { v24.h }[0], [x19], #0x2\n" + "tbz x8, #0, 39f\n" + "ld1 { v24.b }[2], [x19]\n" "b 39f\n" "38:" // Oddments: Load (2, 1): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 39f\n" - "ld1 { v24.b }[0], [x9]\n" + "tbz x8, #0, 39f\n" + "ld1 { v24.b }[0], [x19]\n" "39:" // Oddments: Load (2, 1): Bit 2: End - "ldr x28, [x8, #0x78]\n" "usubl v24.8h, v24.8b, v12.8b\n" + "ldr x22, [x12, #0x78]\n" "smlal v15.4s, v24.4h, v7.4h\n" "smlal2 v10.4s, v24.8h, v7.8h\n" - "add x28, x28, x5\n" - "smlal v16.4s, v24.4h, v1.4h\n" - "smlal2 v22.4s, v24.8h, v1.8h\n" - "tbz x4, #2, 41f\n" - "ld1 { v27.s }[0], [x28], #0x4\n" - "tbz x4, #1, 40f\n" - "ld1 { v27.h }[2], [x28], #0x2\n" - "tbz x4, #0, 43f\n" - "ld1 { v27.b }[6], [x28]\n" + "smlal v22.4s, v24.4h, v1.4h\n" + "smlal2 v21.4s, v24.8h, v1.8h\n" + "add x22, x22, x15\n" + "tbz x8, #2, 41f\n" + "ld1 { v27.s }[0], [x22], #0x4\n" + "tbz x8, #1, 40f\n" + "ld1 { v27.h }[2], [x22], #0x2\n" + "tbz x8, #0, 43f\n" + "ld1 { v27.b }[6], [x22]\n" "b 43f\n" "40:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset - "tbz x4, #0, 43f\n" - "ld1 { v27.b }[4], [x28]\n" + "tbz x8, #0, 43f\n" + "ld1 { v27.b }[4], [x22]\n" "b 43f\n" "41:" // Oddments: Load (3, 3): Bit 2: Unset - "tbz x4, #1, 42f\n" - "ld1 { v27.h }[0], [x28], #0x2\n" - "tbz x4, #0, 43f\n" - "ld1 { v27.b }[2], [x28]\n" + "tbz x8, #1, 42f\n" + "ld1 { v27.h }[0], [x22], #0x2\n" + "tbz x8, #0, 43f\n" + "ld1 { v27.b }[2], [x22]\n" "b 43f\n" "42:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 43f\n" - "ld1 { v27.b }[0], [x28]\n" + "tbz x8, #0, 43f\n" + "ld1 { v27.b }[0], [x22]\n" "43:" // Oddments: Load (3, 3): Bit 2: End - "ldr x27, [x8, #0x80]\n" "usubl v27.8h, v27.8b, v12.8b\n" - "smlal v17.4s, v27.4h, v4.4h\n" + "ldr x21, [x12, #0x80]\n" + "smlal v23.4s, v27.4h, v4.4h\n" "smlal2 v18.4s, v27.8h, v4.8h\n" - "add x27, x27, x5\n" - "tbz x4, #2, 45f\n" - "ld1 { v28.s }[0], [x27], #0x4\n" - "tbz x4, #1, 44f\n" - "ld1 { v28.h }[2], [x27], #0x2\n" - "tbz x4, #0, 47f\n" - "ld1 { v28.b }[6], [x27]\n" + "add x21, x21, x15\n" + "tbz x8, #2, 45f\n" + "ld1 { v28.s }[0], [x21], #0x4\n" + "tbz x8, #1, 44f\n" + "ld1 { v28.h }[2], [x21], #0x2\n" + "tbz x8, #0, 47f\n" + "ld1 { v28.b }[6], [x21]\n" "b 47f\n" "44:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset - "tbz x4, #0, 47f\n" - "ld1 { v28.b }[4], [x27]\n" + "tbz x8, #0, 47f\n" + "ld1 { v28.b }[4], 
[x21]\n" "b 47f\n" "45:" // Oddments: Load (2, 3): Bit 2: Unset - "tbz x4, #1, 46f\n" - "ld1 { v28.h }[0], [x27], #0x2\n" - "tbz x4, #0, 47f\n" - "ld1 { v28.b }[2], [x27]\n" + "tbz x8, #1, 46f\n" + "ld1 { v28.h }[0], [x21], #0x2\n" + "tbz x8, #0, 47f\n" + "ld1 { v28.b }[2], [x21]\n" "b 47f\n" "46:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 47f\n" - "ld1 { v28.b }[0], [x27]\n" + "tbz x8, #0, 47f\n" + "ld1 { v28.b }[0], [x21]\n" "47:" // Oddments: Load (2, 3): Bit 2: End - "ldr x26, [x8, #0x88]\n" "usubl v28.8h, v28.8b, v12.8b\n" - "smlal v20.4s, v28.4h, v7.4h\n" - "smlal2 v23.4s, v28.8h, v7.8h\n" - "add x26, x26, x5\n" - "smlal v17.4s, v28.4h, v1.4h\n" + "ldr x20, [x12, #0x88]\n" + "smlal v9.4s, v28.4h, v7.4h\n" + "smlal2 v16.4s, v28.8h, v7.8h\n" + "smlal v23.4s, v28.4h, v1.4h\n" "smlal2 v18.4s, v28.8h, v1.8h\n" - "tbz x4, #2, 49f\n" - "ld1 { v26.s }[0], [x26], #0x4\n" - "tbz x4, #1, 48f\n" - "ld1 { v26.h }[2], [x26], #0x2\n" - "tbz x4, #0, 51f\n" - "ld1 { v26.b }[6], [x26]\n" + "add x20, x20, x15\n" + "tbz x8, #2, 49f\n" + "ld1 { v26.s }[0], [x20], #0x4\n" + "tbz x8, #1, 48f\n" + "ld1 { v26.h }[2], [x20], #0x2\n" + "tbz x8, #0, 51f\n" + "ld1 { v26.b }[6], [x20]\n" "b 51f\n" "48:" // Oddments: Load (3, 4): Bit 2: Bit 1: Unset - "tbz x4, #0, 51f\n" - "ld1 { v26.b }[4], [x26]\n" + "tbz x8, #0, 51f\n" + "ld1 { v26.b }[4], [x20]\n" "b 51f\n" "49:" // Oddments: Load (3, 4): Bit 2: Unset - "tbz x4, #1, 50f\n" - "ld1 { v26.h }[0], [x26], #0x2\n" - "tbz x4, #0, 51f\n" - "ld1 { v26.b }[2], [x26]\n" + "tbz x8, #1, 50f\n" + "ld1 { v26.h }[0], [x20], #0x2\n" + "tbz x8, #0, 51f\n" + "ld1 { v26.b }[2], [x20]\n" "b 51f\n" "50:" // Oddments: Load (3, 4): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 51f\n" - "ld1 { v26.b }[0], [x26]\n" + "tbz x8, #0, 51f\n" + "ld1 { v26.b }[0], [x20]\n" "51:" // Oddments: Load (3, 4): Bit 2: End - "ldr x25, [x8, #0x90]\n" "usubl v26.8h, v26.8b, v12.8b\n" - "smlal v17.4s, v26.4h, v5.4h\n" + "ldr x23, [x12, #0x90]\n" + "smlal v23.4s, v26.4h, v5.4h\n" "smlal2 v18.4s, v26.8h, v5.8h\n" - "add x25, x25, x5\n" - "tbz x4, #2, 53f\n" - "ld1 { v25.s }[0], [x25], #0x4\n" - "tbz x4, #1, 52f\n" - "ld1 { v25.h }[2], [x25], #0x2\n" - "tbz x4, #0, 55f\n" - "ld1 { v25.b }[6], [x25]\n" + "add x23, x23, x15\n" + "tbz x8, #2, 53f\n" + "ld1 { v25.s }[0], [x23], #0x4\n" + "tbz x8, #1, 52f\n" + "ld1 { v25.h }[2], [x23], #0x2\n" + "tbz x8, #0, 55f\n" + "ld1 { v25.b }[6], [x23]\n" "b 55f\n" "52:" // Oddments: Load (4, 0): Bit 2: Bit 1: Unset - "tbz x4, #0, 55f\n" - "ld1 { v25.b }[4], [x25]\n" + "tbz x8, #0, 55f\n" + "ld1 { v25.b }[4], [x23]\n" "b 55f\n" "53:" // Oddments: Load (4, 0): Bit 2: Unset - "tbz x4, #1, 54f\n" - "ld1 { v25.h }[0], [x25], #0x2\n" - "tbz x4, #0, 55f\n" - "ld1 { v25.b }[2], [x25]\n" + "tbz x8, #1, 54f\n" + "ld1 { v25.h }[0], [x23], #0x2\n" + "tbz x8, #0, 55f\n" + "ld1 { v25.b }[2], [x23]\n" "b 55f\n" "54:" // Oddments: Load (4, 0): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 55f\n" - "ld1 { v25.b }[0], [x25]\n" + "tbz x8, #0, 55f\n" + "ld1 { v25.b }[0], [x23]\n" "55:" // Oddments: Load (4, 0): Bit 2: End - "ldr x24, [x8, #0x98]\n" "usubl v25.8h, v25.8b, v12.8b\n" - "smlal v16.4s, v25.4h, v6.4h\n" - "smlal2 v22.4s, v25.8h, v6.8h\n" - "add x24, x24, x5\n" - "tbz x4, #2, 57f\n" + "ldr x24, [x12, #0x98]\n" + "smlal v22.4s, v25.4h, v6.4h\n" + "smlal2 v21.4s, v25.8h, v6.8h\n" + "add x24, x24, x15\n" + "tbz x8, #2, 57f\n" "ld1 { v29.s }[0], [x24], #0x4\n" - "tbz x4, #1, 56f\n" + "tbz x8, #1, 56f\n" "ld1 { v29.h }[2], [x24], #0x2\n" - "tbz x4, #0, 59f\n" + "tbz 
x8, #0, 59f\n" "ld1 { v29.b }[6], [x24]\n" "b 59f\n" "56:" // Oddments: Load (2, 4): Bit 2: Bit 1: Unset - "tbz x4, #0, 59f\n" + "tbz x8, #0, 59f\n" "ld1 { v29.b }[4], [x24]\n" "b 59f\n" "57:" // Oddments: Load (2, 4): Bit 2: Unset - "tbz x4, #1, 58f\n" + "tbz x8, #1, 58f\n" "ld1 { v29.h }[0], [x24], #0x2\n" - "tbz x4, #0, 59f\n" + "tbz x8, #0, 59f\n" "ld1 { v29.b }[2], [x24]\n" "b 59f\n" "58:" // Oddments: Load (2, 4): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 59f\n" + "tbz x8, #0, 59f\n" "ld1 { v29.b }[0], [x24]\n" "59:" // Oddments: Load (2, 4): Bit 2: End - "ldr x23, [x8, #0xa0]\n" "usubl v29.8h, v29.8b, v12.8b\n" - "smlal v20.4s, v29.4h, v8.4h\n" - "smlal2 v23.4s, v29.8h, v8.8h\n" - "add x23, x23, x5\n" - "smlal v17.4s, v29.4h, v2.4h\n" + "ldr x19, [x12, #0xa0]\n" + "smlal v9.4s, v29.4h, v8.4h\n" + "smlal2 v16.4s, v29.8h, v8.8h\n" + "smlal v23.4s, v29.4h, v2.4h\n" "smlal2 v18.4s, v29.8h, v2.8h\n" - "tbz x4, #2, 61f\n" - "ld1 { v27.s }[0], [x23], #0x4\n" - "tbz x4, #1, 60f\n" - "ld1 { v27.h }[2], [x23], #0x2\n" - "tbz x4, #0, 63f\n" - "ld1 { v27.b }[6], [x23]\n" + "add x19, x19, x15\n" + "tbz x8, #2, 61f\n" + "ld1 { v27.s }[0], [x19], #0x4\n" + "tbz x8, #1, 60f\n" + "ld1 { v27.h }[2], [x19], #0x2\n" + "tbz x8, #0, 63f\n" + "ld1 { v27.b }[6], [x19]\n" "b 63f\n" "60:" // Oddments: Load (4, 1): Bit 2: Bit 1: Unset - "tbz x4, #0, 63f\n" - "ld1 { v27.b }[4], [x23]\n" + "tbz x8, #0, 63f\n" + "ld1 { v27.b }[4], [x19]\n" "b 63f\n" "61:" // Oddments: Load (4, 1): Bit 2: Unset - "tbz x4, #1, 62f\n" - "ld1 { v27.h }[0], [x23], #0x2\n" - "tbz x4, #0, 63f\n" - "ld1 { v27.b }[2], [x23]\n" + "tbz x8, #1, 62f\n" + "ld1 { v27.h }[0], [x19], #0x2\n" + "tbz x8, #0, 63f\n" + "ld1 { v27.b }[2], [x19]\n" "b 63f\n" "62:" // Oddments: Load (4, 1): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 63f\n" - "ld1 { v27.b }[0], [x23]\n" + "tbz x8, #0, 63f\n" + "ld1 { v27.b }[0], [x19]\n" "63:" // Oddments: Load (4, 1): Bit 2: End - "ldr x22, [x8, #0xa8]\n" "usubl v27.8h, v27.8b, v12.8b\n" - "smlal v16.4s, v27.4h, v7.4h\n" - "smlal2 v22.4s, v27.8h, v7.8h\n" - "add x22, x22, x5\n" - "tbz x4, #2, 65f\n" + "ldr x22, [x12, #0xa8]\n" + "smlal v22.4s, v27.4h, v7.4h\n" + "smlal2 v21.4s, v27.8h, v7.8h\n" + "add x22, x22, x15\n" + "tbz x8, #2, 65f\n" "ld1 { v24.s }[0], [x22], #0x4\n" - "tbz x4, #1, 64f\n" + "tbz x8, #1, 64f\n" "ld1 { v24.h }[2], [x22], #0x2\n" - "tbz x4, #0, 67f\n" + "tbz x8, #0, 67f\n" "ld1 { v24.b }[6], [x22]\n" "b 67f\n" "64:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset - "tbz x4, #0, 67f\n" + "tbz x8, #0, 67f\n" "ld1 { v24.b }[4], [x22]\n" "b 67f\n" "65:" // Oddments: Load (3, 2): Bit 2: Unset - "tbz x4, #1, 66f\n" + "tbz x8, #1, 66f\n" "ld1 { v24.h }[0], [x22], #0x2\n" - "tbz x4, #0, 67f\n" + "tbz x8, #0, 67f\n" "ld1 { v24.b }[2], [x22]\n" "b 67f\n" "66:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 67f\n" + "tbz x8, #0, 67f\n" "ld1 { v24.b }[0], [x22]\n" "67:" // Oddments: Load (3, 2): Bit 2: End - "ldr x21, [x8, #0xb0]\n" "usubl v24.8h, v24.8b, v12.8b\n" - "smlal v16.4s, v24.4h, v5.4h\n" - "smlal2 v22.4s, v24.8h, v5.8h\n" - "add x21, x21, x5\n" - "smlal v17.4s, v24.4h, v3.4h\n" + "ldr x21, [x12, #0xb0]\n" + "smlal v22.4s, v24.4h, v5.4h\n" + "smlal2 v21.4s, v24.8h, v5.8h\n" + "smlal v23.4s, v24.4h, v3.4h\n" "smlal2 v18.4s, v24.8h, v3.8h\n" - "tbz x4, #2, 69f\n" + "add x21, x21, x15\n" + "tbz x8, #2, 69f\n" "ld1 { v26.s }[0], [x21], #0x4\n" - "tbz x4, #1, 68f\n" + "tbz x8, #1, 68f\n" "ld1 { v26.h }[2], [x21], #0x2\n" - "tbz x4, #0, 71f\n" + "tbz x8, #0, 71f\n" "ld1 { v26.b }[6], 
[x21]\n" "b 71f\n" "68:" // Oddments: Load (4, 3): Bit 2: Bit 1: Unset - "tbz x4, #0, 71f\n" + "tbz x8, #0, 71f\n" "ld1 { v26.b }[4], [x21]\n" "b 71f\n" "69:" // Oddments: Load (4, 3): Bit 2: Unset - "tbz x4, #1, 70f\n" + "tbz x8, #1, 70f\n" "ld1 { v26.h }[0], [x21], #0x2\n" - "tbz x4, #0, 71f\n" + "tbz x8, #0, 71f\n" "ld1 { v26.b }[2], [x21]\n" "b 71f\n" "70:" // Oddments: Load (4, 3): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 71f\n" + "tbz x8, #0, 71f\n" "ld1 { v26.b }[0], [x21]\n" "71:" // Oddments: Load (4, 3): Bit 2: End - "ldr x20, [x8, #0xb8]\n" "usubl v26.8h, v26.8b, v12.8b\n" - "smlal v17.4s, v26.4h, v7.4h\n" + "ldr x20, [x12, #0xb8]\n" + "smlal v23.4s, v26.4h, v7.4h\n" "smlal2 v18.4s, v26.8h, v7.8h\n" - "add x20, x20, x5\n" - "tbz x4, #2, 73f\n" + "add x20, x20, x15\n" + "tbz x8, #2, 73f\n" "ld1 { v25.s }[0], [x20], #0x4\n" - "tbz x4, #1, 72f\n" + "tbz x8, #1, 72f\n" "ld1 { v25.h }[2], [x20], #0x2\n" - "tbz x4, #0, 75f\n" + "tbz x8, #0, 75f\n" "ld1 { v25.b }[6], [x20]\n" "b 75f\n" "72:" // Oddments: Load (4, 2): Bit 2: Bit 1: Unset - "tbz x4, #0, 75f\n" + "tbz x8, #0, 75f\n" "ld1 { v25.b }[4], [x20]\n" "b 75f\n" "73:" // Oddments: Load (4, 2): Bit 2: Unset - "tbz x4, #1, 74f\n" + "tbz x8, #1, 74f\n" "ld1 { v25.h }[0], [x20], #0x2\n" - "tbz x4, #0, 75f\n" + "tbz x8, #0, 75f\n" "ld1 { v25.b }[2], [x20]\n" "b 75f\n" "74:" // Oddments: Load (4, 2): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 75f\n" + "tbz x8, #0, 75f\n" "ld1 { v25.b }[0], [x20]\n" "75:" // Oddments: Load (4, 2): Bit 2: End - "ldr x19, [x8, #0xc0]\n" "usubl v25.8h, v25.8b, v12.8b\n" - "smlal v16.4s, v25.4h, v8.4h\n" - "smlal2 v22.4s, v25.8h, v8.8h\n" - "add x19, x19, x5\n" - "smlal v17.4s, v25.4h, v6.4h\n" + "ldr x19, [x12, #0xc0]\n" + "smlal v22.4s, v25.4h, v8.4h\n" + "smlal2 v21.4s, v25.8h, v8.8h\n" + "smlal v23.4s, v25.4h, v6.4h\n" "smlal2 v18.4s, v25.8h, v6.8h\n" - "tbz x4, #2, 77f\n" + "add x19, x19, x15\n" + "tbz x8, #2, 77f\n" "ld1 { v29.s }[0], [x19], #0x4\n" - "tbz x4, #1, 76f\n" + "tbz x8, #1, 76f\n" "ld1 { v29.h }[2], [x19], #0x2\n" - "tbz x4, #0, 79f\n" + "tbz x8, #0, 79f\n" "ld1 { v29.b }[6], [x19]\n" "b 79f\n" "76:" // Oddments: Load (4, 4): Bit 2: Bit 1: Unset - "tbz x4, #0, 79f\n" + "tbz x8, #0, 79f\n" "ld1 { v29.b }[4], [x19]\n" "b 79f\n" "77:" // Oddments: Load (4, 4): Bit 2: Unset - "tbz x4, #1, 78f\n" + "tbz x8, #1, 78f\n" "ld1 { v29.h }[0], [x19], #0x2\n" - "tbz x4, #0, 79f\n" + "tbz x8, #0, 79f\n" "ld1 { v29.b }[2], [x19]\n" "b 79f\n" "78:" // Oddments: Load (4, 4): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 79f\n" + "tbz x8, #0, 79f\n" "ld1 { v29.b }[0], [x19]\n" "79:" // Oddments: Load (4, 4): Bit 2: End "usubl v29.8h, v29.8b, v12.8b\n" - "smlal v17.4s, v29.4h, v8.4h\n" + "smlal v23.4s, v29.4h, v8.4h\n" "smlal2 v18.4s, v29.8h, v8.8h\n" - "tbz x4, #2, 81f\n" - "ld1 { v21.4s }, [x17], #0x10\n" - "ld1 { v30.4s }, [x15], #0x10\n" - "tbz x4, #1, 80f\n" - "ld1 { v31.d }[0], [x17], #0x8\n" - "ld1 { v9.d }[0], [x15], #0x8\n" - "tbz x4, #0, 83f\n" - "ld1 { v31.s }[2], [x17]\n" - "ld1 { v9.s }[2], [x15]\n" + "tbz x8, #2, 81f\n" + "ld1 { v19.4s }, [x13], #0x10\n" + "ld1 { v0.4s }, [x11], #0x10\n" + "tbz x8, #1, 80f\n" + "ld1 { v4.d }[0], [x13], #0x8\n" + "ld1 { v31.d }[0], [x11], #0x8\n" + "tbz x8, #0, 83f\n" + "ld1 { v4.s }[2], [x13]\n" + "ld1 { v31.s }[2], [x11]\n" "b 83f\n" "80:" // Oddments: Load requant params: Bit 2: Bit 1: Unset - "tbz x4, #0, 83f\n" - "ld1 { v31.s }[0], [x17]\n" - "ld1 { v9.s }[0], [x15]\n" + "tbz x8, #0, 83f\n" + "ld1 { v4.s }[0], [x13]\n" + "ld1 { v31.s }[0], [x11]\n" "b 83f\n" 
"81:" // Oddments: Load requant params: Bit 2: Unset - "tbz x4, #1, 82f\n" - "ld1 { v21.d }[0], [x17], #0x8\n" - "ld1 { v30.d }[0], [x15], #0x8\n" - "tbz x4, #0, 83f\n" - "ld1 { v21.s }[2], [x17]\n" - "ld1 { v30.s }[2], [x15]\n" + "tbz x8, #1, 82f\n" + "ld1 { v19.d }[0], [x13], #0x8\n" + "ld1 { v0.d }[0], [x11], #0x8\n" + "tbz x8, #0, 83f\n" + "ld1 { v19.s }[2], [x13]\n" + "ld1 { v0.s }[2], [x11]\n" "b 83f\n" "82:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 83f\n" - "ld1 { v21.s }[0], [x17]\n" - "ld1 { v30.s }[0], [x15]\n" + "tbz x8, #0, 83f\n" + "ld1 { v19.s }[0], [x13]\n" + "ld1 { v0.s }[0], [x11]\n" "83:" // Oddments: Load requant params: Bit 2: End - "sqrdmulh v15.4s, v15.4s, v21.4s\n" - "add x14, x14, x7\n" - "sqrdmulh v10.4s, v10.4s, v31.4s\n" - "add x13, x13, x7\n" - "sqrdmulh v20.4s, v20.4s, v21.4s\n" - "add x12, x12, x7\n" - "sqrdmulh v23.4s, v23.4s, v31.4s\n" - "add x11, x11, x7\n" - "sqrdmulh v16.4s, v16.4s, v21.4s\n" - "and v26.16b, v15.16b, v30.16b\n" - "sshr v26.4s, v26.4s, #0x1f\n" - "and v8.16b, v10.16b, v9.16b\n" - "and v4.16b, v20.16b, v30.16b\n" - "sshr v8.4s, v8.4s, #0x1f\n" - "and v2.16b, v23.16b, v9.16b\n" - "and v1.16b, v16.16b, v30.16b\n" + "sqdmulh v15.4s, v15.4s, v19.4s\n" + "sqdmulh v9.4s, v9.4s, v19.4s\n" + "add x10, x10, x14\n" + "add x9, x9, x14\n" + "sqdmulh v22.4s, v22.4s, v19.4s\n" + "sqdmulh v23.4s, v23.4s, v19.4s\n" + "add x28, x28, x14\n" + "add x27, x27, x14\n" + "and v30.16b, v15.16b, v0.16b\n" + "sqdmulh v10.4s, v10.4s, v4.4s\n" + "and v28.16b, v9.16b, v0.16b\n" + "sqdmulh v16.4s, v16.4s, v4.4s\n" + "and v29.16b, v22.16b, v0.16b\n" + "sqdmulh v21.4s, v21.4s, v4.4s\n" + "and v20.16b, v23.16b, v0.16b\n" + "sqdmulh v18.4s, v18.4s, v4.4s\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "and v19.16b, v10.16b, v31.16b\n" + "sshr v28.4s, v28.4s, #0x1f\n" + "and v4.16b, v16.16b, v31.16b\n" + "sshr v29.4s, v29.4s, #0x1f\n" + "and v5.16b, v21.16b, v31.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v26.16b, v18.16b, v31.16b\n" + "sqadd v15.4s, v15.4s, v30.4s\n" + "sshr v19.4s, v19.4s, #0x1f\n" + "sqadd v9.4s, v9.4s, v28.4s\n" "sshr v4.4s, v4.4s, #0x1f\n" - "sqrdmulh v22.4s, v22.4s, v31.4s\n" - "sshr v2.4s, v2.4s, #0x1f\n" - "sqadd v15.4s, v15.4s, v26.4s\n" - "sqrdmulh v17.4s, v17.4s, v21.4s\n" - "sshr v1.4s, v1.4s, #0x1f\n" - "sqrdmulh v18.4s, v18.4s, v31.4s\n" - "sqadd v10.4s, v10.4s, v8.4s\n" - "sqadd v20.4s, v20.4s, v4.4s\n" - "srshl v15.4s, v15.4s, v30.4s\n" - "sqadd v23.4s, v23.4s, v2.4s\n" - "srshl v10.4s, v10.4s, v9.4s\n" - "srshl v20.4s, v20.4s, v30.4s\n" - "add v15.4s, v15.4s, v11.4s\n" - "srshl v23.4s, v23.4s, v9.4s\n" - "add v10.4s, v10.4s, v11.4s\n" - "smin v15.4s, v15.4s, v14.4s\n" - "add v20.4s, v20.4s, v11.4s\n" - "smin v10.4s, v10.4s, v14.4s\n" - "smax v15.4s, v15.4s, v19.4s\n" - "smin v20.4s, v20.4s, v14.4s\n" - "smax v10.4s, v10.4s, v19.4s\n" - "add v23.4s, v23.4s, v11.4s\n" - "smax v20.4s, v20.4s, v19.4s\n" - "uzp1 v15.16b, v15.16b, v10.16b\n" - "smin v23.4s, v23.4s, v14.4s\n" + "sqadd v22.4s, v22.4s, v29.4s\n" + "sshr v5.4s, v5.4s, #0x1f\n" + "sqadd v23.4s, v23.4s, v20.4s\n" + "sshr v26.4s, v26.4s, #0x1f\n" + "srshl v15.4s, v15.4s, v0.4s\n" + "sqadd v10.4s, v10.4s, v19.4s\n" + "srshl v9.4s, v9.4s, v0.4s\n" + "sqadd v16.4s, v16.4s, v4.4s\n" + "srshl v22.4s, v22.4s, v0.4s\n" + "sqadd v21.4s, v21.4s, v5.4s\n" + "srshl v23.4s, v23.4s, v0.4s\n" + "sqadd v18.4s, v18.4s, v26.4s\n" + "srshl v10.4s, v10.4s, v31.4s\n" + "sqxtn v15.4h, v15.4s\n" + "srshl v16.4s, v16.4s, v31.4s\n" + "sqxtn v9.4h, v9.4s\n" + "srshl v21.4s, v21.4s, 
v31.4s\n" + "sqxtn v22.4h, v22.4s\n" + "srshl v18.4s, v18.4s, v31.4s\n" + "sqxtn v23.4h, v23.4s\n" + "sqxtn2 v15.8h, v10.4s\n" + "sqxtn2 v9.8h, v16.4s\n" + "sqxtn2 v22.8h, v21.4s\n" + "sqxtn2 v23.8h, v18.4s\n" + "sqadd v15.8h, v15.8h, v11.8h\n" + "sqadd v9.8h, v9.8h, v11.8h\n" + "sqadd v22.8h, v22.8h, v11.8h\n" + "sqadd v23.8h, v23.8h, v11.8h\n" + "smax v15.8h, v15.8h, v17.8h\n" + "smax v9.8h, v9.8h, v17.8h\n" + "smax v22.8h, v22.8h, v17.8h\n" + "smax v23.8h, v23.8h, v17.8h\n" + "smin v15.8h, v15.8h, v14.8h\n" + "smin v9.8h, v9.8h, v14.8h\n" + "smin v22.8h, v22.8h, v14.8h\n" + "smin v23.8h, v23.8h, v14.8h\n" "uzp1 v15.16b, v15.16b, v15.16b\n" - "sqadd v16.4s, v16.4s, v1.4s\n" - "smax v23.4s, v23.4s, v19.4s\n" - "and v24.16b, v22.16b, v9.16b\n" - "sshr v24.4s, v24.4s, #0x1f\n" - "uzp1 v20.16b, v20.16b, v23.16b\n" - "srshl v16.4s, v16.4s, v30.4s\n" - "and v2.16b, v17.16b, v30.16b\n" - "sshr v2.4s, v2.4s, #0x1f\n" - "uzp1 v20.16b, v20.16b, v20.16b\n" - "add v16.4s, v16.4s, v11.4s\n" - "sqadd v22.4s, v22.4s, v24.4s\n" - "and v31.16b, v18.16b, v9.16b\n" - "sshr v31.4s, v31.4s, #0x1f\n" - "smin v16.4s, v16.4s, v14.4s\n" - "srshl v22.4s, v22.4s, v9.4s\n" - "sqadd v17.4s, v17.4s, v2.4s\n" - "smax v16.4s, v16.4s, v19.4s\n" - "add v22.4s, v22.4s, v11.4s\n" - "srshl v17.4s, v17.4s, v30.4s\n" - "sqadd v18.4s, v18.4s, v31.4s\n" - "smin v22.4s, v22.4s, v14.4s\n" - "add v17.4s, v17.4s, v11.4s\n" - "srshl v18.4s, v18.4s, v9.4s\n" - "smax v22.4s, v22.4s, v19.4s\n" - "smin v17.4s, v17.4s, v14.4s\n" - "uzp1 v16.16b, v16.16b, v22.16b\n" - "add v18.4s, v18.4s, v11.4s\n" - "uzp1 v16.16b, v16.16b, v16.16b\n" - "smax v17.4s, v17.4s, v19.4s\n" - "smin v18.4s, v18.4s, v14.4s\n" - "smax v18.4s, v18.4s, v19.4s\n" - "uzp1 v17.16b, v17.16b, v18.16b\n" - "uzp1 v17.16b, v17.16b, v17.16b\n" - "tbz x4, #2, 85f\n" - "st1 { v15.s }[0], [x14], #0x4\n" - "st1 { v20.s }[0], [x13], #0x4\n" - "st1 { v16.s }[0], [x12], #0x4\n" - "st1 { v17.s }[0], [x11], #0x4\n" - "tbz x4, #1, 84f\n" - "st1 { v15.h }[2], [x14], #0x2\n" - "st1 { v20.h }[2], [x13], #0x2\n" - "st1 { v16.h }[2], [x12], #0x2\n" - "st1 { v17.h }[2], [x11], #0x2\n" - "tbz x4, #0, 87f\n" - "st1 { v15.b }[6], [x14], #0x1\n" - "st1 { v20.b }[6], [x13], #0x1\n" - "st1 { v16.b }[6], [x12], #0x1\n" - "st1 { v17.b }[6], [x11], #0x1\n" + "uzp1 v9.16b, v9.16b, v9.16b\n" + "uzp1 v22.16b, v22.16b, v22.16b\n" + "uzp1 v23.16b, v23.16b, v23.16b\n" + "tbz x8, #2, 85f\n" + "st1 { v15.s }[0], [x10], #0x4\n" + "st1 { v9.s }[0], [x9], #0x4\n" + "st1 { v22.s }[0], [x28], #0x4\n" + "st1 { v23.s }[0], [x27], #0x4\n" + "tbz x8, #1, 84f\n" + "st1 { v15.h }[2], [x10], #0x2\n" + "st1 { v9.h }[2], [x9], #0x2\n" + "st1 { v22.h }[2], [x28], #0x2\n" + "st1 { v23.h }[2], [x27], #0x2\n" + "tbz x8, #0, 87f\n" + "st1 { v15.b }[6], [x10], #0x1\n" + "st1 { v9.b }[6], [x9], #0x1\n" + "st1 { v22.b }[6], [x28], #0x1\n" + "st1 { v23.b }[6], [x27], #0x1\n" "b 87f\n" "84:" // Oddments: Bit 2: Bit 1: Unset - "tbz x4, #0, 87f\n" - "st1 { v15.b }[4], [x14], #0x1\n" - "st1 { v20.b }[4], [x13], #0x1\n" - "st1 { v16.b }[4], [x12], #0x1\n" - "st1 { v17.b }[4], [x11], #0x1\n" + "tbz x8, #0, 87f\n" + "st1 { v15.b }[4], [x10], #0x1\n" + "st1 { v9.b }[4], [x9], #0x1\n" + "st1 { v22.b }[4], [x28], #0x1\n" + "st1 { v23.b }[4], [x27], #0x1\n" "b 87f\n" "85:" // Oddments: Bit 2: Unset - "tbz x4, #1, 86f\n" - "st1 { v15.h }[0], [x14], #0x2\n" - "st1 { v20.h }[0], [x13], #0x2\n" - "st1 { v16.h }[0], [x12], #0x2\n" - "st1 { v17.h }[0], [x11], #0x2\n" - "tbz x4, #0, 87f\n" - "st1 { v15.b }[2], [x14], #0x1\n" - "st1 { v20.b }[2], 
[x13], #0x1\n" - "st1 { v16.b }[2], [x12], #0x1\n" - "st1 { v17.b }[2], [x11], #0x1\n" + "tbz x8, #1, 86f\n" + "st1 { v15.h }[0], [x10], #0x2\n" + "st1 { v9.h }[0], [x9], #0x2\n" + "st1 { v22.h }[0], [x28], #0x2\n" + "st1 { v23.h }[0], [x27], #0x2\n" + "tbz x8, #0, 87f\n" + "st1 { v15.b }[2], [x10], #0x1\n" + "st1 { v9.b }[2], [x9], #0x1\n" + "st1 { v22.b }[2], [x28], #0x1\n" + "st1 { v23.b }[2], [x27], #0x1\n" "b 87f\n" "86:" // Oddments: Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 87f\n" - "st1 { v15.b }[0], [x14], #0x1\n" - "st1 { v20.b }[0], [x13], #0x1\n" - "st1 { v16.b }[0], [x12], #0x1\n" - "st1 { v17.b }[0], [x11], #0x1\n" + "tbz x8, #0, 87f\n" + "st1 { v15.b }[0], [x10], #0x1\n" + "st1 { v9.b }[0], [x9], #0x1\n" + "st1 { v22.b }[0], [x28], #0x1\n" + "st1 { v23.b }[0], [x27], #0x1\n" "87:" // Oddments: Bit 2: End - "88:" // End - : : [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (¶ms) - : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" + : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" ); } -- cgit v1.2.1