/* * Copyright (c) 2021-2023 Arm Limited. * * SPDX-License-Identifier: MIT * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "arm_gemm.hpp" #include #include #if defined(__aarch64__) namespace arm_conv { namespace depthwise { void a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( const unsigned int n_channels, const int8_t *const *const inptrs, const int8_t *const weights, const int32_t *const bias, const arm_gemm::Requantize32 &qp, const int32_t *const requant_muls, const int32_t *const requant_shifts, int8_t *const *const outptrs ) { struct Params { long unsigned int n_channels; const void *weights; const int32_t *bias; const arm_gemm::Requantize32 *requant; const int32_t *const requant_muls; const int32_t *const requant_shifts; int8_t *const *const outptrs; const int8_t *inptrs[16]; Params( long unsigned int n_channels, const int8_t *const *inptrs_raw, const void *const weights, const int32_t *const bias, const arm_gemm::Requantize32 &qp, const int32_t *const requant_muls, const int32_t *const requant_shifts, int8_t *const *outptrs ) : n_channels(n_channels), weights(weights), bias(bias), requant(&qp), requant_muls(requant_muls), requant_shifts(requant_shifts), outptrs(outptrs) { inptrs[0] = inptrs_raw[5]; inptrs[1] = inptrs_raw[0]; inptrs[2] = inptrs_raw[3]; inptrs[3] = inptrs_raw[6]; inptrs[4] = inptrs_raw[9]; inptrs[5] = inptrs_raw[12]; inptrs[6] = inptrs_raw[15]; inptrs[7] = inptrs_raw[1]; inptrs[8] = inptrs_raw[2]; inptrs[9] = inptrs_raw[10]; inptrs[10] = inptrs_raw[4]; inptrs[11] = inptrs_raw[7]; inptrs[12] = inptrs_raw[8]; inptrs[13] = inptrs_raw[11]; inptrs[14] = inptrs_raw[13]; inptrs[15] = inptrs_raw[14]; } }; const Params params(n_channels, inptrs, weights, bias, qp, requant_muls, requant_shifts, outptrs); __asm__ __volatile__( "ldr x6, [%x[params], %[offsetof_Params_n_channels]]\n" "ldr x23, [%x[params], %[offsetof_Params_requant]]\n" "lsr x7, x6, #0x3\n" "add x20, x23, %[offsetof_Requantize32_a_offset]\n" "ld1r { v24.16b }, [x20]\n" "ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n" "add x21, x23, %[offsetof_Requantize32_b_offset]\n" "add x20, x23, %[offsetof_Requantize32_c_offset]\n" "ld1r { v15.16b }, [x21]\n" "ld1r { v14.8h }, [x20]\n" "add x21, x23, %[offsetof_Requantize32_minval]\n" "add x20, x23, %[offsetof_Requantize32_maxval]\n" "ld1r { v12.8h }, [x21]\n" "ld1r { v11.8h }, [x20]\n" "mov x8, #0x0\n" "mov x17, #0x0\n" "add x16, %x[params], %[offsetof_Params_inptrs]\n" "ldr x15, [%x[params], %[offsetof_Params_weights]]\n" "ldr x14, [%x[params], %[offsetof_Params_requant_muls]]\n" "ldr x13, [%x[params], %[offsetof_Params_requant_shifts]]\n" "ldp x12, x11, [x22, #0x0]\n" "ldp x10, x9, [x22, #0x10]\n" "cbz x7, 3f\n" "ldr d0, [x15, #0x0]\n" "ldr d1, [x15, #0x8]\n" "subs x7, x7, #0x1\n" "ssubl v0.8h, v0.8b, v15.8b\n" "ldr d2, [x15, #0x10]\n" "ldr d3, [x15, #0x18]\n" "ssubl v1.8h, v1.8b, v15.8b\n" "ssubl v2.8h, v2.8b, v15.8b\n" "ldr d4, [x15, #0x20]\n" "ldr d5, [x15, #0x28]\n" "ssubl v3.8h, v3.8b, v15.8b\n" "ssubl v4.8h, v4.8b, v15.8b\n" "ldr d6, [x15, #0x30]\n" "ldr d7, [x15, #0x38]\n" "ssubl v5.8h, v5.8b, v15.8b\n" "ssubl v6.8h, v6.8b, v15.8b\n" "ldr d8, [x15, #0x40]\n" "ldr x28, [%x[params], %[offsetof_Params_bias]]\n" "ssubl v7.8h, v7.8b, v15.8b\n" "ssubl v8.8h, v8.8b, v15.8b\n" "ldr q13, [x28, #0x0]\n" "ldr q20, [x28, #0x10]\n" "add x28, x28, #0x20\n" "str x28, [%x[params], %[offsetof_Params_bias]]\n" "ldp x24, x23, [x16, #0x0]\n" "ldp x22, x21, [x16, #0x10]\n" "mov v9.16b, v13.16b\n" "mov v18.16b, v20.16b\n" "ldr d31, [x24, x8]\n" "ldr d30, [x23, x8]\n" "mov v16.16b, v13.16b\n" "mov v26.16b, v20.16b\n" "ldr d29, [x22, x8]\n" "ldr d28, [x21, x8]\n" "mov v25.16b, v13.16b\n" "mov v10.16b, v20.16b\n" "ldr x20, [x16, #0x20]\n" "ldr d27, [x20, x8]\n" "ssubl v31.8h, v31.8b, v24.8b\n" "ssubl v30.8h, v30.8b, v24.8b\n" "ssubl v29.8h, v29.8b, v24.8b\n" "ssubl v28.8h, v28.8b, v24.8b\n" "ssubl v27.8h, v27.8b, v24.8b\n" "beq 2f\n" "1:" // Loop "ldr q17, [x14, #0x0]\n" "ldr q22, [x13, #0x0]\n" "smlal v13.4s, v31.4h, v4.4h\n" "smlal2 v20.4s, v31.8h, v4.8h\n" "ldr q23, [x14, #0x10]\n" "smlal v9.4s, v31.4h, v3.4h\n" "smlal2 v18.4s, v31.8h, v3.8h\n" "ldr x21, [x16, #0x28]\n" "smlal v13.4s, v30.4h, v0.4h\n" "smlal2 v20.4s, v30.8h, v0.8h\n" "ldr q19, [x13, #0x10]\n" "ldr x28, [x16, #0x38]\n" "smlal v9.4s, v29.4h, v2.4h\n" "smlal2 v18.4s, v29.8h, v2.8h\n" "ldr x20, [x16, #0x30]\n" "ldr d29, [x20, x8]\n" "smlal v16.4s, v31.4h, v1.4h\n" "smlal2 v26.4s, v31.8h, v1.8h\n" "ldr x27, [x16, #0x40]\n" "ldr x26, [x16, #0x48]\n" "smlal v25.4s, v31.4h, v0.4h\n" "smlal2 v10.4s, v31.8h, v0.8h\n" "ldr d31, [x21, x8]\n" "ssubl v31.8h, v31.8b, v24.8b\n" "smlal v13.4s, v28.4h, v5.4h\n" "smlal2 v20.4s, v28.8h, v5.8h\n" "ssubl v29.8h, v29.8b, v24.8b\n" "ldr x25, [x16, #0x50]\n" "smlal v9.4s, v28.4h, v4.4h\n" "smlal2 v18.4s, v28.8h, v4.8h\n" "ldr x24, [x16, #0x58]\n" "ldr x23, [x16, #0x60]\n" "smlal v16.4s, v28.4h, v2.4h\n" "smlal2 v26.4s, v28.8h, v2.8h\n" "ldr x22, [x16, #0x68]\n" "ldr x21, [x16, #0x70]\n" "smlal v25.4s, v28.4h, v1.4h\n" "smlal2 v10.4s, v28.8h, v1.8h\n" "ldr d28, [x28, x8]\n" "ssubl v28.8h, v28.8b, v24.8b\n" "smlal v13.4s, v27.4h, v7.4h\n" "smlal2 v20.4s, v27.8h, v7.8h\n" "ldr x20, [x16, #0x78]\n" "ldr x28, [%x[params], %[offsetof_Params_bias]]\n" "smlal v9.4s, v27.4h, v6.4h\n" "smlal2 v18.4s, v27.8h, v6.8h\n" "add x15, x15, #0x48\n" "subs x7, x7, #0x1\n" "smlal v16.4s, v31.4h, v6.4h\n" "smlal2 v26.4s, v31.8h, v6.8h\n" "ldr d31, [x27, x8]\n" "ssubl v31.8h, v31.8b, v24.8b\n" "smlal v25.4s, v27.4h, v3.4h\n" "smlal2 v10.4s, v27.8h, v3.8h\n" "add x14, x14, #0x20\n" "add x13, x13, #0x20\n" "smlal v13.4s, v28.4h, v1.4h\n" "smlal2 v20.4s, v28.8h, v1.8h\n" "smlal v9.4s, v28.4h, v0.4h\n" "smlal2 v18.4s, v28.8h, v0.8h\n" "ldr d30, [x26, x8]\n" "ssubl v30.8h, v30.8b, v24.8b\n" "smlal v16.4s, v27.4h, v4.4h\n" "smlal v25.4s, v29.4h, v8.4h\n" "smlal2 v26.4s, v27.8h, v4.8h\n" "ldr d28, [x24, x8]\n" "smlal2 v10.4s, v29.8h, v8.8h\n" "ldr d29, [x25, x8]\n" "smlal v13.4s, v31.4h, v2.4h\n" "smlal2 v20.4s, v31.8h, v2.8h\n" "ssubl v29.8h, v29.8b, v24.8b\n" "smlal v9.4s, v31.4h, v1.4h\n" "smlal2 v18.4s, v31.8h, v1.8h\n" "ldr d31, [x23, x8]\n" "ssubl v28.8h, v28.8b, v24.8b\n" "smlal v16.4s, v30.4h, v5.4h\n" "smlal v25.4s, v30.4h, v4.4h\n" "ssubl v31.8h, v31.8b, v24.8b\n" "smlal v13.4s, v30.4h, v8.4h\n" "smlal2 v20.4s, v30.8h, v8.8h\n" "smlal v9.4s, v30.4h, v7.4h\n" "smlal2 v18.4s, v30.8h, v7.8h\n" "smlal2 v26.4s, v30.8h, v5.8h\n" "smlal2 v10.4s, v30.8h, v4.8h\n" "ldr d30, [x22, x8]\n" "ssubl v30.8h, v30.8b, v24.8b\n" "smlal v16.4s, v29.4h, v0.4h\n" "smlal v25.4s, v28.4h, v2.4h\n" "smlal v13.4s, v29.4h, v3.4h\n" "smlal2 v20.4s, v29.8h, v3.8h\n" "smlal2 v26.4s, v29.8h, v0.8h\n" "ldr d29, [x21, x8]\n" "smlal2 v10.4s, v28.8h, v2.8h\n" "ssubl v29.8h, v29.8b, v24.8b\n" "smlal v16.4s, v31.4h, v3.4h\n" "smlal v25.4s, v30.4h, v5.4h\n" "smlal v9.4s, v28.4h, v5.4h\n" "smlal2 v18.4s, v28.8h, v5.8h\n" "ldr d28, [x20, x8]\n" "ssubl v28.8h, v28.8b, v24.8b\n" "smlal v13.4s, v31.4h, v6.4h\n" "smlal2 v26.4s, v31.8h, v3.8h\n" "sqrdmulh v13.4s, v13.4s, v17.4s\n" "add x8, x8, #0x8\n" "smlal2 v10.4s, v30.8h, v5.8h\n" "smlal v16.4s, v29.4h, v7.4h\n" "and v21.16b, v13.16b, v22.16b\n" "smlal v25.4s, v29.4h, v6.4h\n" "smlal2 v20.4s, v31.8h, v6.8h\n" "sqrdmulh v20.4s, v20.4s, v23.4s\n" "smlal2 v26.4s, v29.8h, v7.8h\n" "smlal2 v10.4s, v29.8h, v6.8h\n" "sshr v21.4s, v21.4s, #0x1f\n" "smlal v9.4s, v30.4h, v8.4h\n" "smlal v16.4s, v28.4h, v8.4h\n" "and v29.16b, v20.16b, v19.16b\n" "smlal v25.4s, v28.4h, v7.4h\n" "smlal2 v18.4s, v30.8h, v8.8h\n" "sqrdmulh v9.4s, v9.4s, v17.4s\n" "smlal2 v26.4s, v28.8h, v8.8h\n" "smlal2 v10.4s, v28.8h, v7.8h\n" "sqrdmulh v16.4s, v16.4s, v17.4s\n" "sqrdmulh v25.4s, v25.4s, v17.4s\n" "sqadd v13.4s, v13.4s, v21.4s\n" "sshr v29.4s, v29.4s, #0x1f\n" "and v0.16b, v9.16b, v22.16b\n" "sqrdmulh v18.4s, v18.4s, v23.4s\n" "and v27.16b, v16.16b, v22.16b\n" "sqrdmulh v26.4s, v26.4s, v23.4s\n" "and v21.16b, v25.16b, v22.16b\n" "sqrdmulh v10.4s, v10.4s, v23.4s\n" "sqadd v20.4s, v20.4s, v29.4s\n" "sshr v0.4s, v0.4s, #0x1f\n" "and v17.16b, v18.16b, v19.16b\n" "sshr v27.4s, v27.4s, #0x1f\n" "and v7.16b, v26.16b, v19.16b\n" "sshr v21.4s, v21.4s, #0x1f\n" "and v29.16b, v10.16b, v19.16b\n" "sqadd v9.4s, v9.4s, v0.4s\n" "sshr v17.4s, v17.4s, #0x1f\n" "sqadd v16.4s, v16.4s, v27.4s\n" "sshr v7.4s, v7.4s, #0x1f\n" "sqadd v25.4s, v25.4s, v21.4s\n" "sshr v29.4s, v29.4s, #0x1f\n" "srshl v13.4s, v13.4s, v22.4s\n" "srshl v9.4s, v9.4s, v22.4s\n" "sqadd v18.4s, v18.4s, v17.4s\n" "srshl v16.4s, v16.4s, v22.4s\n" "sqadd v26.4s, v26.4s, v7.4s\n" "srshl v25.4s, v25.4s, v22.4s\n" "sqadd v10.4s, v10.4s, v29.4s\n" "srshl v20.4s, v20.4s, v19.4s\n" "sqxtn v13.4h, v13.4s\n" "srshl v18.4s, v18.4s, v19.4s\n" "sqxtn v9.4h, v9.4s\n" "srshl v26.4s, v26.4s, v19.4s\n" "sqxtn v16.4h, v16.4s\n" "srshl v10.4s, v10.4s, v19.4s\n" "sqxtn v25.4h, v25.4s\n" "sqxtn2 v13.8h, v20.4s\n" "sqxtn2 v9.8h, v18.4s\n" "sqxtn2 v16.8h, v26.4s\n" "sqxtn2 v25.8h, v10.4s\n" "sqadd v13.8h, v13.8h, v14.8h\n" "sqadd v9.8h, v9.8h, v14.8h\n" "sqadd v16.8h, v16.8h, v14.8h\n" "sqadd v25.8h, v25.8h, v14.8h\n" "smax v13.8h, v13.8h, v12.8h\n" "smax v9.8h, v9.8h, v12.8h\n" "smax v16.8h, v16.8h, v12.8h\n" "smax v25.8h, v25.8h, v12.8h\n" "smin v13.8h, v13.8h, v11.8h\n" "smin v9.8h, v9.8h, v11.8h\n" "smin v16.8h, v16.8h, v11.8h\n" "smin v25.8h, v25.8h, v11.8h\n" "uzp1 v13.16b, v13.16b, v13.16b\n" "str d13, [x12, x17]\n" "uzp1 v9.16b, v9.16b, v9.16b\n" "uzp1 v16.16b, v16.16b, v16.16b\n" "str d9, [x11, x17]\n" "uzp1 v25.16b, v25.16b, v25.16b\n" "str d16, [x10, x17]\n" "str d25, [x9, x17]\n" "ldr q13, [x28, #0x0]\n" "ldr q20, [x28, #0x10]\n" "add x28, x28, #0x20\n" "ldr d0, [x15, #0x0]\n" "ldr d1, [x15, #0x8]\n" "add x17, x17, #0x8\n" "str x28, [%x[params], %[offsetof_Params_bias]]\n" "ldr d2, [x15, #0x10]\n" "ldr d3, [x15, #0x18]\n" "mov v9.16b, v13.16b\n" "mov v18.16b, v20.16b\n" "ldr d4, [x15, #0x20]\n" "ldr d5, [x15, #0x28]\n" "mov v16.16b, v13.16b\n" "mov v26.16b, v20.16b\n" "ldr d6, [x15, #0x30]\n" "ldr d7, [x15, #0x38]\n" "mov v25.16b, v13.16b\n" "mov v10.16b, v20.16b\n" "ldr d8, [x15, #0x40]\n" "ldp x24, x23, [x16, #0x0]\n" "ssubl v0.8h, v0.8b, v15.8b\n" "ssubl v1.8h, v1.8b, v15.8b\n" "ldp x22, x21, [x16, #0x10]\n" "ldr d31, [x24, x8]\n" "ssubl v2.8h, v2.8b, v15.8b\n" "ssubl v3.8h, v3.8b, v15.8b\n" "ldr d30, [x23, x8]\n" "ldr d29, [x22, x8]\n" "ssubl v4.8h, v4.8b, v15.8b\n" "ssubl v5.8h, v5.8b, v15.8b\n" "ldr d28, [x21, x8]\n" "ldr x20, [x16, #0x20]\n" "ssubl v6.8h, v6.8b, v15.8b\n" "ssubl v7.8h, v7.8b, v15.8b\n" "ldr d27, [x20, x8]\n" "ssubl v8.8h, v8.8b, v15.8b\n" "ssubl v31.8h, v31.8b, v24.8b\n" "ssubl v30.8h, v30.8b, v24.8b\n" "ssubl v29.8h, v29.8b, v24.8b\n" "ssubl v28.8h, v28.8b, v24.8b\n" "ssubl v27.8h, v27.8b, v24.8b\n" "bgt 1b\n" "2:" // Tail "ldr q17, [x14, #0x0]\n" "ldr q22, [x13, #0x0]\n" "smlal v13.4s, v31.4h, v4.4h\n" "smlal2 v20.4s, v31.8h, v4.8h\n" "ldr q23, [x14, #0x10]\n" "smlal v9.4s, v31.4h, v3.4h\n" "smlal2 v18.4s, v31.8h, v3.8h\n" "ldr x21, [x16, #0x28]\n" "smlal v13.4s, v30.4h, v0.4h\n" "smlal2 v20.4s, v30.8h, v0.8h\n" "ldr q19, [x13, #0x10]\n" "ldr x28, [x16, #0x38]\n" "smlal v9.4s, v29.4h, v2.4h\n" "smlal2 v18.4s, v29.8h, v2.8h\n" "ldr x20, [x16, #0x30]\n" "ldr d29, [x20, x8]\n" "smlal v16.4s, v31.4h, v1.4h\n" "smlal2 v26.4s, v31.8h, v1.8h\n" "ldr x27, [x16, #0x40]\n" "ldr x26, [x16, #0x48]\n" "smlal v25.4s, v31.4h, v0.4h\n" "smlal2 v10.4s, v31.8h, v0.8h\n" "ldr d31, [x21, x8]\n" "ssubl v31.8h, v31.8b, v24.8b\n" "smlal v13.4s, v28.4h, v5.4h\n" "smlal2 v20.4s, v28.8h, v5.8h\n" "ssubl v29.8h, v29.8b, v24.8b\n" "ldr x25, [x16, #0x50]\n" "smlal v9.4s, v28.4h, v4.4h\n" "smlal2 v18.4s, v28.8h, v4.8h\n" "ldr x24, [x16, #0x58]\n" "ldr x23, [x16, #0x60]\n" "smlal v16.4s, v28.4h, v2.4h\n" "smlal2 v26.4s, v28.8h, v2.8h\n" "ldr x22, [x16, #0x68]\n" "ldr x21, [x16, #0x70]\n" "smlal v25.4s, v28.4h, v1.4h\n" "smlal2 v10.4s, v28.8h, v1.8h\n" "ldr d28, [x28, x8]\n" "ssubl v28.8h, v28.8b, v24.8b\n" "smlal v13.4s, v27.4h, v7.4h\n" "smlal2 v20.4s, v27.8h, v7.8h\n" "ldr x20, [x16, #0x78]\n" "tst x6, #0x7\n" "smlal v9.4s, v27.4h, v6.4h\n" "smlal2 v18.4s, v27.8h, v6.8h\n" "add x14, x14, #0x20\n" "add x13, x13, #0x20\n" "smlal v16.4s, v31.4h, v6.4h\n" "smlal2 v26.4s, v31.8h, v6.8h\n" "ldr d31, [x27, x8]\n" "ssubl v31.8h, v31.8b, v24.8b\n" "smlal v25.4s, v27.4h, v3.4h\n" "smlal2 v10.4s, v27.8h, v3.8h\n" "smlal v13.4s, v28.4h, v1.4h\n" "smlal2 v20.4s, v28.8h, v1.8h\n" "smlal v9.4s, v28.4h, v0.4h\n" "smlal2 v18.4s, v28.8h, v0.8h\n" "ldr d30, [x26, x8]\n" "ssubl v30.8h, v30.8b, v24.8b\n" "smlal v16.4s, v27.4h, v4.4h\n" "smlal v25.4s, v29.4h, v8.4h\n" "smlal2 v26.4s, v27.8h, v4.8h\n" "ldr d28, [x24, x8]\n" "smlal2 v10.4s, v29.8h, v8.8h\n" "ldr d29, [x25, x8]\n" "smlal v13.4s, v31.4h, v2.4h\n" "smlal2 v20.4s, v31.8h, v2.8h\n" "ssubl v29.8h, v29.8b, v24.8b\n" "smlal v9.4s, v31.4h, v1.4h\n" "smlal2 v18.4s, v31.8h, v1.8h\n" "ldr d31, [x23, x8]\n" "ssubl v28.8h, v28.8b, v24.8b\n" "smlal v16.4s, v30.4h, v5.4h\n" "smlal v25.4s, v30.4h, v4.4h\n" "ssubl v31.8h, v31.8b, v24.8b\n" "smlal v13.4s, v30.4h, v8.4h\n" "smlal2 v20.4s, v30.8h, v8.8h\n" "smlal v9.4s, v30.4h, v7.4h\n" "smlal2 v18.4s, v30.8h, v7.8h\n" "smlal2 v26.4s, v30.8h, v5.8h\n" "smlal2 v10.4s, v30.8h, v4.8h\n" "ldr d30, [x22, x8]\n" "ssubl v30.8h, v30.8b, v24.8b\n" "smlal v16.4s, v29.4h, v0.4h\n" "smlal v25.4s, v28.4h, v2.4h\n" "smlal v13.4s, v29.4h, v3.4h\n" "smlal2 v20.4s, v29.8h, v3.8h\n" "smlal2 v26.4s, v29.8h, v0.8h\n" "ldr d29, [x21, x8]\n" "smlal2 v10.4s, v28.8h, v2.8h\n" "ssubl v29.8h, v29.8b, v24.8b\n" "smlal v16.4s, v31.4h, v3.4h\n" "smlal v25.4s, v30.4h, v5.4h\n" "smlal v9.4s, v28.4h, v5.4h\n" "smlal2 v18.4s, v28.8h, v5.8h\n" "ldr d28, [x20, x8]\n" "ssubl v28.8h, v28.8b, v24.8b\n" "smlal v13.4s, v31.4h, v6.4h\n" "smlal2 v26.4s, v31.8h, v3.8h\n" "sqrdmulh v13.4s, v13.4s, v17.4s\n" "add x8, x8, #0x8\n" "smlal2 v10.4s, v30.8h, v5.8h\n" "smlal v16.4s, v29.4h, v7.4h\n" "and v21.16b, v13.16b, v22.16b\n" "smlal v25.4s, v29.4h, v6.4h\n" "smlal2 v20.4s, v31.8h, v6.8h\n" "sqrdmulh v20.4s, v20.4s, v23.4s\n" "smlal2 v26.4s, v29.8h, v7.8h\n" "smlal2 v10.4s, v29.8h, v6.8h\n" "sshr v21.4s, v21.4s, #0x1f\n" "smlal v9.4s, v30.4h, v8.4h\n" "smlal v16.4s, v28.4h, v8.4h\n" "and v29.16b, v20.16b, v19.16b\n" "smlal v25.4s, v28.4h, v7.4h\n" "smlal2 v18.4s, v30.8h, v8.8h\n" "sqrdmulh v9.4s, v9.4s, v17.4s\n" "smlal2 v26.4s, v28.8h, v8.8h\n" "smlal2 v10.4s, v28.8h, v7.8h\n" "sqrdmulh v16.4s, v16.4s, v17.4s\n" "sqrdmulh v25.4s, v25.4s, v17.4s\n" "sqadd v13.4s, v13.4s, v21.4s\n" "sshr v29.4s, v29.4s, #0x1f\n" "and v0.16b, v9.16b, v22.16b\n" "sqrdmulh v18.4s, v18.4s, v23.4s\n" "and v27.16b, v16.16b, v22.16b\n" "sqrdmulh v26.4s, v26.4s, v23.4s\n" "and v21.16b, v25.16b, v22.16b\n" "sqrdmulh v10.4s, v10.4s, v23.4s\n" "sqadd v20.4s, v20.4s, v29.4s\n" "sshr v0.4s, v0.4s, #0x1f\n" "and v17.16b, v18.16b, v19.16b\n" "sshr v27.4s, v27.4s, #0x1f\n" "and v7.16b, v26.16b, v19.16b\n" "sshr v21.4s, v21.4s, #0x1f\n" "and v29.16b, v10.16b, v19.16b\n" "sqadd v9.4s, v9.4s, v0.4s\n" "sshr v17.4s, v17.4s, #0x1f\n" "sqadd v16.4s, v16.4s, v27.4s\n" "sshr v7.4s, v7.4s, #0x1f\n" "sqadd v25.4s, v25.4s, v21.4s\n" "sshr v29.4s, v29.4s, #0x1f\n" "srshl v13.4s, v13.4s, v22.4s\n" "srshl v9.4s, v9.4s, v22.4s\n" "sqadd v18.4s, v18.4s, v17.4s\n" "srshl v16.4s, v16.4s, v22.4s\n" "sqadd v26.4s, v26.4s, v7.4s\n" "srshl v25.4s, v25.4s, v22.4s\n" "sqadd v10.4s, v10.4s, v29.4s\n" "srshl v20.4s, v20.4s, v19.4s\n" "sqxtn v13.4h, v13.4s\n" "srshl v18.4s, v18.4s, v19.4s\n" "sqxtn v9.4h, v9.4s\n" "srshl v26.4s, v26.4s, v19.4s\n" "sqxtn v16.4h, v16.4s\n" "srshl v10.4s, v10.4s, v19.4s\n" "sqxtn v25.4h, v25.4s\n" "sqxtn2 v13.8h, v20.4s\n" "sqxtn2 v9.8h, v18.4s\n" "sqxtn2 v16.8h, v26.4s\n" "sqxtn2 v25.8h, v10.4s\n" "sqadd v13.8h, v13.8h, v14.8h\n" "sqadd v9.8h, v9.8h, v14.8h\n" "sqadd v16.8h, v16.8h, v14.8h\n" "sqadd v25.8h, v25.8h, v14.8h\n" "smax v13.8h, v13.8h, v12.8h\n" "smax v9.8h, v9.8h, v12.8h\n" "smax v16.8h, v16.8h, v12.8h\n" "smax v25.8h, v25.8h, v12.8h\n" "smin v13.8h, v13.8h, v11.8h\n" "smin v9.8h, v9.8h, v11.8h\n" "smin v16.8h, v16.8h, v11.8h\n" "smin v25.8h, v25.8h, v11.8h\n" "uzp1 v13.16b, v13.16b, v13.16b\n" "str d13, [x12, x17]\n" "uzp1 v9.16b, v9.16b, v9.16b\n" "uzp1 v16.16b, v16.16b, v16.16b\n" "str d9, [x11, x17]\n" "uzp1 v25.16b, v25.16b, v25.16b\n" "str d16, [x10, x17]\n" "str d25, [x9, x17]\n" "add x17, x17, #0x8\n" "beq 64f\n" "add x15, x15, #0x48\n" "3:" // Oddments "ldr x28, [%x[params], %[offsetof_Params_bias]]\n" "tbz x6, #2, 5f\n" "ld1 { v13.4s }, [x28], #0x10\n" "tbz x6, #1, 4f\n" "ld1 { v20.d }[0], [x28], #0x8\n" "tbz x6, #0, 7f\n" "ld1 { v20.s }[2], [x28]\n" "b 7f\n" "4:" // Oddments: Load bias: Bit 2: Bit 1: Unset "tbz x6, #0, 7f\n" "ld1 { v20.s }[0], [x28]\n" "b 7f\n" "5:" // Oddments: Load bias: Bit 2: Unset "tbz x6, #1, 6f\n" "ld1 { v13.d }[0], [x28], #0x8\n" "tbz x6, #0, 7f\n" "ld1 { v13.s }[2], [x28]\n" "b 7f\n" "6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset "tbz x6, #0, 7f\n" "ld1 { v13.s }[0], [x28]\n" "7:" // Oddments: Load bias: Bit 2: End "ldr d0, [x15, #0x0]\n" "ldr d1, [x15, #0x8]\n" "mov v9.16b, v13.16b\n" "mov v18.16b, v20.16b\n" "ldr d2, [x15, #0x10]\n" "ldr d3, [x15, #0x18]\n" "mov v16.16b, v13.16b\n" "mov v26.16b, v20.16b\n" "ldr d4, [x15, #0x20]\n" "ldr d5, [x15, #0x28]\n" "mov v25.16b, v13.16b\n" "mov v10.16b, v20.16b\n" "ldr d6, [x15, #0x30]\n" "ldr d7, [x15, #0x38]\n" "ssubl v0.8h, v0.8b, v15.8b\n" "ssubl v1.8h, v1.8b, v15.8b\n" "ldr d8, [x15, #0x40]\n" "ldp x24, x23, [x16, #0x0]\n" "ssubl v2.8h, v2.8b, v15.8b\n" "ssubl v3.8h, v3.8b, v15.8b\n" "ldp x22, x21, [x16, #0x10]\n" "ldr x20, [x16, #0x20]\n" "ssubl v4.8h, v4.8b, v15.8b\n" "ssubl v5.8h, v5.8b, v15.8b\n" "ssubl v6.8h, v6.8b, v15.8b\n" "ssubl v7.8h, v7.8b, v15.8b\n" "ssubl v8.8h, v8.8b, v15.8b\n" "add x24, x24, x8\n" "add x23, x23, x8\n" "add x22, x22, x8\n" "add x21, x21, x8\n" "add x20, x20, x8\n" "tbz x6, #2, 9f\n" "ld1 { v31.s }[0], [x24], #0x4\n" "ld1 { v30.s }[0], [x23], #0x4\n" "ld1 { v29.s }[0], [x22], #0x4\n" "ld1 { v28.s }[0], [x21], #0x4\n" "ld1 { v27.s }[0], [x20], #0x4\n" "tbz x6, #1, 8f\n" "ld1 { v31.h }[2], [x24], #0x2\n" "ld1 { v30.h }[2], [x23], #0x2\n" "ld1 { v29.h }[2], [x22], #0x2\n" "ld1 { v28.h }[2], [x21], #0x2\n" "ld1 { v27.h }[2], [x20], #0x2\n" "tbz x6, #0, 11f\n" "ld1 { v31.b }[6], [x24]\n" "ld1 { v30.b }[6], [x23]\n" "ld1 { v29.b }[6], [x22]\n" "ld1 { v28.b }[6], [x21]\n" "ld1 { v27.b }[6], [x20]\n" "b 11f\n" "8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset "tbz x6, #0, 11f\n" "ld1 { v31.b }[4], [x24]\n" "ld1 { v30.b }[4], [x23]\n" "ld1 { v29.b }[4], [x22]\n" "ld1 { v28.b }[4], [x21]\n" "ld1 { v27.b }[4], [x20]\n" "b 11f\n" "9:" // Oddments: Initial loads: Bit 2: Unset "tbz x6, #1, 10f\n" "ld1 { v31.h }[0], [x24], #0x2\n" "ld1 { v30.h }[0], [x23], #0x2\n" "ld1 { v29.h }[0], [x22], #0x2\n" "ld1 { v28.h }[0], [x21], #0x2\n" "ld1 { v27.h }[0], [x20], #0x2\n" "tbz x6, #0, 11f\n" "ld1 { v31.b }[2], [x24]\n" "ld1 { v30.b }[2], [x23]\n" "ld1 { v29.b }[2], [x22]\n" "ld1 { v28.b }[2], [x21]\n" "ld1 { v27.b }[2], [x20]\n" "b 11f\n" "10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset "tbz x6, #0, 11f\n" "ld1 { v31.b }[0], [x24]\n" "ld1 { v30.b }[0], [x23]\n" "ld1 { v29.b }[0], [x22]\n" "ld1 { v28.b }[0], [x21]\n" "ld1 { v27.b }[0], [x20]\n" "11:" // Oddments: Initial loads: Bit 2: End "ssubl v31.8h, v31.8b, v24.8b\n" "smlal v13.4s, v31.4h, v4.4h\n" "smlal2 v20.4s, v31.8h, v4.8h\n" "ldr x21, [x16, #0x28]\n" "smlal v9.4s, v31.4h, v3.4h\n" "smlal2 v18.4s, v31.8h, v3.8h\n" "ssubl v30.8h, v30.8b, v24.8b\n" "add x21, x21, x8\n" "ssubl v29.8h, v29.8b, v24.8b\n" "smlal v16.4s, v31.4h, v1.4h\n" "smlal2 v26.4s, v31.8h, v1.8h\n" "smlal v25.4s, v31.4h, v0.4h\n" "smlal2 v10.4s, v31.8h, v0.8h\n" "ssubl v28.8h, v28.8b, v24.8b\n" "smlal v13.4s, v30.4h, v0.4h\n" "smlal2 v20.4s, v30.8h, v0.8h\n" "ssubl v27.8h, v27.8b, v24.8b\n" "smlal v9.4s, v29.4h, v2.4h\n" "smlal2 v18.4s, v29.8h, v2.8h\n" "smlal v13.4s, v28.4h, v5.4h\n" "smlal2 v20.4s, v28.8h, v5.8h\n" "smlal v9.4s, v28.4h, v4.4h\n" "smlal2 v18.4s, v28.8h, v4.8h\n" "smlal v16.4s, v28.4h, v2.4h\n" "smlal2 v26.4s, v28.8h, v2.8h\n" "smlal v25.4s, v28.4h, v1.4h\n" "smlal2 v10.4s, v28.8h, v1.8h\n" "tbz x6, #2, 13f\n" "ld1 { v31.s }[0], [x21], #0x4\n" "tbz x6, #1, 12f\n" "ld1 { v31.h }[2], [x21], #0x2\n" "tbz x6, #0, 15f\n" "ld1 { v31.b }[6], [x21]\n" "b 15f\n" "12:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset "tbz x6, #0, 15f\n" "ld1 { v31.b }[4], [x21]\n" "b 15f\n" "13:" // Oddments: Load (3, 0): Bit 2: Unset "tbz x6, #1, 14f\n" "ld1 { v31.h }[0], [x21], #0x2\n" "tbz x6, #0, 15f\n" "ld1 { v31.b }[2], [x21]\n" "b 15f\n" "14:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset "tbz x6, #0, 15f\n" "ld1 { v31.b }[0], [x21]\n" "15:" // Oddments: Load (3, 0): Bit 2: End "ssubl v31.8h, v31.8b, v24.8b\n" "smlal v16.4s, v31.4h, v6.4h\n" "smlal2 v26.4s, v31.8h, v6.8h\n" "ldr x20, [x16, #0x30]\n" "smlal v13.4s, v27.4h, v7.4h\n" "smlal2 v20.4s, v27.8h, v7.8h\n" "add x20, x20, x8\n" "smlal v9.4s, v27.4h, v6.4h\n" "smlal2 v18.4s, v27.8h, v6.8h\n" "smlal v16.4s, v27.4h, v4.4h\n" "smlal2 v26.4s, v27.8h, v4.8h\n" "smlal v25.4s, v27.4h, v3.4h\n" "smlal2 v10.4s, v27.8h, v3.8h\n" "tbz x6, #2, 17f\n" "ld1 { v29.s }[0], [x20], #0x4\n" "tbz x6, #1, 16f\n" "ld1 { v29.h }[2], [x20], #0x2\n" "tbz x6, #0, 19f\n" "ld1 { v29.b }[6], [x20]\n" "b 19f\n" "16:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset "tbz x6, #0, 19f\n" "ld1 { v29.b }[4], [x20]\n" "b 19f\n" "17:" // Oddments: Load (3, 3): Bit 2: Unset "tbz x6, #1, 18f\n" "ld1 { v29.h }[0], [x20], #0x2\n" "tbz x6, #0, 19f\n" "ld1 { v29.b }[2], [x20]\n" "b 19f\n" "18:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset "tbz x6, #0, 19f\n" "ld1 { v29.b }[0], [x20]\n" "19:" // Oddments: Load (3, 3): Bit 2: End "ssubl v29.8h, v29.8b, v24.8b\n" "ldr x28, [x16, #0x38]\n" "smlal v25.4s, v29.4h, v8.4h\n" "smlal2 v10.4s, v29.8h, v8.8h\n" "add x28, x28, x8\n" "tbz x6, #2, 21f\n" "ld1 { v28.s }[0], [x28], #0x4\n" "tbz x6, #1, 20f\n" "ld1 { v28.h }[2], [x28], #0x2\n" "tbz x6, #0, 23f\n" "ld1 { v28.b }[6], [x28]\n" "b 23f\n" "20:" // Oddments: Load (0, 1): Bit 2: Bit 1: Unset "tbz x6, #0, 23f\n" "ld1 { v28.b }[4], [x28]\n" "b 23f\n" "21:" // Oddments: Load (0, 1): Bit 2: Unset "tbz x6, #1, 22f\n" "ld1 { v28.h }[0], [x28], #0x2\n" "tbz x6, #0, 23f\n" "ld1 { v28.b }[2], [x28]\n" "b 23f\n" "22:" // Oddments: Load (0, 1): Bit 2: Unset: Bit 1: Unset "tbz x6, #0, 23f\n" "ld1 { v28.b }[0], [x28]\n" "23:" // Oddments: Load (0, 1): Bit 2: End "ssubl v28.8h, v28.8b, v24.8b\n" "ldr x27, [x16, #0x40]\n" "smlal v13.4s, v28.4h, v1.4h\n" "smlal2 v20.4s, v28.8h, v1.8h\n" "smlal v9.4s, v28.4h, v0.4h\n" "smlal2 v18.4s, v28.8h, v0.8h\n" "add x27, x27, x8\n" "tbz x6, #2, 25f\n" "ld1 { v31.s }[0], [x27], #0x4\n" "tbz x6, #1, 24f\n" "ld1 { v31.h }[2], [x27], #0x2\n" "tbz x6, #0, 27f\n" "ld1 { v31.b }[6], [x27]\n" "b 27f\n" "24:" // Oddments: Load (0, 2): Bit 2: Bit 1: Unset "tbz x6, #0, 27f\n" "ld1 { v31.b }[4], [x27]\n" "b 27f\n" "25:" // Oddments: Load (0, 2): Bit 2: Unset "tbz x6, #1, 26f\n" "ld1 { v31.h }[0], [x27], #0x2\n" "tbz x6, #0, 27f\n" "ld1 { v31.b }[2], [x27]\n" "b 27f\n" "26:" // Oddments: Load (0, 2): Bit 2: Unset: Bit 1: Unset "tbz x6, #0, 27f\n" "ld1 { v31.b }[0], [x27]\n" "27:" // Oddments: Load (0, 2): Bit 2: End "ssubl v31.8h, v31.8b, v24.8b\n" "ldr x26, [x16, #0x48]\n" "smlal v13.4s, v31.4h, v2.4h\n" "smlal2 v20.4s, v31.8h, v2.8h\n" "smlal v9.4s, v31.4h, v1.4h\n" "smlal2 v18.4s, v31.8h, v1.8h\n" "add x26, x26, x8\n" "tbz x6, #2, 29f\n" "ld1 { v30.s }[0], [x26], #0x4\n" "tbz x6, #1, 28f\n" "ld1 { v30.h }[2], [x26], #0x2\n" "tbz x6, #0, 31f\n" "ld1 { v30.b }[6], [x26]\n" "b 31f\n" "28:" // Oddments: Load (2, 2): Bit 2: Bit 1: Unset "tbz x6, #0, 31f\n" "ld1 { v30.b }[4], [x26]\n" "b 31f\n" "29:" // Oddments: Load (2, 2): Bit 2: Unset "tbz x6, #1, 30f\n" "ld1 { v30.h }[0], [x26], #0x2\n" "tbz x6, #0, 31f\n" "ld1 { v30.b }[2], [x26]\n" "b 31f\n" "30:" // Oddments: Load (2, 2): Bit 2: Unset: Bit 1: Unset "tbz x6, #0, 31f\n" "ld1 { v30.b }[0], [x26]\n" "31:" // Oddments: Load (2, 2): Bit 2: End "ssubl v30.8h, v30.8b, v24.8b\n" "ldr x25, [x16, #0x50]\n" "smlal v13.4s, v30.4h, v8.4h\n" "smlal2 v20.4s, v30.8h, v8.8h\n" "smlal v9.4s, v30.4h, v7.4h\n" "smlal2 v18.4s, v30.8h, v7.8h\n" "add x25, x25, x8\n" "smlal v16.4s, v30.4h, v5.4h\n" "smlal2 v26.4s, v30.8h, v5.8h\n" "smlal v25.4s, v30.4h, v4.4h\n" "smlal2 v10.4s, v30.8h, v4.8h\n" "tbz x6, #2, 33f\n" "ld1 { v29.s }[0], [x25], #0x4\n" "tbz x6, #1, 32f\n" "ld1 { v29.h }[2], [x25], #0x2\n" "tbz x6, #0, 35f\n" "ld1 { v29.b }[6], [x25]\n" "b 35f\n" "32:" // Oddments: Load (1, 0): Bit 2: Bit 1: Unset "tbz x6, #0, 35f\n" "ld1 { v29.b }[4], [x25]\n" "b 35f\n" "33:" // Oddments: Load (1, 0): Bit 2: Unset "tbz x6, #1, 34f\n" "ld1 { v29.h }[0], [x25], #0x2\n" "tbz x6, #0, 35f\n" "ld1 { v29.b }[2], [x25]\n" "b 35f\n" "34:" // Oddments: Load (1, 0): Bit 2: Unset: Bit 1: Unset "tbz x6, #0, 35f\n" "ld1 { v29.b }[0], [x25]\n" "35:" // Oddments: Load (1, 0): Bit 2: End "ssubl v29.8h, v29.8b, v24.8b\n" "ldr x24, [x16, #0x58]\n" "smlal v13.4s, v29.4h, v3.4h\n" "smlal2 v20.4s, v29.8h, v3.8h\n" "smlal v16.4s, v29.4h, v0.4h\n" "smlal2 v26.4s, v29.8h, v0.8h\n" "add x24, x24, x8\n" "tbz x6, #2, 37f\n" "ld1 { v28.s }[0], [x24], #0x4\n" "tbz x6, #1, 36f\n" "ld1 { v28.h }[2], [x24], #0x2\n" "tbz x6, #0, 39f\n" "ld1 { v28.b }[6], [x24]\n" "b 39f\n" "36:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset "tbz x6, #0, 39f\n" "ld1 { v28.b }[4], [x24]\n" "b 39f\n" "37:" // Oddments: Load (1, 3): Bit 2: Unset "tbz x6, #1, 38f\n" "ld1 { v28.h }[0], [x24], #0x2\n" "tbz x6, #0, 39f\n" "ld1 { v28.b }[2], [x24]\n" "b 39f\n" "38:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset "tbz x6, #0, 39f\n" "ld1 { v28.b }[0], [x24]\n" "39:" // Oddments: Load (1, 3): Bit 2: End "ssubl v28.8h, v28.8b, v24.8b\n" "ldr x23, [x16, #0x60]\n" "smlal v9.4s, v28.4h, v5.4h\n" "smlal2 v18.4s, v28.8h, v5.8h\n" "smlal v25.4s, v28.4h, v2.4h\n" "smlal2 v10.4s, v28.8h, v2.8h\n" "add x23, x23, x8\n" "tbz x6, #2, 41f\n" "ld1 { v31.s }[0], [x23], #0x4\n" "tbz x6, #1, 40f\n" "ld1 { v31.h }[2], [x23], #0x2\n" "tbz x6, #0, 43f\n" "ld1 { v31.b }[6], [x23]\n" "b 43f\n" "40:" // Oddments: Load (2, 0): Bit 2: Bit 1: Unset "tbz x6, #0, 43f\n" "ld1 { v31.b }[4], [x23]\n" "b 43f\n" "41:" // Oddments: Load (2, 0): Bit 2: Unset "tbz x6, #1, 42f\n" "ld1 { v31.h }[0], [x23], #0x2\n" "tbz x6, #0, 43f\n" "ld1 { v31.b }[2], [x23]\n" "b 43f\n" "42:" // Oddments: Load (2, 0): Bit 2: Unset: Bit 1: Unset "tbz x6, #0, 43f\n" "ld1 { v31.b }[0], [x23]\n" "43:" // Oddments: Load (2, 0): Bit 2: End "ssubl v31.8h, v31.8b, v24.8b\n" "ldr x22, [x16, #0x68]\n" "smlal v13.4s, v31.4h, v6.4h\n" "smlal2 v20.4s, v31.8h, v6.8h\n" "smlal v16.4s, v31.4h, v3.4h\n" "smlal2 v26.4s, v31.8h, v3.8h\n" "add x22, x22, x8\n" "tbz x6, #2, 45f\n" "ld1 { v30.s }[0], [x22], #0x4\n" "tbz x6, #1, 44f\n" "ld1 { v30.h }[2], [x22], #0x2\n" "tbz x6, #0, 47f\n" "ld1 { v30.b }[6], [x22]\n" "b 47f\n" "44:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset "tbz x6, #0, 47f\n" "ld1 { v30.b }[4], [x22]\n" "b 47f\n" "45:" // Oddments: Load (2, 3): Bit 2: Unset "tbz x6, #1, 46f\n" "ld1 { v30.h }[0], [x22], #0x2\n" "tbz x6, #0, 47f\n" "ld1 { v30.b }[2], [x22]\n" "b 47f\n" "46:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset "tbz x6, #0, 47f\n" "ld1 { v30.b }[0], [x22]\n" "47:" // Oddments: Load (2, 3): Bit 2: End "ssubl v30.8h, v30.8b, v24.8b\n" "ldr x21, [x16, #0x70]\n" "smlal v9.4s, v30.4h, v8.4h\n" "smlal2 v18.4s, v30.8h, v8.8h\n" "smlal v25.4s, v30.4h, v5.4h\n" "smlal2 v10.4s, v30.8h, v5.8h\n" "add x21, x21, x8\n" "tbz x6, #2, 49f\n" "ld1 { v29.s }[0], [x21], #0x4\n" "tbz x6, #1, 48f\n" "ld1 { v29.h }[2], [x21], #0x2\n" "tbz x6, #0, 51f\n" "ld1 { v29.b }[6], [x21]\n" "b 51f\n" "48:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset "tbz x6, #0, 51f\n" "ld1 { v29.b }[4], [x21]\n" "b 51f\n" "49:" // Oddments: Load (3, 1): Bit 2: Unset "tbz x6, #1, 50f\n" "ld1 { v29.h }[0], [x21], #0x2\n" "tbz x6, #0, 51f\n" "ld1 { v29.b }[2], [x21]\n" "b 51f\n" "50:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset "tbz x6, #0, 51f\n" "ld1 { v29.b }[0], [x21]\n" "51:" // Oddments: Load (3, 1): Bit 2: End "ssubl v29.8h, v29.8b, v24.8b\n" "ldr x20, [x16, #0x78]\n" "smlal v16.4s, v29.4h, v7.4h\n" "smlal2 v26.4s, v29.8h, v7.8h\n" "smlal v25.4s, v29.4h, v6.4h\n" "smlal2 v10.4s, v29.8h, v6.8h\n" "add x20, x20, x8\n" "tbz x6, #2, 53f\n" "ld1 { v28.s }[0], [x20], #0x4\n" "tbz x6, #1, 52f\n" "ld1 { v28.h }[2], [x20], #0x2\n" "tbz x6, #0, 55f\n" "ld1 { v28.b }[6], [x20]\n" "b 55f\n" "52:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset "tbz x6, #0, 55f\n" "ld1 { v28.b }[4], [x20]\n" "b 55f\n" "53:" // Oddments: Load (3, 2): Bit 2: Unset "tbz x6, #1, 54f\n" "ld1 { v28.h }[0], [x20], #0x2\n" "tbz x6, #0, 55f\n" "ld1 { v28.b }[2], [x20]\n" "b 55f\n" "54:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset "tbz x6, #0, 55f\n" "ld1 { v28.b }[0], [x20]\n" "55:" // Oddments: Load (3, 2): Bit 2: End "ssubl v28.8h, v28.8b, v24.8b\n" "smlal v16.4s, v28.4h, v8.4h\n" "smlal2 v26.4s, v28.8h, v8.8h\n" "smlal v25.4s, v28.4h, v7.4h\n" "smlal2 v10.4s, v28.8h, v7.8h\n" "tbz x6, #2, 57f\n" "ld1 { v17.4s }, [x14], #0x10\n" "ld1 { v22.4s }, [x13], #0x10\n" "tbz x6, #1, 56f\n" "ld1 { v23.d }[0], [x14], #0x8\n" "ld1 { v19.d }[0], [x13], #0x8\n" "tbz x6, #0, 59f\n" "ld1 { v23.s }[2], [x14]\n" "ld1 { v19.s }[2], [x13]\n" "b 59f\n" "56:" // Oddments: Load requant params: Bit 2: Bit 1: Unset "tbz x6, #0, 59f\n" "ld1 { v23.s }[0], [x14]\n" "ld1 { v19.s }[0], [x13]\n" "b 59f\n" "57:" // Oddments: Load requant params: Bit 2: Unset "tbz x6, #1, 58f\n" "ld1 { v17.d }[0], [x14], #0x8\n" "ld1 { v22.d }[0], [x13], #0x8\n" "tbz x6, #0, 59f\n" "ld1 { v17.s }[2], [x14]\n" "ld1 { v22.s }[2], [x13]\n" "b 59f\n" "58:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset "tbz x6, #0, 59f\n" "ld1 { v17.s }[0], [x14]\n" "ld1 { v22.s }[0], [x13]\n" "59:" // Oddments: Load requant params: Bit 2: End "sqrdmulh v13.4s, v13.4s, v17.4s\n" "and v21.16b, v13.16b, v22.16b\n" "add x12, x12, x17\n" "add x11, x11, x17\n" "sqrdmulh v20.4s, v20.4s, v23.4s\n" "sshr v21.4s, v21.4s, #0x1f\n" "add x10, x10, x17\n" "add x9, x9, x17\n" "and v29.16b, v20.16b, v19.16b\n" "sqrdmulh v9.4s, v9.4s, v17.4s\n" "sqrdmulh v16.4s, v16.4s, v17.4s\n" "sqrdmulh v25.4s, v25.4s, v17.4s\n" "sqadd v13.4s, v13.4s, v21.4s\n" "sshr v29.4s, v29.4s, #0x1f\n" "and v0.16b, v9.16b, v22.16b\n" "sqrdmulh v18.4s, v18.4s, v23.4s\n" "and v27.16b, v16.16b, v22.16b\n" "sqrdmulh v26.4s, v26.4s, v23.4s\n" "and v21.16b, v25.16b, v22.16b\n" "sqrdmulh v10.4s, v10.4s, v23.4s\n" "sqadd v20.4s, v20.4s, v29.4s\n" "sshr v0.4s, v0.4s, #0x1f\n" "and v17.16b, v18.16b, v19.16b\n" "sshr v27.4s, v27.4s, #0x1f\n" "and v7.16b, v26.16b, v19.16b\n" "sshr v21.4s, v21.4s, #0x1f\n" "and v29.16b, v10.16b, v19.16b\n" "sqadd v9.4s, v9.4s, v0.4s\n" "sshr v17.4s, v17.4s, #0x1f\n" "sqadd v16.4s, v16.4s, v27.4s\n" "sshr v7.4s, v7.4s, #0x1f\n" "sqadd v25.4s, v25.4s, v21.4s\n" "sshr v29.4s, v29.4s, #0x1f\n" "srshl v13.4s, v13.4s, v22.4s\n" "srshl v9.4s, v9.4s, v22.4s\n" "sqadd v18.4s, v18.4s, v17.4s\n" "srshl v16.4s, v16.4s, v22.4s\n" "sqadd v26.4s, v26.4s, v7.4s\n" "srshl v25.4s, v25.4s, v22.4s\n" "sqadd v10.4s, v10.4s, v29.4s\n" "srshl v20.4s, v20.4s, v19.4s\n" "sqxtn v13.4h, v13.4s\n" "srshl v18.4s, v18.4s, v19.4s\n" "sqxtn v9.4h, v9.4s\n" "srshl v26.4s, v26.4s, v19.4s\n" "sqxtn v16.4h, v16.4s\n" "srshl v10.4s, v10.4s, v19.4s\n" "sqxtn v25.4h, v25.4s\n" "sqxtn2 v13.8h, v20.4s\n" "sqxtn2 v9.8h, v18.4s\n" "sqxtn2 v16.8h, v26.4s\n" "sqxtn2 v25.8h, v10.4s\n" "sqadd v13.8h, v13.8h, v14.8h\n" "sqadd v9.8h, v9.8h, v14.8h\n" "sqadd v16.8h, v16.8h, v14.8h\n" "sqadd v25.8h, v25.8h, v14.8h\n" "smax v13.8h, v13.8h, v12.8h\n" "smax v9.8h, v9.8h, v12.8h\n" "smax v16.8h, v16.8h, v12.8h\n" "smax v25.8h, v25.8h, v12.8h\n" "smin v13.8h, v13.8h, v11.8h\n" "smin v9.8h, v9.8h, v11.8h\n" "smin v16.8h, v16.8h, v11.8h\n" "smin v25.8h, v25.8h, v11.8h\n" "uzp1 v13.16b, v13.16b, v13.16b\n" "uzp1 v9.16b, v9.16b, v9.16b\n" "uzp1 v16.16b, v16.16b, v16.16b\n" "uzp1 v25.16b, v25.16b, v25.16b\n" "tbz x6, #2, 61f\n" "st1 { v13.s }[0], [x12], #0x4\n" "st1 { v9.s }[0], [x11], #0x4\n" "st1 { v16.s }[0], [x10], #0x4\n" "st1 { v25.s }[0], [x9], #0x4\n" "tbz x6, #1, 60f\n" "st1 { v13.h }[2], [x12], #0x2\n" "st1 { v9.h }[2], [x11], #0x2\n" "st1 { v16.h }[2], [x10], #0x2\n" "st1 { v25.h }[2], [x9], #0x2\n" "tbz x6, #0, 63f\n" "st1 { v13.b }[6], [x12], #0x1\n" "st1 { v9.b }[6], [x11], #0x1\n" "st1 { v16.b }[6], [x10], #0x1\n" "st1 { v25.b }[6], [x9], #0x1\n" "b 63f\n" "60:" // Oddments: Bit 2: Bit 1: Unset "tbz x6, #0, 63f\n" "st1 { v13.b }[4], [x12], #0x1\n" "st1 { v9.b }[4], [x11], #0x1\n" "st1 { v16.b }[4], [x10], #0x1\n" "st1 { v25.b }[4], [x9], #0x1\n" "b 63f\n" "61:" // Oddments: Bit 2: Unset "tbz x6, #1, 62f\n" "st1 { v13.h }[0], [x12], #0x2\n" "st1 { v9.h }[0], [x11], #0x2\n" "st1 { v16.h }[0], [x10], #0x2\n" "st1 { v25.h }[0], [x9], #0x2\n" "tbz x6, #0, 63f\n" "st1 { v13.b }[2], [x12], #0x1\n" "st1 { v9.b }[2], [x11], #0x1\n" "st1 { v16.b }[2], [x10], #0x1\n" "st1 { v25.b }[2], [x9], #0x1\n" "b 63f\n" "62:" // Oddments: Bit 2: Unset: Bit 1: Unset "tbz x6, #0, 63f\n" "st1 { v13.b }[0], [x12], #0x1\n" "st1 { v9.b }[0], [x11], #0x1\n" "st1 { v16.b }[0], [x10], #0x1\n" "st1 { v25.b }[0], [x9], #0x1\n" "63:" // Oddments: Bit 2: End "64:" // End : : [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (¶ms) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" ); } } // namespace depthwise } // namespace arm_conv #endif // defined(__aarch64__)