/*
 * Copyright (c) 2022-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "arm_gemm.hpp"

#include <cstddef>
#include <cstdint>

#if defined(__aarch64__)

namespace arm_conv {
namespace depthwise {

void a64_u8qa_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
  const unsigned int n_channels,
  const uint8_t *const *const inptrs,
  const uint8_t *const weights,
  const int32_t *const bias,
  const arm_gemm::Requantize32 &qp,
  const int32_t *const requant_muls,
  const int32_t *const requant_shifts,
  uint8_t *const *const outptrs
)
{
  // Gathers the kernel arguments into a single block and re-orders the sixteen
  // input row pointers into the order in which the assembly below reads them.
  struct Params
  {
    long unsigned int n_channels;
    const void *weights;
    const int32_t *bias;
    const arm_gemm::Requantize32 *requant;
    const int32_t *const requant_muls;
    const int32_t *const requant_shifts;
    uint8_t *const *const outptrs;
    const uint8_t *inptrs[16];

    Params(
      long unsigned int n_channels,
      const uint8_t *const *inptrs_raw,
      const void *const weights,
      const int32_t *const bias,
      const arm_gemm::Requantize32 &qp,
      const int32_t *const requant_muls,
      const int32_t *const requant_shifts,
      uint8_t *const *outptrs
    ) : n_channels(n_channels), weights(weights), bias(bias),
        requant(&qp), requant_muls(requant_muls),
        requant_shifts(requant_shifts), outptrs(outptrs)
    {
      inptrs[0] = inptrs_raw[5];
      inptrs[1] = inptrs_raw[0];
      inptrs[2] = inptrs_raw[3];
      inptrs[3] = inptrs_raw[6];
      inptrs[4] = inptrs_raw[9];
      inptrs[5] = inptrs_raw[12];
      inptrs[6] = inptrs_raw[15];
      inptrs[7] = inptrs_raw[1];
      inptrs[8] = inptrs_raw[2];
      inptrs[9] = inptrs_raw[10];
      inptrs[10] = inptrs_raw[4];
      inptrs[11] = inptrs_raw[7];
      inptrs[12] = inptrs_raw[8];
      inptrs[13] = inptrs_raw[11];
      inptrs[14] = inptrs_raw[13];
      inptrs[15] = inptrs_raw[14];
    }
  };

  const Params params(n_channels, inptrs, weights, bias, qp,
                      requant_muls, requant_shifts, outptrs);

  __asm__ __volatile__(
    "ldr x16, [%x[params], %[offsetof_Params_n_channels]]\n"
    "ldr x23, [%x[params], %[offsetof_Params_requant]]\n"
    "lsr x15, x16, #0x3\n"
    "add x20, x23, %[offsetof_Requantize32_b_offset]\n"
    "ld1r { v18.16b }, [x20]\n"
    "ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
    "add x21, x23, %[offsetof_Requantize32_c_offset]\n"
    "add x20, x23, %[offsetof_Requantize32_minval]\n"
    "ld1r { v5.8h }, [x21]\n"
    "ld1r { v14.8h }, [x20]\n"
    "add x20, x23, %[offsetof_Requantize32_maxval]\n"
    "mov x14, #0x0\n"
    "ld1r { v12.8h }, [x20]\n"
    "mov x13, #0x0\n"
    "add x12, %x[params], %[offsetof_Params_inptrs]\n"
    "ldr x11, [%x[params], %[offsetof_Params_weights]]\n"
    "ldr x10, [%x[params], %[offsetof_Params_requant_muls]]\n"
    "ldr x9, [%x[params], 
%[offsetof_Params_requant_shifts]]\n" "ldp x28, x27, [x22, #0x0]\n" "ldp x26, x25, [x22, #0x10]\n" "cbz x15, 3f\n" "ldr d19, [x11, #0x0]\n" "ldr d7, [x11, #0x8]\n" "subs x15, x15, #0x1\n" "usubl v19.8h, v19.8b, v18.8b\n" "ldr d1, [x11, #0x10]\n" "ldr d17, [x11, #0x18]\n" "usubl v7.8h, v7.8b, v18.8b\n" "usubl v1.8h, v1.8b, v18.8b\n" "ldr d8, [x11, #0x20]\n" "ldr d31, [x11, #0x28]\n" "usubl v17.8h, v17.8b, v18.8b\n" "usubl v8.8h, v8.8b, v18.8b\n" "ldr d29, [x11, #0x30]\n" "ldr d16, [x11, #0x38]\n" "usubl v31.8h, v31.8b, v18.8b\n" "usubl v29.8h, v29.8b, v18.8b\n" "ldr d4, [x11, #0x40]\n" "ldr x20, [%x[params], %[offsetof_Params_bias]]\n" "usubl v16.8h, v16.8b, v18.8b\n" "usubl v4.8h, v4.8b, v18.8b\n" "ldr q28, [x20, #0x0]\n" "ldr q9, [x20, #0x10]\n" "add x20, x20, #0x20\n" "str x20, [%x[params], %[offsetof_Params_bias]]\n" "ldp x23, x22, [x12, #0x0]\n" "ldp x21, x20, [x12, #0x10]\n" "mov v3.16b, v28.16b\n" "mov v30.16b, v9.16b\n" "ldr d23, [x23, x14]\n" "ldr d10, [x22, x14]\n" "mov v0.16b, v28.16b\n" "mov v22.16b, v9.16b\n" "ldr d11, [x21, x14]\n" "ldr d13, [x20, x14]\n" "mov v6.16b, v28.16b\n" "mov v2.16b, v9.16b\n" "ldr x20, [x12, #0x20]\n" "ldr d27, [x20, x14]\n" "ushll v23.8h, v23.8b, #0x0\n" "ushll v10.8h, v10.8b, #0x0\n" "ushll v11.8h, v11.8b, #0x0\n" "ushll v13.8h, v13.8b, #0x0\n" "ushll v27.8h, v27.8b, #0x0\n" "beq 2f\n" "1:" // Loop "ldr q24, [x10, #0x0]\n" "ldr q25, [x9, #0x0]\n" "smlal v28.4s, v23.4h, v8.4h\n" "smlal2 v9.4s, v23.8h, v8.8h\n" "ldr q20, [x10, #0x10]\n" "ldr q26, [x9, #0x10]\n" "smlal v28.4s, v10.4h, v19.4h\n" "smlal v3.4s, v23.4h, v17.4h\n" "ldr x20, [x12, #0x28]\n" "ldr d21, [x20, x14]\n" "smlal v0.4s, v23.4h, v7.4h\n" "smlal v6.4s, v23.4h, v19.4h\n" "smlal2 v9.4s, v10.8h, v19.8h\n" "ldr x20, [x12, #0x38]\n" "ldr d10, [x20, x14]\n" "smlal v28.4s, v13.4h, v31.4h\n" "smlal2 v30.4s, v23.8h, v17.8h\n" "smlal2 v22.4s, v23.8h, v7.8h\n" "ldr x20, [x12, #0x30]\n" "ldr d15, [x20, x14]\n" "smlal2 v2.4s, v23.8h, v19.8h\n" "smlal v3.4s, v11.4h, v1.4h\n" "ushll v21.8h, v21.8b, #0x0\n" "ldr x20, [x12, #0x40]\n" "ldr d23, [x20, x14]\n" "smlal v0.4s, v13.4h, v1.4h\n" "smlal v6.4s, v13.4h, v7.4h\n" "ushll v10.8h, v10.8b, #0x0\n" "smlal2 v9.4s, v13.8h, v31.8h\n" "smlal v28.4s, v27.4h, v16.4h\n" "ldr x20, [x12, #0x48]\n" "ushll v15.8h, v15.8b, #0x0\n" "smlal2 v30.4s, v11.8h, v1.8h\n" "ldr d11, [x20, x14]\n" "smlal2 v22.4s, v13.8h, v1.8h\n" "ushll v23.8h, v23.8b, #0x0\n" "smlal2 v2.4s, v13.8h, v7.8h\n" "smlal v3.4s, v13.4h, v8.4h\n" "ldr x21, [x12, #0x50]\n" "ldr x20, [x12, #0x58]\n" "smlal v0.4s, v21.4h, v29.4h\n" "smlal v6.4s, v27.4h, v17.4h\n" "ushll v11.8h, v11.8b, #0x0\n" "ldr x24, [x12, #0x60]\n" "smlal2 v9.4s, v27.8h, v16.8h\n" "smlal v28.4s, v10.4h, v7.4h\n" "ldr x23, [x12, #0x68]\n" "ldr x22, [x12, #0x70]\n" "smlal2 v30.4s, v13.8h, v8.8h\n" "ldr d13, [x21, x14]\n" "smlal2 v22.4s, v21.8h, v29.8h\n" "ldr d21, [x20, x14]\n" "smlal2 v2.4s, v27.8h, v17.8h\n" "smlal v3.4s, v27.4h, v29.4h\n" "ushll v13.8h, v13.8b, #0x0\n" "ldr x21, [x12, #0x78]\n" "smlal v0.4s, v27.4h, v8.4h\n" "smlal v6.4s, v15.4h, v4.4h\n" "ushll v21.8h, v21.8b, #0x0\n" "ldr x20, [%x[params], %[offsetof_Params_bias]]\n" "smlal2 v9.4s, v10.8h, v7.8h\n" "smlal v28.4s, v23.4h, v1.4h\n" "add x11, x11, #0x48\n" "subs x15, x15, #0x1\n" "smlal2 v30.4s, v27.8h, v29.8h\n" "smlal2 v22.4s, v27.8h, v8.8h\n" "ldr d27, [x24, x14]\n" "ushll v27.8h, v27.8b, #0x0\n" "smlal2 v2.4s, v15.8h, v4.8h\n" "ldr d15, [x23, x14]\n" "smlal v3.4s, v10.4h, v19.4h\n" "ushll v15.8h, v15.8b, #0x0\n" "smlal v0.4s, v11.4h, v31.4h\n" "smlal v6.4s, 
v11.4h, v8.4h\n" "add x10, x10, #0x20\n" "add x9, x9, #0x20\n" "smlal2 v9.4s, v23.8h, v1.8h\n" "smlal v28.4s, v11.4h, v4.4h\n" "smlal2 v30.4s, v10.8h, v19.8h\n" "ldr d10, [x22, x14]\n" "smlal2 v22.4s, v11.8h, v31.8h\n" "ushll v10.8h, v10.8b, #0x0\n" "smlal2 v2.4s, v11.8h, v8.8h\n" "ldr d8, [x21, x14]\n" "smlal v3.4s, v23.4h, v7.4h\n" "ushll v8.8h, v8.8b, #0x0\n" "smlal v0.4s, v13.4h, v19.4h\n" "smlal v6.4s, v21.4h, v1.4h\n" "add x14, x14, #0x8\n" "smlal2 v9.4s, v11.8h, v4.8h\n" "smlal v28.4s, v13.4h, v17.4h\n" "smlal2 v30.4s, v23.8h, v7.8h\n" "smlal2 v22.4s, v13.8h, v19.8h\n" "smlal2 v2.4s, v21.8h, v1.8h\n" "smlal v3.4s, v11.4h, v16.4h\n" "smlal v0.4s, v27.4h, v17.4h\n" "smlal v6.4s, v15.4h, v31.4h\n" "smlal2 v9.4s, v13.8h, v17.8h\n" "smlal v28.4s, v27.4h, v29.4h\n" "sqrdmulh v28.4s, v28.4s, v24.4s\n" "smlal2 v30.4s, v11.8h, v16.8h\n" "smlal2 v22.4s, v27.8h, v17.8h\n" "and v17.16b, v28.16b, v25.16b\n" "smlal2 v2.4s, v15.8h, v31.8h\n" "smlal v3.4s, v21.4h, v31.4h\n" "sshr v17.4s, v17.4s, #0x1f\n" "smlal v0.4s, v10.4h, v16.4h\n" "smlal v6.4s, v10.4h, v29.4h\n" "sqadd v28.4s, v28.4s, v17.4s\n" "smlal2 v9.4s, v27.8h, v29.8h\n" "smlal2 v30.4s, v21.8h, v31.8h\n" "sqrdmulh v9.4s, v9.4s, v20.4s\n" "smlal2 v22.4s, v10.8h, v16.8h\n" "smlal2 v2.4s, v10.8h, v29.8h\n" "and v23.16b, v9.16b, v26.16b\n" "smlal v3.4s, v15.4h, v4.4h\n" "smlal v0.4s, v8.4h, v4.4h\n" "sqrdmulh v3.4s, v3.4s, v24.4s\n" "smlal v6.4s, v8.4h, v16.4h\n" "smlal2 v30.4s, v15.8h, v4.8h\n" "sqrdmulh v0.4s, v0.4s, v24.4s\n" "smlal2 v22.4s, v8.8h, v4.8h\n" "smlal2 v2.4s, v8.8h, v16.8h\n" "sqrdmulh v6.4s, v6.4s, v24.4s\n" "sshr v23.4s, v23.4s, #0x1f\n" "and v8.16b, v3.16b, v25.16b\n" "sqrdmulh v30.4s, v30.4s, v20.4s\n" "and v11.16b, v0.16b, v25.16b\n" "sqrdmulh v22.4s, v22.4s, v20.4s\n" "and v29.16b, v6.16b, v25.16b\n" "sqrdmulh v2.4s, v2.4s, v20.4s\n" "sqadd v9.4s, v9.4s, v23.4s\n" "sshr v8.4s, v8.4s, #0x1f\n" "and v13.16b, v30.16b, v26.16b\n" "sshr v11.4s, v11.4s, #0x1f\n" "and v21.16b, v22.16b, v26.16b\n" "sshr v29.4s, v29.4s, #0x1f\n" "and v23.16b, v2.16b, v26.16b\n" "sqadd v3.4s, v3.4s, v8.4s\n" "sshr v13.4s, v13.4s, #0x1f\n" "sqadd v0.4s, v0.4s, v11.4s\n" "sshr v21.4s, v21.4s, #0x1f\n" "sqadd v6.4s, v6.4s, v29.4s\n" "sshr v23.4s, v23.4s, #0x1f\n" "srshl v28.4s, v28.4s, v25.4s\n" "srshl v3.4s, v3.4s, v25.4s\n" "sqadd v30.4s, v30.4s, v13.4s\n" "srshl v0.4s, v0.4s, v25.4s\n" "sqadd v22.4s, v22.4s, v21.4s\n" "srshl v6.4s, v6.4s, v25.4s\n" "sqadd v2.4s, v2.4s, v23.4s\n" "srshl v9.4s, v9.4s, v26.4s\n" "sqxtn v28.4h, v28.4s\n" "srshl v30.4s, v30.4s, v26.4s\n" "sqxtn v3.4h, v3.4s\n" "srshl v22.4s, v22.4s, v26.4s\n" "sqxtn v0.4h, v0.4s\n" "srshl v2.4s, v2.4s, v26.4s\n" "sqxtn v6.4h, v6.4s\n" "sqxtn2 v28.8h, v9.4s\n" "sqxtn2 v3.8h, v30.4s\n" "sqxtn2 v0.8h, v22.4s\n" "sqxtn2 v6.8h, v2.4s\n" "sqadd v28.8h, v28.8h, v5.8h\n" "sqadd v3.8h, v3.8h, v5.8h\n" "sqadd v0.8h, v0.8h, v5.8h\n" "sqadd v6.8h, v6.8h, v5.8h\n" "smax v28.8h, v28.8h, v14.8h\n" "smax v3.8h, v3.8h, v14.8h\n" "smax v0.8h, v0.8h, v14.8h\n" "smax v6.8h, v6.8h, v14.8h\n" "smin v28.8h, v28.8h, v12.8h\n" "smin v3.8h, v3.8h, v12.8h\n" "smin v0.8h, v0.8h, v12.8h\n" "smin v6.8h, v6.8h, v12.8h\n" "uzp1 v28.16b, v28.16b, v28.16b\n" "str d28, [x28, x13]\n" "uzp1 v3.16b, v3.16b, v3.16b\n" "uzp1 v0.16b, v0.16b, v0.16b\n" "str d3, [x27, x13]\n" "uzp1 v6.16b, v6.16b, v6.16b\n" "str d0, [x26, x13]\n" "str d6, [x25, x13]\n" "ldr q28, [x20, #0x0]\n" "ldr q9, [x20, #0x10]\n" "add x20, x20, #0x20\n" "ldr d19, [x11, #0x0]\n" "ldr d7, [x11, #0x8]\n" "add x13, x13, #0x8\n" "str x20, [%x[params], 
%[offsetof_Params_bias]]\n" "ldr d1, [x11, #0x10]\n" "ldr d17, [x11, #0x18]\n" "mov v3.16b, v28.16b\n" "mov v30.16b, v9.16b\n" "ldr d8, [x11, #0x20]\n" "ldr d31, [x11, #0x28]\n" "mov v0.16b, v28.16b\n" "mov v22.16b, v9.16b\n" "ldr d29, [x11, #0x30]\n" "ldr d16, [x11, #0x38]\n" "mov v6.16b, v28.16b\n" "mov v2.16b, v9.16b\n" "ldr d4, [x11, #0x40]\n" "ldp x23, x22, [x12, #0x0]\n" "usubl v19.8h, v19.8b, v18.8b\n" "usubl v7.8h, v7.8b, v18.8b\n" "ldp x21, x20, [x12, #0x10]\n" "ldr d23, [x23, x14]\n" "usubl v1.8h, v1.8b, v18.8b\n" "usubl v17.8h, v17.8b, v18.8b\n" "ldr d10, [x22, x14]\n" "ldr d11, [x21, x14]\n" "usubl v8.8h, v8.8b, v18.8b\n" "usubl v31.8h, v31.8b, v18.8b\n" "ldr d13, [x20, x14]\n" "ldr x20, [x12, #0x20]\n" "usubl v29.8h, v29.8b, v18.8b\n" "usubl v16.8h, v16.8b, v18.8b\n" "ldr d27, [x20, x14]\n" "usubl v4.8h, v4.8b, v18.8b\n" "ushll v23.8h, v23.8b, #0x0\n" "ushll v10.8h, v10.8b, #0x0\n" "ushll v11.8h, v11.8b, #0x0\n" "ushll v13.8h, v13.8b, #0x0\n" "ushll v27.8h, v27.8b, #0x0\n" "bgt 1b\n" "2:" // Tail "ldr q26, [x10, #0x0]\n" "ldr q25, [x9, #0x0]\n" "smlal v28.4s, v23.4h, v8.4h\n" "smlal2 v9.4s, v23.8h, v8.8h\n" "ldr q24, [x10, #0x10]\n" "ldr q20, [x9, #0x10]\n" "smlal v28.4s, v10.4h, v19.4h\n" "smlal v3.4s, v23.4h, v17.4h\n" "ldr x20, [x12, #0x28]\n" "ldr d21, [x20, x14]\n" "smlal v0.4s, v23.4h, v7.4h\n" "smlal v6.4s, v23.4h, v19.4h\n" "smlal2 v9.4s, v10.8h, v19.8h\n" "ldr x20, [x12, #0x38]\n" "ldr d15, [x20, x14]\n" "smlal v28.4s, v13.4h, v31.4h\n" "smlal2 v30.4s, v23.8h, v17.8h\n" "smlal2 v22.4s, v23.8h, v7.8h\n" "ldr x20, [x12, #0x30]\n" "ldr d10, [x20, x14]\n" "smlal2 v2.4s, v23.8h, v19.8h\n" "smlal v3.4s, v11.4h, v1.4h\n" "ushll v21.8h, v21.8b, #0x0\n" "ldr x20, [x12, #0x40]\n" "ldr d23, [x20, x14]\n" "smlal v0.4s, v13.4h, v1.4h\n" "smlal v6.4s, v13.4h, v7.4h\n" "ushll v15.8h, v15.8b, #0x0\n" "smlal2 v9.4s, v13.8h, v31.8h\n" "smlal v28.4s, v27.4h, v16.4h\n" "ldr x20, [x12, #0x48]\n" "ushll v10.8h, v10.8b, #0x0\n" "smlal2 v30.4s, v11.8h, v1.8h\n" "ldr d11, [x20, x14]\n" "smlal2 v22.4s, v13.8h, v1.8h\n" "ushll v23.8h, v23.8b, #0x0\n" "smlal2 v2.4s, v13.8h, v7.8h\n" "smlal v3.4s, v13.4h, v8.4h\n" "ldr x24, [x12, #0x50]\n" "ldr x20, [x12, #0x58]\n" "smlal v0.4s, v21.4h, v29.4h\n" "smlal v6.4s, v27.4h, v17.4h\n" "ushll v11.8h, v11.8b, #0x0\n" "ldr x23, [x12, #0x60]\n" "smlal2 v9.4s, v27.8h, v16.8h\n" "smlal v28.4s, v15.4h, v7.4h\n" "ldr x22, [x12, #0x68]\n" "ldr x21, [x12, #0x70]\n" "smlal2 v30.4s, v13.8h, v8.8h\n" "ldr d13, [x24, x14]\n" "smlal2 v22.4s, v21.8h, v29.8h\n" "ldr d21, [x20, x14]\n" "smlal2 v2.4s, v27.8h, v17.8h\n" "smlal v3.4s, v27.4h, v29.4h\n" "ushll v13.8h, v13.8b, #0x0\n" "ldr x20, [x12, #0x78]\n" "smlal v0.4s, v27.4h, v8.4h\n" "smlal v6.4s, v10.4h, v4.4h\n" "ushll v21.8h, v21.8b, #0x0\n" "tst x16, #0x7\n" "smlal2 v9.4s, v15.8h, v7.8h\n" "smlal v28.4s, v23.4h, v1.4h\n" "add x10, x10, #0x20\n" "add x9, x9, #0x20\n" "smlal2 v30.4s, v27.8h, v29.8h\n" "smlal2 v22.4s, v27.8h, v8.8h\n" "ldr d27, [x23, x14]\n" "ushll v27.8h, v27.8b, #0x0\n" "smlal2 v2.4s, v10.8h, v4.8h\n" "ldr d10, [x22, x14]\n" "smlal v3.4s, v15.4h, v19.4h\n" "ushll v10.8h, v10.8b, #0x0\n" "smlal v0.4s, v11.4h, v31.4h\n" "smlal v6.4s, v11.4h, v8.4h\n" "smlal2 v9.4s, v23.8h, v1.8h\n" "smlal v28.4s, v11.4h, v4.4h\n" "smlal2 v30.4s, v15.8h, v19.8h\n" "ldr d15, [x21, x14]\n" "smlal2 v22.4s, v11.8h, v31.8h\n" "ushll v15.8h, v15.8b, #0x0\n" "smlal2 v2.4s, v11.8h, v8.8h\n" "ldr d8, [x20, x14]\n" "smlal v3.4s, v23.4h, v7.4h\n" "ushll v8.8h, v8.8b, #0x0\n" "smlal v0.4s, v13.4h, v19.4h\n" "smlal v6.4s, v21.4h, 
v1.4h\n" "add x14, x14, #0x8\n" "smlal2 v9.4s, v11.8h, v4.8h\n" "smlal v28.4s, v13.4h, v17.4h\n" "smlal2 v30.4s, v23.8h, v7.8h\n" "smlal2 v22.4s, v13.8h, v19.8h\n" "smlal2 v2.4s, v21.8h, v1.8h\n" "smlal v3.4s, v11.4h, v16.4h\n" "smlal v0.4s, v27.4h, v17.4h\n" "smlal v6.4s, v10.4h, v31.4h\n" "smlal2 v9.4s, v13.8h, v17.8h\n" "smlal v28.4s, v27.4h, v29.4h\n" "sqrdmulh v28.4s, v28.4s, v26.4s\n" "smlal2 v30.4s, v11.8h, v16.8h\n" "smlal2 v22.4s, v27.8h, v17.8h\n" "and v1.16b, v28.16b, v25.16b\n" "smlal2 v2.4s, v10.8h, v31.8h\n" "smlal v3.4s, v21.4h, v31.4h\n" "sshr v1.4s, v1.4s, #0x1f\n" "smlal v0.4s, v15.4h, v16.4h\n" "smlal v6.4s, v15.4h, v29.4h\n" "sqadd v28.4s, v28.4s, v1.4s\n" "smlal2 v9.4s, v27.8h, v29.8h\n" "smlal2 v30.4s, v21.8h, v31.8h\n" "sqrdmulh v9.4s, v9.4s, v24.4s\n" "smlal2 v22.4s, v15.8h, v16.8h\n" "smlal2 v2.4s, v15.8h, v29.8h\n" "and v27.16b, v9.16b, v20.16b\n" "smlal v3.4s, v10.4h, v4.4h\n" "smlal v0.4s, v8.4h, v4.4h\n" "sqrdmulh v3.4s, v3.4s, v26.4s\n" "smlal v6.4s, v8.4h, v16.4h\n" "smlal2 v30.4s, v10.8h, v4.8h\n" "sqrdmulh v0.4s, v0.4s, v26.4s\n" "smlal2 v22.4s, v8.8h, v4.8h\n" "smlal2 v2.4s, v8.8h, v16.8h\n" "sqrdmulh v6.4s, v6.4s, v26.4s\n" "sshr v27.4s, v27.4s, #0x1f\n" "and v16.16b, v3.16b, v25.16b\n" "sqrdmulh v30.4s, v30.4s, v24.4s\n" "and v4.16b, v0.16b, v25.16b\n" "sqrdmulh v22.4s, v22.4s, v24.4s\n" "and v17.16b, v6.16b, v25.16b\n" "sqrdmulh v2.4s, v2.4s, v24.4s\n" "sqadd v9.4s, v9.4s, v27.4s\n" "sshr v16.4s, v16.4s, #0x1f\n" "and v8.16b, v30.16b, v20.16b\n" "sshr v4.4s, v4.4s, #0x1f\n" "and v26.16b, v22.16b, v20.16b\n" "sshr v17.4s, v17.4s, #0x1f\n" "and v11.16b, v2.16b, v20.16b\n" "sqadd v3.4s, v3.4s, v16.4s\n" "sshr v8.4s, v8.4s, #0x1f\n" "sqadd v0.4s, v0.4s, v4.4s\n" "sshr v26.4s, v26.4s, #0x1f\n" "sqadd v6.4s, v6.4s, v17.4s\n" "sshr v11.4s, v11.4s, #0x1f\n" "srshl v28.4s, v28.4s, v25.4s\n" "srshl v3.4s, v3.4s, v25.4s\n" "sqadd v30.4s, v30.4s, v8.4s\n" "srshl v0.4s, v0.4s, v25.4s\n" "sqadd v22.4s, v22.4s, v26.4s\n" "srshl v6.4s, v6.4s, v25.4s\n" "sqadd v2.4s, v2.4s, v11.4s\n" "srshl v9.4s, v9.4s, v20.4s\n" "sqxtn v28.4h, v28.4s\n" "srshl v30.4s, v30.4s, v20.4s\n" "sqxtn v3.4h, v3.4s\n" "srshl v22.4s, v22.4s, v20.4s\n" "sqxtn v0.4h, v0.4s\n" "srshl v2.4s, v2.4s, v20.4s\n" "sqxtn v6.4h, v6.4s\n" "sqxtn2 v28.8h, v9.4s\n" "sqxtn2 v3.8h, v30.4s\n" "sqxtn2 v0.8h, v22.4s\n" "sqxtn2 v6.8h, v2.4s\n" "sqadd v28.8h, v28.8h, v5.8h\n" "sqadd v3.8h, v3.8h, v5.8h\n" "sqadd v0.8h, v0.8h, v5.8h\n" "sqadd v6.8h, v6.8h, v5.8h\n" "smax v28.8h, v28.8h, v14.8h\n" "smax v3.8h, v3.8h, v14.8h\n" "smax v0.8h, v0.8h, v14.8h\n" "smax v6.8h, v6.8h, v14.8h\n" "smin v28.8h, v28.8h, v12.8h\n" "smin v3.8h, v3.8h, v12.8h\n" "smin v0.8h, v0.8h, v12.8h\n" "smin v6.8h, v6.8h, v12.8h\n" "uzp1 v28.16b, v28.16b, v28.16b\n" "str d28, [x28, x13]\n" "uzp1 v3.16b, v3.16b, v3.16b\n" "uzp1 v0.16b, v0.16b, v0.16b\n" "str d3, [x27, x13]\n" "uzp1 v6.16b, v6.16b, v6.16b\n" "str d0, [x26, x13]\n" "str d6, [x25, x13]\n" "add x13, x13, #0x8\n" "beq 64f\n" "add x11, x11, #0x48\n" "3:" // Oddments "ldr x20, [%x[params], %[offsetof_Params_bias]]\n" "tbz x16, #2, 5f\n" "ld1 { v28.4s }, [x20], #0x10\n" "tbz x16, #1, 4f\n" "ld1 { v9.d }[0], [x20], #0x8\n" "tbz x16, #0, 7f\n" "ld1 { v9.s }[2], [x20]\n" "b 7f\n" "4:" // Oddments: Load bias: Bit 2: Bit 1: Unset "tbz x16, #0, 7f\n" "ld1 { v9.s }[0], [x20]\n" "b 7f\n" "5:" // Oddments: Load bias: Bit 2: Unset "tbz x16, #1, 6f\n" "ld1 { v28.d }[0], [x20], #0x8\n" "tbz x16, #0, 7f\n" "ld1 { v28.s }[2], [x20]\n" "b 7f\n" "6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset 
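    // The remainder of the oddments path loads the input pixels and the per-channel
    // requantisation multipliers and shifts lane-by-lane, driven by tbz tests on the low
    // bits of x16 (the channel count), and stores the requantised outputs one element at
    // a time.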
"tbz x16, #0, 7f\n" "ld1 { v28.s }[0], [x20]\n" "7:" // Oddments: Load bias: Bit 2: End "ldr d19, [x11, #0x0]\n" "ldr d7, [x11, #0x8]\n" "mov v3.16b, v28.16b\n" "mov v30.16b, v9.16b\n" "ldr d1, [x11, #0x10]\n" "ldr d17, [x11, #0x18]\n" "mov v0.16b, v28.16b\n" "mov v22.16b, v9.16b\n" "ldr d8, [x11, #0x20]\n" "ldr d31, [x11, #0x28]\n" "mov v6.16b, v28.16b\n" "mov v2.16b, v9.16b\n" "ldr d29, [x11, #0x30]\n" "ldr d16, [x11, #0x38]\n" "usubl v19.8h, v19.8b, v18.8b\n" "usubl v7.8h, v7.8b, v18.8b\n" "ldr d4, [x11, #0x40]\n" "ldp x24, x23, [x12, #0x0]\n" "usubl v1.8h, v1.8b, v18.8b\n" "usubl v17.8h, v17.8b, v18.8b\n" "ldp x22, x21, [x12, #0x10]\n" "ldr x20, [x12, #0x20]\n" "usubl v8.8h, v8.8b, v18.8b\n" "usubl v31.8h, v31.8b, v18.8b\n" "usubl v29.8h, v29.8b, v18.8b\n" "usubl v16.8h, v16.8b, v18.8b\n" "usubl v4.8h, v4.8b, v18.8b\n" "add x24, x24, x14\n" "add x23, x23, x14\n" "add x22, x22, x14\n" "add x21, x21, x14\n" "add x20, x20, x14\n" "tbz x16, #2, 9f\n" "ld1 { v23.s }[0], [x24], #0x4\n" "ld1 { v10.s }[0], [x23], #0x4\n" "ld1 { v11.s }[0], [x22], #0x4\n" "ld1 { v13.s }[0], [x21], #0x4\n" "ld1 { v27.s }[0], [x20], #0x4\n" "tbz x16, #1, 8f\n" "ld1 { v23.h }[2], [x24], #0x2\n" "ld1 { v10.h }[2], [x23], #0x2\n" "ld1 { v11.h }[2], [x22], #0x2\n" "ld1 { v13.h }[2], [x21], #0x2\n" "ld1 { v27.h }[2], [x20], #0x2\n" "tbz x16, #0, 11f\n" "ld1 { v23.b }[6], [x24]\n" "ld1 { v10.b }[6], [x23]\n" "ld1 { v11.b }[6], [x22]\n" "ld1 { v13.b }[6], [x21]\n" "ld1 { v27.b }[6], [x20]\n" "b 11f\n" "8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset "tbz x16, #0, 11f\n" "ld1 { v23.b }[4], [x24]\n" "ld1 { v10.b }[4], [x23]\n" "ld1 { v11.b }[4], [x22]\n" "ld1 { v13.b }[4], [x21]\n" "ld1 { v27.b }[4], [x20]\n" "b 11f\n" "9:" // Oddments: Initial loads: Bit 2: Unset "tbz x16, #1, 10f\n" "ld1 { v23.h }[0], [x24], #0x2\n" "ld1 { v10.h }[0], [x23], #0x2\n" "ld1 { v11.h }[0], [x22], #0x2\n" "ld1 { v13.h }[0], [x21], #0x2\n" "ld1 { v27.h }[0], [x20], #0x2\n" "tbz x16, #0, 11f\n" "ld1 { v23.b }[2], [x24]\n" "ld1 { v10.b }[2], [x23]\n" "ld1 { v11.b }[2], [x22]\n" "ld1 { v13.b }[2], [x21]\n" "ld1 { v27.b }[2], [x20]\n" "b 11f\n" "10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset "tbz x16, #0, 11f\n" "ld1 { v23.b }[0], [x24]\n" "ld1 { v10.b }[0], [x23]\n" "ld1 { v11.b }[0], [x22]\n" "ld1 { v13.b }[0], [x21]\n" "ld1 { v27.b }[0], [x20]\n" "11:" // Oddments: Initial loads: Bit 2: End "ushll v23.8h, v23.8b, #0x0\n" "smlal v28.4s, v23.4h, v8.4h\n" "smlal2 v9.4s, v23.8h, v8.8h\n" "ldr x20, [x12, #0x28]\n" "smlal v3.4s, v23.4h, v17.4h\n" "smlal2 v30.4s, v23.8h, v17.8h\n" "ushll v10.8h, v10.8b, #0x0\n" "ushll v11.8h, v11.8b, #0x0\n" "smlal v0.4s, v23.4h, v7.4h\n" "smlal2 v22.4s, v23.8h, v7.8h\n" "add x20, x20, x14\n" "smlal v6.4s, v23.4h, v19.4h\n" "smlal2 v2.4s, v23.8h, v19.8h\n" "ushll v13.8h, v13.8b, #0x0\n" "smlal v28.4s, v10.4h, v19.4h\n" "smlal2 v9.4s, v10.8h, v19.8h\n" "ushll v27.8h, v27.8b, #0x0\n" "smlal v3.4s, v11.4h, v1.4h\n" "smlal2 v30.4s, v11.8h, v1.8h\n" "smlal v28.4s, v13.4h, v31.4h\n" "smlal2 v9.4s, v13.8h, v31.8h\n" "smlal v3.4s, v13.4h, v8.4h\n" "smlal2 v30.4s, v13.8h, v8.8h\n" "smlal v0.4s, v13.4h, v1.4h\n" "smlal2 v22.4s, v13.8h, v1.8h\n" "smlal v6.4s, v13.4h, v7.4h\n" "smlal2 v2.4s, v13.8h, v7.8h\n" "tbz x16, #2, 13f\n" "ld1 { v26.s }[0], [x20], #0x4\n" "tbz x16, #1, 12f\n" "ld1 { v26.h }[2], [x20], #0x2\n" "tbz x16, #0, 15f\n" "ld1 { v26.b }[6], [x20]\n" "b 15f\n" "12:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset "tbz x16, #0, 15f\n" "ld1 { v26.b }[4], [x20]\n" "b 15f\n" "13:" // Oddments: Load (3, 
0): Bit 2: Unset "tbz x16, #1, 14f\n" "ld1 { v26.h }[0], [x20], #0x2\n" "tbz x16, #0, 15f\n" "ld1 { v26.b }[2], [x20]\n" "b 15f\n" "14:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset "tbz x16, #0, 15f\n" "ld1 { v26.b }[0], [x20]\n" "15:" // Oddments: Load (3, 0): Bit 2: End "ushll v26.8h, v26.8b, #0x0\n" "smlal v0.4s, v26.4h, v29.4h\n" "smlal2 v22.4s, v26.8h, v29.8h\n" "ldr x20, [x12, #0x30]\n" "smlal v28.4s, v27.4h, v16.4h\n" "smlal2 v9.4s, v27.8h, v16.8h\n" "add x20, x20, x14\n" "smlal v3.4s, v27.4h, v29.4h\n" "smlal2 v30.4s, v27.8h, v29.8h\n" "smlal v0.4s, v27.4h, v8.4h\n" "smlal2 v22.4s, v27.8h, v8.8h\n" "smlal v6.4s, v27.4h, v17.4h\n" "smlal2 v2.4s, v27.8h, v17.8h\n" "tbz x16, #2, 17f\n" "ld1 { v23.s }[0], [x20], #0x4\n" "tbz x16, #1, 16f\n" "ld1 { v23.h }[2], [x20], #0x2\n" "tbz x16, #0, 19f\n" "ld1 { v23.b }[6], [x20]\n" "b 19f\n" "16:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset "tbz x16, #0, 19f\n" "ld1 { v23.b }[4], [x20]\n" "b 19f\n" "17:" // Oddments: Load (3, 3): Bit 2: Unset "tbz x16, #1, 18f\n" "ld1 { v23.h }[0], [x20], #0x2\n" "tbz x16, #0, 19f\n" "ld1 { v23.b }[2], [x20]\n" "b 19f\n" "18:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset "tbz x16, #0, 19f\n" "ld1 { v23.b }[0], [x20]\n" "19:" // Oddments: Load (3, 3): Bit 2: End "ushll v23.8h, v23.8b, #0x0\n" "ldr x20, [x12, #0x38]\n" "smlal v6.4s, v23.4h, v4.4h\n" "smlal2 v2.4s, v23.8h, v4.8h\n" "add x20, x20, x14\n" "tbz x16, #2, 21f\n" "ld1 { v21.s }[0], [x20], #0x4\n" "tbz x16, #1, 20f\n" "ld1 { v21.h }[2], [x20], #0x2\n" "tbz x16, #0, 23f\n" "ld1 { v21.b }[6], [x20]\n" "b 23f\n" "20:" // Oddments: Load (0, 1): Bit 2: Bit 1: Unset "tbz x16, #0, 23f\n" "ld1 { v21.b }[4], [x20]\n" "b 23f\n" "21:" // Oddments: Load (0, 1): Bit 2: Unset "tbz x16, #1, 22f\n" "ld1 { v21.h }[0], [x20], #0x2\n" "tbz x16, #0, 23f\n" "ld1 { v21.b }[2], [x20]\n" "b 23f\n" "22:" // Oddments: Load (0, 1): Bit 2: Unset: Bit 1: Unset "tbz x16, #0, 23f\n" "ld1 { v21.b }[0], [x20]\n" "23:" // Oddments: Load (0, 1): Bit 2: End "ushll v21.8h, v21.8b, #0x0\n" "ldr x20, [x12, #0x40]\n" "smlal v28.4s, v21.4h, v7.4h\n" "smlal2 v9.4s, v21.8h, v7.8h\n" "smlal v3.4s, v21.4h, v19.4h\n" "smlal2 v30.4s, v21.8h, v19.8h\n" "add x20, x20, x14\n" "tbz x16, #2, 25f\n" "ld1 { v18.s }[0], [x20], #0x4\n" "tbz x16, #1, 24f\n" "ld1 { v18.h }[2], [x20], #0x2\n" "tbz x16, #0, 27f\n" "ld1 { v18.b }[6], [x20]\n" "b 27f\n" "24:" // Oddments: Load (0, 2): Bit 2: Bit 1: Unset "tbz x16, #0, 27f\n" "ld1 { v18.b }[4], [x20]\n" "b 27f\n" "25:" // Oddments: Load (0, 2): Bit 2: Unset "tbz x16, #1, 26f\n" "ld1 { v18.h }[0], [x20], #0x2\n" "tbz x16, #0, 27f\n" "ld1 { v18.b }[2], [x20]\n" "b 27f\n" "26:" // Oddments: Load (0, 2): Bit 2: Unset: Bit 1: Unset "tbz x16, #0, 27f\n" "ld1 { v18.b }[0], [x20]\n" "27:" // Oddments: Load (0, 2): Bit 2: End "ushll v18.8h, v18.8b, #0x0\n" "ldr x20, [x12, #0x48]\n" "smlal v28.4s, v18.4h, v1.4h\n" "smlal2 v9.4s, v18.8h, v1.8h\n" "smlal v3.4s, v18.4h, v7.4h\n" "smlal2 v30.4s, v18.8h, v7.8h\n" "add x20, x20, x14\n" "tbz x16, #2, 29f\n" "ld1 { v15.s }[0], [x20], #0x4\n" "tbz x16, #1, 28f\n" "ld1 { v15.h }[2], [x20], #0x2\n" "tbz x16, #0, 31f\n" "ld1 { v15.b }[6], [x20]\n" "b 31f\n" "28:" // Oddments: Load (2, 2): Bit 2: Bit 1: Unset "tbz x16, #0, 31f\n" "ld1 { v15.b }[4], [x20]\n" "b 31f\n" "29:" // Oddments: Load (2, 2): Bit 2: Unset "tbz x16, #1, 30f\n" "ld1 { v15.h }[0], [x20], #0x2\n" "tbz x16, #0, 31f\n" "ld1 { v15.b }[2], [x20]\n" "b 31f\n" "30:" // Oddments: Load (2, 2): Bit 2: Unset: Bit 1: Unset "tbz x16, #0, 31f\n" "ld1 { v15.b }[0], 
[x20]\n" "31:" // Oddments: Load (2, 2): Bit 2: End "ushll v15.8h, v15.8b, #0x0\n" "ldr x20, [x12, #0x50]\n" "smlal v28.4s, v15.4h, v4.4h\n" "smlal2 v9.4s, v15.8h, v4.8h\n" "smlal v3.4s, v15.4h, v16.4h\n" "smlal2 v30.4s, v15.8h, v16.8h\n" "add x20, x20, x14\n" "smlal v0.4s, v15.4h, v31.4h\n" "smlal2 v22.4s, v15.8h, v31.8h\n" "smlal v6.4s, v15.4h, v8.4h\n" "smlal2 v2.4s, v15.8h, v8.8h\n" "tbz x16, #2, 33f\n" "ld1 { v20.s }[0], [x20], #0x4\n" "tbz x16, #1, 32f\n" "ld1 { v20.h }[2], [x20], #0x2\n" "tbz x16, #0, 35f\n" "ld1 { v20.b }[6], [x20]\n" "b 35f\n" "32:" // Oddments: Load (1, 0): Bit 2: Bit 1: Unset "tbz x16, #0, 35f\n" "ld1 { v20.b }[4], [x20]\n" "b 35f\n" "33:" // Oddments: Load (1, 0): Bit 2: Unset "tbz x16, #1, 34f\n" "ld1 { v20.h }[0], [x20], #0x2\n" "tbz x16, #0, 35f\n" "ld1 { v20.b }[2], [x20]\n" "b 35f\n" "34:" // Oddments: Load (1, 0): Bit 2: Unset: Bit 1: Unset "tbz x16, #0, 35f\n" "ld1 { v20.b }[0], [x20]\n" "35:" // Oddments: Load (1, 0): Bit 2: End "ushll v20.8h, v20.8b, #0x0\n" "ldr x20, [x12, #0x58]\n" "smlal v28.4s, v20.4h, v17.4h\n" "smlal2 v9.4s, v20.8h, v17.8h\n" "smlal v0.4s, v20.4h, v19.4h\n" "smlal2 v22.4s, v20.8h, v19.8h\n" "add x20, x20, x14\n" "tbz x16, #2, 37f\n" "ld1 { v11.s }[0], [x20], #0x4\n" "tbz x16, #1, 36f\n" "ld1 { v11.h }[2], [x20], #0x2\n" "tbz x16, #0, 39f\n" "ld1 { v11.b }[6], [x20]\n" "b 39f\n" "36:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset "tbz x16, #0, 39f\n" "ld1 { v11.b }[4], [x20]\n" "b 39f\n" "37:" // Oddments: Load (1, 3): Bit 2: Unset "tbz x16, #1, 38f\n" "ld1 { v11.h }[0], [x20], #0x2\n" "tbz x16, #0, 39f\n" "ld1 { v11.b }[2], [x20]\n" "b 39f\n" "38:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset "tbz x16, #0, 39f\n" "ld1 { v11.b }[0], [x20]\n" "39:" // Oddments: Load (1, 3): Bit 2: End "ushll v11.8h, v11.8b, #0x0\n" "ldr x20, [x12, #0x60]\n" "smlal v3.4s, v11.4h, v31.4h\n" "smlal2 v30.4s, v11.8h, v31.8h\n" "smlal v6.4s, v11.4h, v1.4h\n" "smlal2 v2.4s, v11.8h, v1.8h\n" "add x20, x20, x14\n" "tbz x16, #2, 41f\n" "ld1 { v23.s }[0], [x20], #0x4\n" "tbz x16, #1, 40f\n" "ld1 { v23.h }[2], [x20], #0x2\n" "tbz x16, #0, 43f\n" "ld1 { v23.b }[6], [x20]\n" "b 43f\n" "40:" // Oddments: Load (2, 0): Bit 2: Bit 1: Unset "tbz x16, #0, 43f\n" "ld1 { v23.b }[4], [x20]\n" "b 43f\n" "41:" // Oddments: Load (2, 0): Bit 2: Unset "tbz x16, #1, 42f\n" "ld1 { v23.h }[0], [x20], #0x2\n" "tbz x16, #0, 43f\n" "ld1 { v23.b }[2], [x20]\n" "b 43f\n" "42:" // Oddments: Load (2, 0): Bit 2: Unset: Bit 1: Unset "tbz x16, #0, 43f\n" "ld1 { v23.b }[0], [x20]\n" "43:" // Oddments: Load (2, 0): Bit 2: End "ushll v23.8h, v23.8b, #0x0\n" "ldr x20, [x12, #0x68]\n" "smlal v28.4s, v23.4h, v29.4h\n" "smlal2 v9.4s, v23.8h, v29.8h\n" "smlal v0.4s, v23.4h, v17.4h\n" "smlal2 v22.4s, v23.8h, v17.8h\n" "add x20, x20, x14\n" "tbz x16, #2, 45f\n" "ld1 { v20.s }[0], [x20], #0x4\n" "tbz x16, #1, 44f\n" "ld1 { v20.h }[2], [x20], #0x2\n" "tbz x16, #0, 47f\n" "ld1 { v20.b }[6], [x20]\n" "b 47f\n" "44:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset "tbz x16, #0, 47f\n" "ld1 { v20.b }[4], [x20]\n" "b 47f\n" "45:" // Oddments: Load (2, 3): Bit 2: Unset "tbz x16, #1, 46f\n" "ld1 { v20.h }[0], [x20], #0x2\n" "tbz x16, #0, 47f\n" "ld1 { v20.b }[2], [x20]\n" "b 47f\n" "46:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset "tbz x16, #0, 47f\n" "ld1 { v20.b }[0], [x20]\n" "47:" // Oddments: Load (2, 3): Bit 2: End "ushll v20.8h, v20.8b, #0x0\n" "ldr x20, [x12, #0x70]\n" "smlal v3.4s, v20.4h, v4.4h\n" "smlal2 v30.4s, v20.8h, v4.8h\n" "smlal v6.4s, v20.4h, v31.4h\n" "smlal2 v2.4s, 
v20.8h, v31.8h\n" "add x20, x20, x14\n" "tbz x16, #2, 49f\n" "ld1 { v8.s }[0], [x20], #0x4\n" "tbz x16, #1, 48f\n" "ld1 { v8.h }[2], [x20], #0x2\n" "tbz x16, #0, 51f\n" "ld1 { v8.b }[6], [x20]\n" "b 51f\n" "48:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset "tbz x16, #0, 51f\n" "ld1 { v8.b }[4], [x20]\n" "b 51f\n" "49:" // Oddments: Load (3, 1): Bit 2: Unset "tbz x16, #1, 50f\n" "ld1 { v8.h }[0], [x20], #0x2\n" "tbz x16, #0, 51f\n" "ld1 { v8.b }[2], [x20]\n" "b 51f\n" "50:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset "tbz x16, #0, 51f\n" "ld1 { v8.b }[0], [x20]\n" "51:" // Oddments: Load (3, 1): Bit 2: End "ushll v8.8h, v8.8b, #0x0\n" "ldr x20, [x12, #0x78]\n" "smlal v0.4s, v8.4h, v16.4h\n" "smlal2 v22.4s, v8.8h, v16.8h\n" "smlal v6.4s, v8.4h, v29.4h\n" "smlal2 v2.4s, v8.8h, v29.8h\n" "add x20, x20, x14\n" "tbz x16, #2, 53f\n" "ld1 { v8.s }[0], [x20], #0x4\n" "tbz x16, #1, 52f\n" "ld1 { v8.h }[2], [x20], #0x2\n" "tbz x16, #0, 55f\n" "ld1 { v8.b }[6], [x20]\n" "b 55f\n" "52:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset "tbz x16, #0, 55f\n" "ld1 { v8.b }[4], [x20]\n" "b 55f\n" "53:" // Oddments: Load (3, 2): Bit 2: Unset "tbz x16, #1, 54f\n" "ld1 { v8.h }[0], [x20], #0x2\n" "tbz x16, #0, 55f\n" "ld1 { v8.b }[2], [x20]\n" "b 55f\n" "54:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset "tbz x16, #0, 55f\n" "ld1 { v8.b }[0], [x20]\n" "55:" // Oddments: Load (3, 2): Bit 2: End "ushll v8.8h, v8.8b, #0x0\n" "smlal v0.4s, v8.4h, v4.4h\n" "smlal2 v22.4s, v8.8h, v4.8h\n" "smlal v6.4s, v8.4h, v16.4h\n" "smlal2 v2.4s, v8.8h, v16.8h\n" "tbz x16, #2, 57f\n" "ld1 { v7.4s }, [x10], #0x10\n" "ld1 { v23.4s }, [x9], #0x10\n" "tbz x16, #1, 56f\n" "ld1 { v11.d }[0], [x10], #0x8\n" "ld1 { v27.d }[0], [x9], #0x8\n" "tbz x16, #0, 59f\n" "ld1 { v11.s }[2], [x10]\n" "ld1 { v27.s }[2], [x9]\n" "b 59f\n" "56:" // Oddments: Load requant params: Bit 2: Bit 1: Unset "tbz x16, #0, 59f\n" "ld1 { v11.s }[0], [x10]\n" "ld1 { v27.s }[0], [x9]\n" "b 59f\n" "57:" // Oddments: Load requant params: Bit 2: Unset "tbz x16, #1, 58f\n" "ld1 { v7.d }[0], [x10], #0x8\n" "ld1 { v23.d }[0], [x9], #0x8\n" "tbz x16, #0, 59f\n" "ld1 { v7.s }[2], [x10]\n" "ld1 { v23.s }[2], [x9]\n" "b 59f\n" "58:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset "tbz x16, #0, 59f\n" "ld1 { v7.s }[0], [x10]\n" "ld1 { v23.s }[0], [x9]\n" "59:" // Oddments: Load requant params: Bit 2: End "sqrdmulh v28.4s, v28.4s, v7.4s\n" "and v20.16b, v28.16b, v23.16b\n" "add x28, x28, x13\n" "add x27, x27, x13\n" "sqrdmulh v9.4s, v9.4s, v11.4s\n" "sshr v20.4s, v20.4s, #0x1f\n" "add x26, x26, x13\n" "add x25, x25, x13\n" "and v4.16b, v9.16b, v27.16b\n" "sqrdmulh v3.4s, v3.4s, v7.4s\n" "sqrdmulh v0.4s, v0.4s, v7.4s\n" "sqrdmulh v6.4s, v6.4s, v7.4s\n" "sqadd v28.4s, v28.4s, v20.4s\n" "sshr v4.4s, v4.4s, #0x1f\n" "and v19.16b, v3.16b, v23.16b\n" "sqrdmulh v30.4s, v30.4s, v11.4s\n" "and v29.16b, v0.16b, v23.16b\n" "sqrdmulh v22.4s, v22.4s, v11.4s\n" "and v26.16b, v6.16b, v23.16b\n" "sqrdmulh v2.4s, v2.4s, v11.4s\n" "sqadd v9.4s, v9.4s, v4.4s\n" "sshr v19.4s, v19.4s, #0x1f\n" "and v17.16b, v30.16b, v27.16b\n" "sshr v29.4s, v29.4s, #0x1f\n" "and v8.16b, v22.16b, v27.16b\n" "sshr v26.4s, v26.4s, #0x1f\n" "and v13.16b, v2.16b, v27.16b\n" "sqadd v3.4s, v3.4s, v19.4s\n" "sshr v17.4s, v17.4s, #0x1f\n" "sqadd v0.4s, v0.4s, v29.4s\n" "sshr v8.4s, v8.4s, #0x1f\n" "sqadd v6.4s, v6.4s, v26.4s\n" "sshr v13.4s, v13.4s, #0x1f\n" "srshl v28.4s, v28.4s, v23.4s\n" "srshl v3.4s, v3.4s, v23.4s\n" "sqadd v30.4s, v30.4s, v17.4s\n" "srshl v0.4s, v0.4s, v23.4s\n" "sqadd 
    v22.4s, v22.4s, v8.4s\n"
    "srshl v6.4s, v6.4s, v23.4s\n"
    "sqadd v2.4s, v2.4s, v13.4s\n"
    "srshl v9.4s, v9.4s, v27.4s\n"
    "sqxtn v28.4h, v28.4s\n"
    "srshl v30.4s, v30.4s, v27.4s\n"
    "sqxtn v3.4h, v3.4s\n"
    "srshl v22.4s, v22.4s, v27.4s\n"
    "sqxtn v0.4h, v0.4s\n"
    "srshl v2.4s, v2.4s, v27.4s\n"
    "sqxtn v6.4h, v6.4s\n"
    "sqxtn2 v28.8h, v9.4s\n"
    "sqxtn2 v3.8h, v30.4s\n"
    "sqxtn2 v0.8h, v22.4s\n"
    "sqxtn2 v6.8h, v2.4s\n"
    "sqadd v28.8h, v28.8h, v5.8h\n"
    "sqadd v3.8h, v3.8h, v5.8h\n"
    "sqadd v0.8h, v0.8h, v5.8h\n"
    "sqadd v6.8h, v6.8h, v5.8h\n"
    "smax v28.8h, v28.8h, v14.8h\n"
    "smax v3.8h, v3.8h, v14.8h\n"
    "smax v0.8h, v0.8h, v14.8h\n"
    "smax v6.8h, v6.8h, v14.8h\n"
    "smin v28.8h, v28.8h, v12.8h\n"
    "smin v3.8h, v3.8h, v12.8h\n"
    "smin v0.8h, v0.8h, v12.8h\n"
    "smin v6.8h, v6.8h, v12.8h\n"
    "uzp1 v28.16b, v28.16b, v28.16b\n"
    "uzp1 v3.16b, v3.16b, v3.16b\n"
    "uzp1 v0.16b, v0.16b, v0.16b\n"
    "uzp1 v6.16b, v6.16b, v6.16b\n"
    "tbz x16, #2, 61f\n"
    "st1 { v28.s }[0], [x28], #0x4\n"
    "st1 { v3.s }[0], [x27], #0x4\n"
    "st1 { v0.s }[0], [x26], #0x4\n"
    "st1 { v6.s }[0], [x25], #0x4\n"
    "tbz x16, #1, 60f\n"
    "st1 { v28.h }[2], [x28], #0x2\n"
    "st1 { v3.h }[2], [x27], #0x2\n"
    "st1 { v0.h }[2], [x26], #0x2\n"
    "st1 { v6.h }[2], [x25], #0x2\n"
    "tbz x16, #0, 63f\n"
    "st1 { v28.b }[6], [x28], #0x1\n"
    "st1 { v3.b }[6], [x27], #0x1\n"
    "st1 { v0.b }[6], [x26], #0x1\n"
    "st1 { v6.b }[6], [x25], #0x1\n"
    "b 63f\n"
    "60:"  // Oddments: Bit 2: Bit 1: Unset
    "tbz x16, #0, 63f\n"
    "st1 { v28.b }[4], [x28], #0x1\n"
    "st1 { v3.b }[4], [x27], #0x1\n"
    "st1 { v0.b }[4], [x26], #0x1\n"
    "st1 { v6.b }[4], [x25], #0x1\n"
    "b 63f\n"
    "61:"  // Oddments: Bit 2: Unset
    "tbz x16, #1, 62f\n"
    "st1 { v28.h }[0], [x28], #0x2\n"
    "st1 { v3.h }[0], [x27], #0x2\n"
    "st1 { v0.h }[0], [x26], #0x2\n"
    "st1 { v6.h }[0], [x25], #0x2\n"
    "tbz x16, #0, 63f\n"
    "st1 { v28.b }[2], [x28], #0x1\n"
    "st1 { v3.b }[2], [x27], #0x1\n"
    "st1 { v0.b }[2], [x26], #0x1\n"
    "st1 { v6.b }[2], [x25], #0x1\n"
    "b 63f\n"
    "62:"  // Oddments: Bit 2: Unset: Bit 1: Unset
    "tbz x16, #0, 63f\n"
    "st1 { v28.b }[0], [x28], #0x1\n"
    "st1 { v3.b }[0], [x27], #0x1\n"
    "st1 { v0.b }[0], [x26], #0x1\n"
    "st1 { v6.b }[0], [x25], #0x1\n"
    "63:"  // Oddments: Bit 2: End
    "64:"  // End
    :
    : [offsetof_Params_bias] "I" (offsetof(Params, bias)),
      [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)),
      [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)),
      [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)),
      [offsetof_Params_requant] "I" (offsetof(Params, requant)),
      [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)),
      [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)),
      [offsetof_Params_weights] "I" (offsetof(Params, weights)),
      [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)),
      [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)),
      [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)),
      [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)),
      [params] "r" (&params)
    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
      "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
      "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11",
      "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26",
      "x27", "x28"
  );
}

}  // namespace depthwise
}  // namespace arm_conv

#endif  // defined(__aarch64__)