From d02d5edfa15ba6c04a9986a8a362a945cb38ac31 Mon Sep 17 00:00:00 2001
From: Michele Di Giorgio
Date: Fri, 22 Jan 2021 09:47:04 +0000
Subject: Integrate improved CPU depthwise convolution kernels

* Replace assembly kernels for depthwise convolution with more optimized ones.
* Add int8 assembly kernels.
* Fix implicit padding on optimized kernels

Resolves: COMPMID-3867, COMPMID-4361

Change-Id: I0b0867e05f61be4f368f62190d55e14d0ab3ebf2
Signed-off-by: Michele Di Giorgio
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5622
Tested-by: Arm Jenkins
Reviewed-by: Georgios Pinitas
---
 .../generic.cpp | 1192 ++++++++++++++++++++
 1 file changed, 1192 insertions(+)
 create mode 100644 src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp

diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
new file mode 100644
index 0000000000..8cbbfae00d
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
@@ -0,0 +1,1192 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_gemm.hpp"
+
+#include <cstddef>
+#include <cstdint>
+
+#if defined(__aarch64__)
+
+namespace arm_conv {
+namespace depthwise {
+
+void a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
+  const unsigned int n_channels,
+  const uint8_t *const *const inptrs,
+  const int8_t *const weights,
+  const int32_t *const bias,
+  const arm_gemm::Requantize32 &qp,
+  const int32_t *const requant_muls,
+  const int32_t *const requant_shifts,
+  uint8_t *const *const outptrs
+)
+{
+  struct Params
+  {
+    long unsigned int n_channels;
+    const int8_t *weights;
+    const int32_t *bias;
+    const arm_gemm::Requantize32 *requant;
+    const int32_t *const requant_muls;
+    const int32_t *const requant_shifts;
+    uint8_t *const *const outptrs;
+    const uint8_t *inptrs[16];
+
+    Params(
+      long unsigned int n_channels,
+      const uint8_t *const *inptrs_raw,
+      const int8_t *const weights,
+      const int32_t *const bias,
+      const arm_gemm::Requantize32 &qp,
+      const int32_t *const requant_muls,
+      const int32_t *const requant_shifts,
+      uint8_t *const *outptrs
+    ) : n_channels(n_channels), weights(weights), bias(bias),
+        requant(&qp), requant_muls(requant_muls),
+        requant_shifts(requant_shifts), outptrs(outptrs)
+    {
+      inptrs[0] = inptrs_raw[5];
+      inptrs[1] = inptrs_raw[0];
+      inptrs[2] = inptrs_raw[3];
+      inptrs[3] = inptrs_raw[6];
+      inptrs[4] = inptrs_raw[9];
+      inptrs[5] = inptrs_raw[12];
+      inptrs[6] = inptrs_raw[15];
+      inptrs[7] = inptrs_raw[1];
+      inptrs[8] = inptrs_raw[2];
+      inptrs[9] = inptrs_raw[10];
+      inptrs[10] = inptrs_raw[4];
+      inptrs[11] = inptrs_raw[7];
+      inptrs[12] = inptrs_raw[8];
+      inptrs[13] = inptrs_raw[11];
+      inptrs[14] = inptrs_raw[13];
+      inptrs[15] = inptrs_raw[14];
+
+    }
+  };
+
+  const Params params(n_channels, inptrs, weights, bias, qp,
+                      requant_muls, requant_shifts, outptrs);
+
+  __asm__ __volatile__(
+    "ldr x8, [%x[params], %[offsetof_Params_n_channels]]\n"
+    "mov x17, #0x0\n"
+    "ldr x16, [%x[params], %[offsetof_Params_weights]]\n"
+    "mov x15, #0x0\n"
+    "ldr x22, [%x[params], %[offsetof_Params_requant]]\n"
+    "add x14, %x[params], %[offsetof_Params_inptrs]\n"
+    "ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n"
+    "lsr x12, x8, #0x3\n"
+    "ldr x11, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+    "add x19, x22, %[offsetof_Requantize32_a_offset]\n"
+    "ldr x21, [%x[params], %[offsetof_Params_outptrs]]\n"
+    "add x20, x22, %[offsetof_Requantize32_b_offset]\n"
+    "ld1r { v21.16b }, [x19]\n"
+    "add x19, x22, %[offsetof_Requantize32_c_offset]\n"
+    "ld1r { v17.16b }, [x20]\n"
+    "add x20, x22, %[offsetof_Requantize32_minval]\n"
+    "ld1r { v13.4s }, [x19]\n"
+    "add x19, x22, %[offsetof_Requantize32_maxval]\n"
+    "ld1r { v15.4s }, [x20]\n"
+    "ld1r { v14.4s }, [x19]\n"
+    "ldp x10, x9, [x21, #0x0]\n"
+    "ldp x28, x27, [x21, #0x10]\n"
+    "cbz x12, 3f\n"
+    "subs x12, x12, #0x1\n"
+    "ldr x19, [%x[params], %[offsetof_Params_bias]]\n"
+    "ldr q11, [x19, #0x0]\n"
+    "mov v23.16b, v11.16b\n"
+    "ldr q26, [x19, #0x10]\n"
+    "add x19, x19, #0x20\n"
+    "mov v12.16b, v11.16b\n"
+    "str x19, [%x[params], %[offsetof_Params_bias]]\n"
+    "mov v24.16b, v11.16b\n"
+    "ldr d0, [x16, #0x0]\n"
+    "ldr d1, [x16, #0x8]\n"
+    "mov v9.16b, v26.16b\n"
+    "ldr d2, [x16, #0x10]\n"
+    "mov v22.16b, v26.16b\n"
+    "ldr d3, [x16, #0x18]\n"
+    "mov v10.16b, v26.16b\n"
+    "ldr d4, [x16, #0x20]\n"
+    "ssubl v0.8h, v0.8b, v17.8b\n"
+    "ldr d5, [x16, #0x28]\n"
+    "ssubl v1.8h, v1.8b, v17.8b\n"
+    "ldr d6, [x16, #0x30]\n"
+    "ssubl v2.8h, v2.8b, v17.8b\n"
+    "ldr d7, [x16, #0x38]\n"
+    "ssubl v3.8h, v3.8b, v17.8b\n"
+    "ldr d8, [x16, #0x40]\n"
+    "ssubl v4.8h, v4.8b, v17.8b\n"
"ldp x23, x22, [x14, #0x0]\n" + "ssubl v5.8h, v5.8b, v17.8b\n" + "ldp x21, x20, [x14, #0x10]\n" + "ssubl v6.8h, v6.8b, v17.8b\n" + "ssubl v7.8h, v7.8b, v17.8b\n" + "ldr x19, [x14, #0x20]\n" + "ssubl v8.8h, v8.8b, v17.8b\n" + "ldr d31, [x23, x17]\n" + "usubl v31.8h, v31.8b, v21.8b\n" + "ldr d30, [x22, x17]\n" + "ldr d29, [x21, x17]\n" + "usubl v30.8h, v30.8b, v21.8b\n" + "ldr d28, [x20, x17]\n" + "usubl v29.8h, v29.8b, v21.8b\n" + "ldr d27, [x19, x17]\n" + "usubl v28.8h, v28.8b, v21.8b\n" + "usubl v27.8h, v27.8b, v21.8b\n" + "beq 2f\n" + "1:" // Loop + "smlal v11.4s, v31.4h, v4.4h\n" + "ldr x21, [x14, #0x28]\n" + "add x16, x16, #0x48\n" + "smlal2 v26.4s, v31.8h, v4.8h\n" + "ldr x20, [x14, #0x30]\n" + "subs x12, x12, #0x1\n" + "smlal v23.4s, v31.4h, v3.4h\n" + "ldr x26, [x14, #0x38]\n" + "smlal2 v9.4s, v31.8h, v3.8h\n" + "ldr x25, [x14, #0x40]\n" + "smlal v12.4s, v31.4h, v1.4h\n" + "ldr x19, [x14, #0x48]\n" + "smlal2 v22.4s, v31.8h, v1.8h\n" + "ldr x24, [x14, #0x50]\n" + "smlal v24.4s, v31.4h, v0.4h\n" + "ldr x23, [x14, #0x58]\n" + "smlal2 v10.4s, v31.8h, v0.8h\n" + "ldr d31, [x21, x17]\n" + "smlal v11.4s, v30.4h, v0.4h\n" + "ldr x22, [x14, #0x60]\n" + "smlal2 v26.4s, v30.8h, v0.8h\n" + "ldr d30, [x19, x17]\n" + "smlal v23.4s, v29.4h, v2.4h\n" + "ldr x21, [x14, #0x68]\n" + "smlal2 v9.4s, v29.8h, v2.8h\n" + "ldr d29, [x20, x17]\n" + "smlal v11.4s, v28.4h, v5.4h\n" + "ldr x20, [x14, #0x70]\n" + "smlal2 v26.4s, v28.8h, v5.8h\n" + "ldr x19, [x14, #0x78]\n" + "smlal v23.4s, v28.4h, v4.4h\n" + "ldr q25, [x13, #0x0]\n" + "smlal2 v9.4s, v28.8h, v4.8h\n" + "ldr q18, [x11, #0x0]\n" + "smlal v12.4s, v28.4h, v2.4h\n" + "ldr q16, [x13, #0x10]\n" + "add x13, x13, #0x20\n" + "smlal2 v22.4s, v28.8h, v2.8h\n" + "ldr q20, [x11, #0x10]\n" + "add x11, x11, #0x20\n" + "smlal v24.4s, v28.4h, v1.4h\n" + "smlal2 v10.4s, v28.8h, v1.8h\n" + "ldr d28, [x26, x17]\n" + "usubl v31.8h, v31.8b, v21.8b\n" + "smlal v11.4s, v27.4h, v7.4h\n" + "smlal2 v26.4s, v27.8h, v7.8h\n" + "smlal v12.4s, v31.4h, v6.4h\n" + "smlal2 v22.4s, v31.8h, v6.8h\n" + "ldr d31, [x25, x17]\n" + "smlal v23.4s, v27.4h, v6.4h\n" + "smlal2 v9.4s, v27.8h, v6.8h\n" + "smlal v12.4s, v27.4h, v4.4h\n" + "smlal2 v22.4s, v27.8h, v4.8h\n" + "smlal v24.4s, v27.4h, v3.4h\n" + "smlal2 v10.4s, v27.8h, v3.8h\n" + "usubl v29.8h, v29.8b, v21.8b\n" + "usubl v28.8h, v28.8b, v21.8b\n" + "usubl v31.8h, v31.8b, v21.8b\n" + "smlal v24.4s, v29.4h, v8.4h\n" + "smlal2 v10.4s, v29.8h, v8.8h\n" + "ldr d29, [x24, x17]\n" + "smlal v11.4s, v28.4h, v1.4h\n" + "smlal2 v26.4s, v28.8h, v1.8h\n" + "smlal v23.4s, v28.4h, v0.4h\n" + "smlal2 v9.4s, v28.8h, v0.8h\n" + "ldr d28, [x23, x17]\n" + "smlal v11.4s, v31.4h, v2.4h\n" + "smlal2 v26.4s, v31.8h, v2.8h\n" + "smlal v23.4s, v31.4h, v1.4h\n" + "smlal2 v9.4s, v31.8h, v1.8h\n" + "ldr d31, [x22, x17]\n" + "usubl v30.8h, v30.8b, v21.8b\n" + "usubl v29.8h, v29.8b, v21.8b\n" + "usubl v28.8h, v28.8b, v21.8b\n" + "smlal v11.4s, v30.4h, v8.4h\n" + "smlal2 v26.4s, v30.8h, v8.8h\n" + "smlal v23.4s, v30.4h, v7.4h\n" + "smlal2 v9.4s, v30.8h, v7.8h\n" + "smlal v12.4s, v30.4h, v5.4h\n" + "smlal2 v22.4s, v30.8h, v5.8h\n" + "smlal v24.4s, v30.4h, v4.4h\n" + "smlal2 v10.4s, v30.8h, v4.8h\n" + "ldr d30, [x21, x17]\n" + "smlal v11.4s, v29.4h, v3.4h\n" + "smlal2 v26.4s, v29.8h, v3.8h\n" + "smlal v12.4s, v29.4h, v0.4h\n" + "smlal2 v22.4s, v29.8h, v0.8h\n" + "ldr d29, [x20, x17]\n" + "smlal v23.4s, v28.4h, v5.4h\n" + "smlal2 v9.4s, v28.8h, v5.8h\n" + "smlal v24.4s, v28.4h, v2.4h\n" + "smlal2 v10.4s, v28.8h, v2.8h\n" + "ldr d28, [x19, x17]\n" + "add x17, x17, 
#0x8\n" + "usubl v31.8h, v31.8b, v21.8b\n" + "usubl v30.8h, v30.8b, v21.8b\n" + "usubl v29.8h, v29.8b, v21.8b\n" + "smlal v11.4s, v31.4h, v6.4h\n" + "smlal2 v26.4s, v31.8h, v6.8h\n" + "smlal v12.4s, v31.4h, v3.4h\n" + "smlal2 v22.4s, v31.8h, v3.8h\n" + "smlal v23.4s, v30.4h, v8.4h\n" + "smlal2 v9.4s, v30.8h, v8.8h\n" + "smlal v24.4s, v30.4h, v5.4h\n" + "smlal2 v10.4s, v30.8h, v5.8h\n" + "smlal v12.4s, v29.4h, v7.4h\n" + "smlal2 v22.4s, v29.8h, v7.8h\n" + "smlal v24.4s, v29.4h, v6.4h\n" + "smlal2 v10.4s, v29.8h, v6.8h\n" + "usubl v28.8h, v28.8b, v21.8b\n" + "sqrdmulh v11.4s, v11.4s, v25.4s\n" + "sqrdmulh v26.4s, v26.4s, v16.4s\n" + "smlal v12.4s, v28.4h, v8.4h\n" + "smlal2 v22.4s, v28.8h, v8.8h\n" + "smlal v24.4s, v28.4h, v7.4h\n" + "smlal2 v10.4s, v28.8h, v7.8h\n" + "and v19.16b, v11.16b, v18.16b\n" + "and v5.16b, v26.16b, v20.16b\n" + "sqrdmulh v23.4s, v23.4s, v25.4s\n" + "sshr v19.4s, v19.4s, #0x1f\n" + "sshr v5.4s, v5.4s, #0x1f\n" + "sqrdmulh v9.4s, v9.4s, v16.4s\n" + "sqadd v11.4s, v11.4s, v19.4s\n" + "sqadd v26.4s, v26.4s, v5.4s\n" + "and v28.16b, v23.16b, v18.16b\n" + "and v8.16b, v9.16b, v20.16b\n" + "srshl v11.4s, v11.4s, v18.4s\n" + "srshl v26.4s, v26.4s, v20.4s\n" + "sshr v28.4s, v28.4s, #0x1f\n" + "sshr v8.4s, v8.4s, #0x1f\n" + "add v11.4s, v11.4s, v13.4s\n" + "add v26.4s, v26.4s, v13.4s\n" + "sqadd v23.4s, v23.4s, v28.4s\n" + "smin v11.4s, v11.4s, v14.4s\n" + "smin v26.4s, v26.4s, v14.4s\n" + "sqadd v9.4s, v9.4s, v8.4s\n" + "smax v11.4s, v11.4s, v15.4s\n" + "smax v26.4s, v26.4s, v15.4s\n" + "srshl v23.4s, v23.4s, v18.4s\n" + "srshl v9.4s, v9.4s, v20.4s\n" + "uzp1 v11.16b, v11.16b, v26.16b\n" + "sqrdmulh v12.4s, v12.4s, v25.4s\n" + "uzp1 v11.16b, v11.16b, v11.16b\n" + "str d11, [x10, x15]\n" + "add v23.4s, v23.4s, v13.4s\n" + "add v9.4s, v9.4s, v13.4s\n" + "and v1.16b, v12.16b, v18.16b\n" + "sqrdmulh v22.4s, v22.4s, v16.4s\n" + "smin v23.4s, v23.4s, v14.4s\n" + "smin v9.4s, v9.4s, v14.4s\n" + "sshr v1.4s, v1.4s, #0x1f\n" + "smax v23.4s, v23.4s, v15.4s\n" + "smax v9.4s, v9.4s, v15.4s\n" + "sqadd v12.4s, v12.4s, v1.4s\n" + "and v0.16b, v22.16b, v20.16b\n" + "uzp1 v23.16b, v23.16b, v9.16b\n" + "sqrdmulh v24.4s, v24.4s, v25.4s\n" + "uzp1 v23.16b, v23.16b, v23.16b\n" + "str d23, [x9, x15]\n" + "srshl v12.4s, v12.4s, v18.4s\n" + "sshr v0.4s, v0.4s, #0x1f\n" + "and v26.16b, v24.16b, v18.16b\n" + "sqrdmulh v10.4s, v10.4s, v16.4s\n" + "sqadd v22.4s, v22.4s, v0.4s\n" + "add v12.4s, v12.4s, v13.4s\n" + "sshr v26.4s, v26.4s, #0x1f\n" + "and v16.16b, v10.16b, v20.16b\n" + "smin v12.4s, v12.4s, v14.4s\n" + "srshl v22.4s, v22.4s, v20.4s\n" + "sqadd v24.4s, v24.4s, v26.4s\n" + "smax v12.4s, v12.4s, v15.4s\n" + "sshr v16.4s, v16.4s, #0x1f\n" + "add v22.4s, v22.4s, v13.4s\n" + "srshl v24.4s, v24.4s, v18.4s\n" + "sqadd v10.4s, v10.4s, v16.4s\n" + "smin v22.4s, v22.4s, v14.4s\n" + "add v24.4s, v24.4s, v13.4s\n" + "smax v22.4s, v22.4s, v15.4s\n" + "srshl v10.4s, v10.4s, v20.4s\n" + "smin v24.4s, v24.4s, v14.4s\n" + "uzp1 v12.16b, v12.16b, v22.16b\n" + "add v10.4s, v10.4s, v13.4s\n" + "uzp1 v12.16b, v12.16b, v12.16b\n" + "str d12, [x28, x15]\n" + "smax v24.4s, v24.4s, v15.4s\n" + "smin v10.4s, v10.4s, v14.4s\n" + "smax v10.4s, v10.4s, v15.4s\n" + "uzp1 v24.16b, v24.16b, v10.16b\n" + "uzp1 v24.16b, v24.16b, v24.16b\n" + "str d24, [x27, x15]\n" + "add x15, x15, #0x8\n" + "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" + "ldr q11, [x19, #0x0]\n" + "mov v23.16b, v11.16b\n" + "ldr q26, [x19, #0x10]\n" + "add x19, x19, #0x20\n" + "mov v12.16b, v11.16b\n" + "str x19, [%x[params], 
%[offsetof_Params_bias]]\n" + "mov v24.16b, v11.16b\n" + "ldr d0, [x16, #0x0]\n" + "ldr d1, [x16, #0x8]\n" + "mov v9.16b, v26.16b\n" + "ldr d2, [x16, #0x10]\n" + "mov v22.16b, v26.16b\n" + "ldr d3, [x16, #0x18]\n" + "mov v10.16b, v26.16b\n" + "ldr d4, [x16, #0x20]\n" + "ssubl v0.8h, v0.8b, v17.8b\n" + "ldr d5, [x16, #0x28]\n" + "ssubl v1.8h, v1.8b, v17.8b\n" + "ldr d6, [x16, #0x30]\n" + "ssubl v2.8h, v2.8b, v17.8b\n" + "ldr d7, [x16, #0x38]\n" + "ssubl v3.8h, v3.8b, v17.8b\n" + "ldr d8, [x16, #0x40]\n" + "ssubl v4.8h, v4.8b, v17.8b\n" + "ldp x23, x22, [x14, #0x0]\n" + "ssubl v5.8h, v5.8b, v17.8b\n" + "ldp x21, x20, [x14, #0x10]\n" + "ssubl v6.8h, v6.8b, v17.8b\n" + "ssubl v7.8h, v7.8b, v17.8b\n" + "ldr x19, [x14, #0x20]\n" + "ssubl v8.8h, v8.8b, v17.8b\n" + "ldr d31, [x23, x17]\n" + "usubl v31.8h, v31.8b, v21.8b\n" + "ldr d30, [x22, x17]\n" + "ldr d29, [x21, x17]\n" + "usubl v30.8h, v30.8b, v21.8b\n" + "ldr d28, [x20, x17]\n" + "usubl v29.8h, v29.8b, v21.8b\n" + "ldr d27, [x19, x17]\n" + "usubl v28.8h, v28.8b, v21.8b\n" + "usubl v27.8h, v27.8b, v21.8b\n" + "bgt 1b\n" + "2:" // Tail + "smlal v11.4s, v31.4h, v4.4h\n" + "ldr x21, [x14, #0x28]\n" + "tst x8, #0x7\n" + "smlal2 v26.4s, v31.8h, v4.8h\n" + "ldr x20, [x14, #0x30]\n" + "smlal v23.4s, v31.4h, v3.4h\n" + "ldr x26, [x14, #0x38]\n" + "smlal2 v9.4s, v31.8h, v3.8h\n" + "ldr x25, [x14, #0x40]\n" + "smlal v12.4s, v31.4h, v1.4h\n" + "ldr x19, [x14, #0x48]\n" + "smlal2 v22.4s, v31.8h, v1.8h\n" + "ldr x24, [x14, #0x50]\n" + "smlal v24.4s, v31.4h, v0.4h\n" + "ldr x23, [x14, #0x58]\n" + "smlal2 v10.4s, v31.8h, v0.8h\n" + "ldr d31, [x21, x17]\n" + "smlal v11.4s, v30.4h, v0.4h\n" + "ldr x22, [x14, #0x60]\n" + "smlal2 v26.4s, v30.8h, v0.8h\n" + "ldr d30, [x19, x17]\n" + "smlal v23.4s, v29.4h, v2.4h\n" + "ldr x21, [x14, #0x68]\n" + "smlal2 v9.4s, v29.8h, v2.8h\n" + "ldr d29, [x20, x17]\n" + "smlal v11.4s, v28.4h, v5.4h\n" + "ldr x20, [x14, #0x70]\n" + "smlal2 v26.4s, v28.8h, v5.8h\n" + "ldr x19, [x14, #0x78]\n" + "smlal v23.4s, v28.4h, v4.4h\n" + "ldr q25, [x13, #0x0]\n" + "smlal2 v9.4s, v28.8h, v4.8h\n" + "ldr q18, [x11, #0x0]\n" + "smlal v12.4s, v28.4h, v2.4h\n" + "ldr q16, [x13, #0x10]\n" + "add x13, x13, #0x20\n" + "smlal2 v22.4s, v28.8h, v2.8h\n" + "ldr q20, [x11, #0x10]\n" + "add x11, x11, #0x20\n" + "smlal v24.4s, v28.4h, v1.4h\n" + "smlal2 v10.4s, v28.8h, v1.8h\n" + "ldr d28, [x26, x17]\n" + "usubl v31.8h, v31.8b, v21.8b\n" + "smlal v11.4s, v27.4h, v7.4h\n" + "smlal2 v26.4s, v27.8h, v7.8h\n" + "smlal v12.4s, v31.4h, v6.4h\n" + "smlal2 v22.4s, v31.8h, v6.8h\n" + "ldr d31, [x25, x17]\n" + "smlal v23.4s, v27.4h, v6.4h\n" + "smlal2 v9.4s, v27.8h, v6.8h\n" + "smlal v12.4s, v27.4h, v4.4h\n" + "smlal2 v22.4s, v27.8h, v4.8h\n" + "smlal v24.4s, v27.4h, v3.4h\n" + "smlal2 v10.4s, v27.8h, v3.8h\n" + "usubl v29.8h, v29.8b, v21.8b\n" + "usubl v28.8h, v28.8b, v21.8b\n" + "usubl v31.8h, v31.8b, v21.8b\n" + "smlal v24.4s, v29.4h, v8.4h\n" + "smlal2 v10.4s, v29.8h, v8.8h\n" + "ldr d29, [x24, x17]\n" + "smlal v11.4s, v28.4h, v1.4h\n" + "smlal2 v26.4s, v28.8h, v1.8h\n" + "smlal v23.4s, v28.4h, v0.4h\n" + "smlal2 v9.4s, v28.8h, v0.8h\n" + "ldr d28, [x23, x17]\n" + "smlal v11.4s, v31.4h, v2.4h\n" + "smlal2 v26.4s, v31.8h, v2.8h\n" + "smlal v23.4s, v31.4h, v1.4h\n" + "smlal2 v9.4s, v31.8h, v1.8h\n" + "ldr d31, [x22, x17]\n" + "usubl v30.8h, v30.8b, v21.8b\n" + "usubl v29.8h, v29.8b, v21.8b\n" + "usubl v28.8h, v28.8b, v21.8b\n" + "smlal v11.4s, v30.4h, v8.4h\n" + "smlal2 v26.4s, v30.8h, v8.8h\n" + "smlal v23.4s, v30.4h, v7.4h\n" + "smlal2 v9.4s, v30.8h, v7.8h\n" + 
"smlal v12.4s, v30.4h, v5.4h\n" + "smlal2 v22.4s, v30.8h, v5.8h\n" + "smlal v24.4s, v30.4h, v4.4h\n" + "smlal2 v10.4s, v30.8h, v4.8h\n" + "ldr d30, [x21, x17]\n" + "smlal v11.4s, v29.4h, v3.4h\n" + "smlal2 v26.4s, v29.8h, v3.8h\n" + "smlal v12.4s, v29.4h, v0.4h\n" + "smlal2 v22.4s, v29.8h, v0.8h\n" + "ldr d29, [x20, x17]\n" + "smlal v23.4s, v28.4h, v5.4h\n" + "smlal2 v9.4s, v28.8h, v5.8h\n" + "smlal v24.4s, v28.4h, v2.4h\n" + "smlal2 v10.4s, v28.8h, v2.8h\n" + "ldr d28, [x19, x17]\n" + "add x17, x17, #0x8\n" + "usubl v31.8h, v31.8b, v21.8b\n" + "usubl v30.8h, v30.8b, v21.8b\n" + "usubl v29.8h, v29.8b, v21.8b\n" + "smlal v11.4s, v31.4h, v6.4h\n" + "smlal2 v26.4s, v31.8h, v6.8h\n" + "smlal v12.4s, v31.4h, v3.4h\n" + "smlal2 v22.4s, v31.8h, v3.8h\n" + "smlal v23.4s, v30.4h, v8.4h\n" + "smlal2 v9.4s, v30.8h, v8.8h\n" + "smlal v24.4s, v30.4h, v5.4h\n" + "smlal2 v10.4s, v30.8h, v5.8h\n" + "smlal v12.4s, v29.4h, v7.4h\n" + "smlal2 v22.4s, v29.8h, v7.8h\n" + "smlal v24.4s, v29.4h, v6.4h\n" + "smlal2 v10.4s, v29.8h, v6.8h\n" + "usubl v28.8h, v28.8b, v21.8b\n" + "sqrdmulh v11.4s, v11.4s, v25.4s\n" + "sqrdmulh v26.4s, v26.4s, v16.4s\n" + "smlal v12.4s, v28.4h, v8.4h\n" + "smlal2 v22.4s, v28.8h, v8.8h\n" + "smlal v24.4s, v28.4h, v7.4h\n" + "smlal2 v10.4s, v28.8h, v7.8h\n" + "and v19.16b, v11.16b, v18.16b\n" + "and v5.16b, v26.16b, v20.16b\n" + "sqrdmulh v23.4s, v23.4s, v25.4s\n" + "sshr v19.4s, v19.4s, #0x1f\n" + "sshr v5.4s, v5.4s, #0x1f\n" + "sqrdmulh v9.4s, v9.4s, v16.4s\n" + "sqadd v11.4s, v11.4s, v19.4s\n" + "sqadd v26.4s, v26.4s, v5.4s\n" + "and v28.16b, v23.16b, v18.16b\n" + "and v8.16b, v9.16b, v20.16b\n" + "srshl v11.4s, v11.4s, v18.4s\n" + "srshl v26.4s, v26.4s, v20.4s\n" + "sshr v28.4s, v28.4s, #0x1f\n" + "sshr v8.4s, v8.4s, #0x1f\n" + "add v11.4s, v11.4s, v13.4s\n" + "add v26.4s, v26.4s, v13.4s\n" + "sqadd v23.4s, v23.4s, v28.4s\n" + "smin v11.4s, v11.4s, v14.4s\n" + "smin v26.4s, v26.4s, v14.4s\n" + "sqadd v9.4s, v9.4s, v8.4s\n" + "smax v11.4s, v11.4s, v15.4s\n" + "smax v26.4s, v26.4s, v15.4s\n" + "srshl v23.4s, v23.4s, v18.4s\n" + "srshl v9.4s, v9.4s, v20.4s\n" + "uzp1 v11.16b, v11.16b, v26.16b\n" + "sqrdmulh v12.4s, v12.4s, v25.4s\n" + "uzp1 v11.16b, v11.16b, v11.16b\n" + "str d11, [x10, x15]\n" + "add v23.4s, v23.4s, v13.4s\n" + "add v9.4s, v9.4s, v13.4s\n" + "and v1.16b, v12.16b, v18.16b\n" + "sqrdmulh v22.4s, v22.4s, v16.4s\n" + "smin v23.4s, v23.4s, v14.4s\n" + "smin v9.4s, v9.4s, v14.4s\n" + "sshr v1.4s, v1.4s, #0x1f\n" + "smax v23.4s, v23.4s, v15.4s\n" + "smax v9.4s, v9.4s, v15.4s\n" + "sqadd v12.4s, v12.4s, v1.4s\n" + "and v0.16b, v22.16b, v20.16b\n" + "uzp1 v23.16b, v23.16b, v9.16b\n" + "sqrdmulh v24.4s, v24.4s, v25.4s\n" + "uzp1 v23.16b, v23.16b, v23.16b\n" + "str d23, [x9, x15]\n" + "srshl v12.4s, v12.4s, v18.4s\n" + "sshr v0.4s, v0.4s, #0x1f\n" + "and v26.16b, v24.16b, v18.16b\n" + "sqrdmulh v10.4s, v10.4s, v16.4s\n" + "sqadd v22.4s, v22.4s, v0.4s\n" + "add v12.4s, v12.4s, v13.4s\n" + "sshr v26.4s, v26.4s, #0x1f\n" + "and v16.16b, v10.16b, v20.16b\n" + "smin v12.4s, v12.4s, v14.4s\n" + "srshl v22.4s, v22.4s, v20.4s\n" + "sqadd v24.4s, v24.4s, v26.4s\n" + "smax v12.4s, v12.4s, v15.4s\n" + "sshr v16.4s, v16.4s, #0x1f\n" + "add v22.4s, v22.4s, v13.4s\n" + "srshl v24.4s, v24.4s, v18.4s\n" + "sqadd v10.4s, v10.4s, v16.4s\n" + "smin v22.4s, v22.4s, v14.4s\n" + "add v24.4s, v24.4s, v13.4s\n" + "smax v22.4s, v22.4s, v15.4s\n" + "srshl v10.4s, v10.4s, v20.4s\n" + "smin v24.4s, v24.4s, v14.4s\n" + "uzp1 v12.16b, v12.16b, v22.16b\n" + "add v10.4s, v10.4s, v13.4s\n" + "uzp1 v12.16b, 
v12.16b, v12.16b\n" + "str d12, [x28, x15]\n" + "smax v24.4s, v24.4s, v15.4s\n" + "smin v10.4s, v10.4s, v14.4s\n" + "smax v10.4s, v10.4s, v15.4s\n" + "uzp1 v24.16b, v24.16b, v10.16b\n" + "uzp1 v24.16b, v24.16b, v24.16b\n" + "str d24, [x27, x15]\n" + "add x15, x15, #0x8\n" + "beq 64f\n" + "add x16, x16, #0x48\n" + "3:" // Oddments + "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" + "tbz x8, #2, 5f\n" + "ld1 { v11.4s }, [x19], #0x10\n" + "tbz x8, #1, 4f\n" + "ld1 { v26.d }[0], [x19], #0x8\n" + "tbz x8, #0, 7f\n" + "ld1 { v26.s }[2], [x19]\n" + "b 7f\n" + "4:" // Oddments: Load bias: Bit 2: Bit 1: Unset + "tbz x8, #0, 7f\n" + "ld1 { v26.s }[0], [x19]\n" + "b 7f\n" + "5:" // Oddments: Load bias: Bit 2: Unset + "tbz x8, #1, 6f\n" + "ld1 { v11.d }[0], [x19], #0x8\n" + "tbz x8, #0, 7f\n" + "ld1 { v11.s }[2], [x19]\n" + "b 7f\n" + "6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 7f\n" + "ld1 { v11.s }[0], [x19]\n" + "7:" // Oddments: Load bias: Bit 2: End + "mov v23.16b, v11.16b\n" + "ldr d0, [x16, #0x0]\n" + "mov v9.16b, v26.16b\n" + "ldr d1, [x16, #0x8]\n" + "mov v12.16b, v11.16b\n" + "ldr d2, [x16, #0x10]\n" + "mov v22.16b, v26.16b\n" + "ldr d3, [x16, #0x18]\n" + "mov v24.16b, v11.16b\n" + "ldr d4, [x16, #0x20]\n" + "mov v10.16b, v26.16b\n" + "ldr d5, [x16, #0x28]\n" + "ssubl v0.8h, v0.8b, v17.8b\n" + "ldr d6, [x16, #0x30]\n" + "ssubl v1.8h, v1.8b, v17.8b\n" + "ldr d7, [x16, #0x38]\n" + "ssubl v2.8h, v2.8b, v17.8b\n" + "ldr d8, [x16, #0x40]\n" + "ssubl v3.8h, v3.8b, v17.8b\n" + "ldp x23, x22, [x14, #0x0]\n" + "add x23, x23, x17\n" + "ssubl v4.8h, v4.8b, v17.8b\n" + "ldp x21, x20, [x14, #0x10]\n" + "ssubl v5.8h, v5.8b, v17.8b\n" + "ldr x19, [x14, #0x20]\n" + "ssubl v6.8h, v6.8b, v17.8b\n" + "add x22, x22, x17\n" + "ssubl v7.8h, v7.8b, v17.8b\n" + "add x21, x21, x17\n" + "ssubl v8.8h, v8.8b, v17.8b\n" + "add x20, x20, x17\n" + "add x19, x19, x17\n" + "tbz x8, #2, 9f\n" + "ld1 { v31.s }[0], [x23], #0x4\n" + "ld1 { v30.s }[0], [x22], #0x4\n" + "ld1 { v29.s }[0], [x21], #0x4\n" + "ld1 { v28.s }[0], [x20], #0x4\n" + "ld1 { v27.s }[0], [x19], #0x4\n" + "tbz x8, #1, 8f\n" + "ld1 { v31.h }[2], [x23], #0x2\n" + "ld1 { v30.h }[2], [x22], #0x2\n" + "ld1 { v29.h }[2], [x21], #0x2\n" + "ld1 { v28.h }[2], [x20], #0x2\n" + "ld1 { v27.h }[2], [x19], #0x2\n" + "tbz x8, #0, 11f\n" + "ld1 { v31.b }[6], [x23]\n" + "ld1 { v30.b }[6], [x22]\n" + "ld1 { v29.b }[6], [x21]\n" + "ld1 { v28.b }[6], [x20]\n" + "ld1 { v27.b }[6], [x19]\n" + "b 11f\n" + "8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset + "tbz x8, #0, 11f\n" + "ld1 { v31.b }[4], [x23]\n" + "ld1 { v30.b }[4], [x22]\n" + "ld1 { v29.b }[4], [x21]\n" + "ld1 { v28.b }[4], [x20]\n" + "ld1 { v27.b }[4], [x19]\n" + "b 11f\n" + "9:" // Oddments: Initial loads: Bit 2: Unset + "tbz x8, #1, 10f\n" + "ld1 { v31.h }[0], [x23], #0x2\n" + "ld1 { v30.h }[0], [x22], #0x2\n" + "ld1 { v29.h }[0], [x21], #0x2\n" + "ld1 { v28.h }[0], [x20], #0x2\n" + "ld1 { v27.h }[0], [x19], #0x2\n" + "tbz x8, #0, 11f\n" + "ld1 { v31.b }[2], [x23]\n" + "ld1 { v30.b }[2], [x22]\n" + "ld1 { v29.b }[2], [x21]\n" + "ld1 { v28.b }[2], [x20]\n" + "ld1 { v27.b }[2], [x19]\n" + "b 11f\n" + "10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 11f\n" + "ld1 { v31.b }[0], [x23]\n" + "ld1 { v30.b }[0], [x22]\n" + "ld1 { v29.b }[0], [x21]\n" + "ld1 { v28.b }[0], [x20]\n" + "ld1 { v27.b }[0], [x19]\n" + "11:" // Oddments: Initial loads: Bit 2: End + "usubl v31.8h, v31.8b, v21.8b\n" + "ldr x21, [x14, #0x28]\n" + "add x21, x21, x17\n" + "usubl v30.8h, v30.8b, 
v21.8b\n" + "usubl v29.8h, v29.8b, v21.8b\n" + "usubl v28.8h, v28.8b, v21.8b\n" + "usubl v27.8h, v27.8b, v21.8b\n" + "smlal v11.4s, v31.4h, v4.4h\n" + "smlal2 v26.4s, v31.8h, v4.8h\n" + "smlal v23.4s, v31.4h, v3.4h\n" + "smlal2 v9.4s, v31.8h, v3.8h\n" + "smlal v12.4s, v31.4h, v1.4h\n" + "smlal2 v22.4s, v31.8h, v1.8h\n" + "smlal v24.4s, v31.4h, v0.4h\n" + "smlal2 v10.4s, v31.8h, v0.8h\n" + "smlal v11.4s, v30.4h, v0.4h\n" + "smlal2 v26.4s, v30.8h, v0.8h\n" + "smlal v23.4s, v29.4h, v2.4h\n" + "smlal2 v9.4s, v29.8h, v2.8h\n" + "smlal v11.4s, v28.4h, v5.4h\n" + "smlal2 v26.4s, v28.8h, v5.8h\n" + "smlal v23.4s, v28.4h, v4.4h\n" + "smlal2 v9.4s, v28.8h, v4.8h\n" + "smlal v12.4s, v28.4h, v2.4h\n" + "smlal2 v22.4s, v28.8h, v2.8h\n" + "smlal v24.4s, v28.4h, v1.4h\n" + "smlal2 v10.4s, v28.8h, v1.8h\n" + "tbz x8, #2, 13f\n" + "ld1 { v31.s }[0], [x21], #0x4\n" + "tbz x8, #1, 12f\n" + "ld1 { v31.h }[2], [x21], #0x2\n" + "tbz x8, #0, 15f\n" + "ld1 { v31.b }[6], [x21]\n" + "b 15f\n" + "12:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset + "tbz x8, #0, 15f\n" + "ld1 { v31.b }[4], [x21]\n" + "b 15f\n" + "13:" // Oddments: Load (3, 0): Bit 2: Unset + "tbz x8, #1, 14f\n" + "ld1 { v31.h }[0], [x21], #0x2\n" + "tbz x8, #0, 15f\n" + "ld1 { v31.b }[2], [x21]\n" + "b 15f\n" + "14:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 15f\n" + "ld1 { v31.b }[0], [x21]\n" + "15:" // Oddments: Load (3, 0): Bit 2: End + "usubl v31.8h, v31.8b, v21.8b\n" + "ldr x20, [x14, #0x30]\n" + "smlal v11.4s, v27.4h, v7.4h\n" + "add x20, x20, x17\n" + "smlal v12.4s, v31.4h, v6.4h\n" + "smlal2 v22.4s, v31.8h, v6.8h\n" + "smlal2 v26.4s, v27.8h, v7.8h\n" + "smlal v23.4s, v27.4h, v6.4h\n" + "smlal2 v9.4s, v27.8h, v6.8h\n" + "smlal v12.4s, v27.4h, v4.4h\n" + "smlal2 v22.4s, v27.8h, v4.8h\n" + "smlal v24.4s, v27.4h, v3.4h\n" + "smlal2 v10.4s, v27.8h, v3.8h\n" + "tbz x8, #2, 17f\n" + "ld1 { v29.s }[0], [x20], #0x4\n" + "tbz x8, #1, 16f\n" + "ld1 { v29.h }[2], [x20], #0x2\n" + "tbz x8, #0, 19f\n" + "ld1 { v29.b }[6], [x20]\n" + "b 19f\n" + "16:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset + "tbz x8, #0, 19f\n" + "ld1 { v29.b }[4], [x20]\n" + "b 19f\n" + "17:" // Oddments: Load (3, 3): Bit 2: Unset + "tbz x8, #1, 18f\n" + "ld1 { v29.h }[0], [x20], #0x2\n" + "tbz x8, #0, 19f\n" + "ld1 { v29.b }[2], [x20]\n" + "b 19f\n" + "18:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 19f\n" + "ld1 { v29.b }[0], [x20]\n" + "19:" // Oddments: Load (3, 3): Bit 2: End + "usubl v29.8h, v29.8b, v21.8b\n" + "ldr x26, [x14, #0x38]\n" + "smlal v24.4s, v29.4h, v8.4h\n" + "add x26, x26, x17\n" + "smlal2 v10.4s, v29.8h, v8.8h\n" + "tbz x8, #2, 21f\n" + "ld1 { v28.s }[0], [x26], #0x4\n" + "tbz x8, #1, 20f\n" + "ld1 { v28.h }[2], [x26], #0x2\n" + "tbz x8, #0, 23f\n" + "ld1 { v28.b }[6], [x26]\n" + "b 23f\n" + "20:" // Oddments: Load (0, 1): Bit 2: Bit 1: Unset + "tbz x8, #0, 23f\n" + "ld1 { v28.b }[4], [x26]\n" + "b 23f\n" + "21:" // Oddments: Load (0, 1): Bit 2: Unset + "tbz x8, #1, 22f\n" + "ld1 { v28.h }[0], [x26], #0x2\n" + "tbz x8, #0, 23f\n" + "ld1 { v28.b }[2], [x26]\n" + "b 23f\n" + "22:" // Oddments: Load (0, 1): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 23f\n" + "ld1 { v28.b }[0], [x26]\n" + "23:" // Oddments: Load (0, 1): Bit 2: End + "usubl v28.8h, v28.8b, v21.8b\n" + "ldr x25, [x14, #0x40]\n" + "smlal v11.4s, v28.4h, v1.4h\n" + "add x25, x25, x17\n" + "smlal2 v26.4s, v28.8h, v1.8h\n" + "smlal v23.4s, v28.4h, v0.4h\n" + "smlal2 v9.4s, v28.8h, v0.8h\n" + "tbz x8, #2, 25f\n" + "ld1 { v31.s }[0], [x25], #0x4\n" + "tbz 
x8, #1, 24f\n" + "ld1 { v31.h }[2], [x25], #0x2\n" + "tbz x8, #0, 27f\n" + "ld1 { v31.b }[6], [x25]\n" + "b 27f\n" + "24:" // Oddments: Load (0, 2): Bit 2: Bit 1: Unset + "tbz x8, #0, 27f\n" + "ld1 { v31.b }[4], [x25]\n" + "b 27f\n" + "25:" // Oddments: Load (0, 2): Bit 2: Unset + "tbz x8, #1, 26f\n" + "ld1 { v31.h }[0], [x25], #0x2\n" + "tbz x8, #0, 27f\n" + "ld1 { v31.b }[2], [x25]\n" + "b 27f\n" + "26:" // Oddments: Load (0, 2): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 27f\n" + "ld1 { v31.b }[0], [x25]\n" + "27:" // Oddments: Load (0, 2): Bit 2: End + "usubl v31.8h, v31.8b, v21.8b\n" + "ldr x19, [x14, #0x48]\n" + "smlal v11.4s, v31.4h, v2.4h\n" + "add x19, x19, x17\n" + "smlal2 v26.4s, v31.8h, v2.8h\n" + "smlal v23.4s, v31.4h, v1.4h\n" + "smlal2 v9.4s, v31.8h, v1.8h\n" + "tbz x8, #2, 29f\n" + "ld1 { v30.s }[0], [x19], #0x4\n" + "tbz x8, #1, 28f\n" + "ld1 { v30.h }[2], [x19], #0x2\n" + "tbz x8, #0, 31f\n" + "ld1 { v30.b }[6], [x19]\n" + "b 31f\n" + "28:" // Oddments: Load (2, 2): Bit 2: Bit 1: Unset + "tbz x8, #0, 31f\n" + "ld1 { v30.b }[4], [x19]\n" + "b 31f\n" + "29:" // Oddments: Load (2, 2): Bit 2: Unset + "tbz x8, #1, 30f\n" + "ld1 { v30.h }[0], [x19], #0x2\n" + "tbz x8, #0, 31f\n" + "ld1 { v30.b }[2], [x19]\n" + "b 31f\n" + "30:" // Oddments: Load (2, 2): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 31f\n" + "ld1 { v30.b }[0], [x19]\n" + "31:" // Oddments: Load (2, 2): Bit 2: End + "usubl v30.8h, v30.8b, v21.8b\n" + "ldr x24, [x14, #0x50]\n" + "smlal v11.4s, v30.4h, v8.4h\n" + "add x24, x24, x17\n" + "smlal2 v26.4s, v30.8h, v8.8h\n" + "smlal v23.4s, v30.4h, v7.4h\n" + "smlal2 v9.4s, v30.8h, v7.8h\n" + "smlal v12.4s, v30.4h, v5.4h\n" + "smlal2 v22.4s, v30.8h, v5.8h\n" + "smlal v24.4s, v30.4h, v4.4h\n" + "smlal2 v10.4s, v30.8h, v4.8h\n" + "tbz x8, #2, 33f\n" + "ld1 { v29.s }[0], [x24], #0x4\n" + "tbz x8, #1, 32f\n" + "ld1 { v29.h }[2], [x24], #0x2\n" + "tbz x8, #0, 35f\n" + "ld1 { v29.b }[6], [x24]\n" + "b 35f\n" + "32:" // Oddments: Load (1, 0): Bit 2: Bit 1: Unset + "tbz x8, #0, 35f\n" + "ld1 { v29.b }[4], [x24]\n" + "b 35f\n" + "33:" // Oddments: Load (1, 0): Bit 2: Unset + "tbz x8, #1, 34f\n" + "ld1 { v29.h }[0], [x24], #0x2\n" + "tbz x8, #0, 35f\n" + "ld1 { v29.b }[2], [x24]\n" + "b 35f\n" + "34:" // Oddments: Load (1, 0): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 35f\n" + "ld1 { v29.b }[0], [x24]\n" + "35:" // Oddments: Load (1, 0): Bit 2: End + "usubl v29.8h, v29.8b, v21.8b\n" + "ldr x23, [x14, #0x58]\n" + "smlal v11.4s, v29.4h, v3.4h\n" + "add x23, x23, x17\n" + "smlal2 v26.4s, v29.8h, v3.8h\n" + "smlal v12.4s, v29.4h, v0.4h\n" + "smlal2 v22.4s, v29.8h, v0.8h\n" + "tbz x8, #2, 37f\n" + "ld1 { v28.s }[0], [x23], #0x4\n" + "tbz x8, #1, 36f\n" + "ld1 { v28.h }[2], [x23], #0x2\n" + "tbz x8, #0, 39f\n" + "ld1 { v28.b }[6], [x23]\n" + "b 39f\n" + "36:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset + "tbz x8, #0, 39f\n" + "ld1 { v28.b }[4], [x23]\n" + "b 39f\n" + "37:" // Oddments: Load (1, 3): Bit 2: Unset + "tbz x8, #1, 38f\n" + "ld1 { v28.h }[0], [x23], #0x2\n" + "tbz x8, #0, 39f\n" + "ld1 { v28.b }[2], [x23]\n" + "b 39f\n" + "38:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 39f\n" + "ld1 { v28.b }[0], [x23]\n" + "39:" // Oddments: Load (1, 3): Bit 2: End + "usubl v28.8h, v28.8b, v21.8b\n" + "ldr x22, [x14, #0x60]\n" + "smlal v23.4s, v28.4h, v5.4h\n" + "add x22, x22, x17\n" + "smlal2 v9.4s, v28.8h, v5.8h\n" + "smlal v24.4s, v28.4h, v2.4h\n" + "smlal2 v10.4s, v28.8h, v2.8h\n" + "tbz x8, #2, 41f\n" + "ld1 { v31.s }[0], [x22], #0x4\n" + "tbz x8, #1, 40f\n" + 
"ld1 { v31.h }[2], [x22], #0x2\n" + "tbz x8, #0, 43f\n" + "ld1 { v31.b }[6], [x22]\n" + "b 43f\n" + "40:" // Oddments: Load (2, 0): Bit 2: Bit 1: Unset + "tbz x8, #0, 43f\n" + "ld1 { v31.b }[4], [x22]\n" + "b 43f\n" + "41:" // Oddments: Load (2, 0): Bit 2: Unset + "tbz x8, #1, 42f\n" + "ld1 { v31.h }[0], [x22], #0x2\n" + "tbz x8, #0, 43f\n" + "ld1 { v31.b }[2], [x22]\n" + "b 43f\n" + "42:" // Oddments: Load (2, 0): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 43f\n" + "ld1 { v31.b }[0], [x22]\n" + "43:" // Oddments: Load (2, 0): Bit 2: End + "usubl v31.8h, v31.8b, v21.8b\n" + "ldr x21, [x14, #0x68]\n" + "smlal v11.4s, v31.4h, v6.4h\n" + "add x21, x21, x17\n" + "smlal2 v26.4s, v31.8h, v6.8h\n" + "smlal v12.4s, v31.4h, v3.4h\n" + "smlal2 v22.4s, v31.8h, v3.8h\n" + "tbz x8, #2, 45f\n" + "ld1 { v30.s }[0], [x21], #0x4\n" + "tbz x8, #1, 44f\n" + "ld1 { v30.h }[2], [x21], #0x2\n" + "tbz x8, #0, 47f\n" + "ld1 { v30.b }[6], [x21]\n" + "b 47f\n" + "44:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset + "tbz x8, #0, 47f\n" + "ld1 { v30.b }[4], [x21]\n" + "b 47f\n" + "45:" // Oddments: Load (2, 3): Bit 2: Unset + "tbz x8, #1, 46f\n" + "ld1 { v30.h }[0], [x21], #0x2\n" + "tbz x8, #0, 47f\n" + "ld1 { v30.b }[2], [x21]\n" + "b 47f\n" + "46:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 47f\n" + "ld1 { v30.b }[0], [x21]\n" + "47:" // Oddments: Load (2, 3): Bit 2: End + "usubl v30.8h, v30.8b, v21.8b\n" + "ldr x20, [x14, #0x70]\n" + "smlal v23.4s, v30.4h, v8.4h\n" + "add x20, x20, x17\n" + "smlal2 v9.4s, v30.8h, v8.8h\n" + "smlal v24.4s, v30.4h, v5.4h\n" + "smlal2 v10.4s, v30.8h, v5.8h\n" + "tbz x8, #2, 49f\n" + "ld1 { v29.s }[0], [x20], #0x4\n" + "tbz x8, #1, 48f\n" + "ld1 { v29.h }[2], [x20], #0x2\n" + "tbz x8, #0, 51f\n" + "ld1 { v29.b }[6], [x20]\n" + "b 51f\n" + "48:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset + "tbz x8, #0, 51f\n" + "ld1 { v29.b }[4], [x20]\n" + "b 51f\n" + "49:" // Oddments: Load (3, 1): Bit 2: Unset + "tbz x8, #1, 50f\n" + "ld1 { v29.h }[0], [x20], #0x2\n" + "tbz x8, #0, 51f\n" + "ld1 { v29.b }[2], [x20]\n" + "b 51f\n" + "50:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 51f\n" + "ld1 { v29.b }[0], [x20]\n" + "51:" // Oddments: Load (3, 1): Bit 2: End + "usubl v29.8h, v29.8b, v21.8b\n" + "ldr x19, [x14, #0x78]\n" + "smlal v12.4s, v29.4h, v7.4h\n" + "add x19, x19, x17\n" + "smlal2 v22.4s, v29.8h, v7.8h\n" + "smlal v24.4s, v29.4h, v6.4h\n" + "smlal2 v10.4s, v29.8h, v6.8h\n" + "tbz x8, #2, 53f\n" + "ld1 { v28.s }[0], [x19], #0x4\n" + "tbz x8, #1, 52f\n" + "ld1 { v28.h }[2], [x19], #0x2\n" + "tbz x8, #0, 55f\n" + "ld1 { v28.b }[6], [x19]\n" + "b 55f\n" + "52:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset + "tbz x8, #0, 55f\n" + "ld1 { v28.b }[4], [x19]\n" + "b 55f\n" + "53:" // Oddments: Load (3, 2): Bit 2: Unset + "tbz x8, #1, 54f\n" + "ld1 { v28.h }[0], [x19], #0x2\n" + "tbz x8, #0, 55f\n" + "ld1 { v28.b }[2], [x19]\n" + "b 55f\n" + "54:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 55f\n" + "ld1 { v28.b }[0], [x19]\n" + "55:" // Oddments: Load (3, 2): Bit 2: End + "usubl v28.8h, v28.8b, v21.8b\n" + "smlal v12.4s, v28.4h, v8.4h\n" + "smlal2 v22.4s, v28.8h, v8.8h\n" + "smlal v24.4s, v28.4h, v7.4h\n" + "smlal2 v10.4s, v28.8h, v7.8h\n" + "tbz x8, #2, 57f\n" + "ld1 { v25.4s }, [x13], #0x10\n" + "ld1 { v18.4s }, [x11], #0x10\n" + "tbz x8, #1, 56f\n" + "ld1 { v16.d }[0], [x13], #0x8\n" + "ld1 { v20.d }[0], [x11], #0x8\n" + "tbz x8, #0, 59f\n" + "ld1 { v16.s }[2], [x13]\n" + "ld1 { v20.s }[2], [x11]\n" + "b 59f\n" + "56:" 
// Oddments: Load requant params: Bit 2: Bit 1: Unset + "tbz x8, #0, 59f\n" + "ld1 { v16.s }[0], [x13]\n" + "ld1 { v20.s }[0], [x11]\n" + "b 59f\n" + "57:" // Oddments: Load requant params: Bit 2: Unset + "tbz x8, #1, 58f\n" + "ld1 { v25.d }[0], [x13], #0x8\n" + "ld1 { v18.d }[0], [x11], #0x8\n" + "tbz x8, #0, 59f\n" + "ld1 { v25.s }[2], [x13]\n" + "ld1 { v18.s }[2], [x11]\n" + "b 59f\n" + "58:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 59f\n" + "ld1 { v25.s }[0], [x13]\n" + "ld1 { v18.s }[0], [x11]\n" + "59:" // Oddments: Load requant params: Bit 2: End + "sqrdmulh v11.4s, v11.4s, v25.4s\n" + "add x10, x10, x15\n" + "sqrdmulh v26.4s, v26.4s, v16.4s\n" + "add x9, x9, x15\n" + "sqrdmulh v23.4s, v23.4s, v25.4s\n" + "add x28, x28, x15\n" + "sqrdmulh v9.4s, v9.4s, v16.4s\n" + "add x27, x27, x15\n" + "sqrdmulh v12.4s, v12.4s, v25.4s\n" + "and v19.16b, v11.16b, v18.16b\n" + "and v5.16b, v26.16b, v20.16b\n" + "and v28.16b, v23.16b, v18.16b\n" + "sshr v19.4s, v19.4s, #0x1f\n" + "sshr v5.4s, v5.4s, #0x1f\n" + "sshr v28.4s, v28.4s, #0x1f\n" + "sqadd v11.4s, v11.4s, v19.4s\n" + "sqadd v26.4s, v26.4s, v5.4s\n" + "sqadd v23.4s, v23.4s, v28.4s\n" + "and v8.16b, v9.16b, v20.16b\n" + "srshl v11.4s, v11.4s, v18.4s\n" + "srshl v26.4s, v26.4s, v20.4s\n" + "srshl v23.4s, v23.4s, v18.4s\n" + "sshr v8.4s, v8.4s, #0x1f\n" + "add v11.4s, v11.4s, v13.4s\n" + "add v26.4s, v26.4s, v13.4s\n" + "add v23.4s, v23.4s, v13.4s\n" + "smin v11.4s, v11.4s, v14.4s\n" + "smin v26.4s, v26.4s, v14.4s\n" + "smin v23.4s, v23.4s, v14.4s\n" + "smax v11.4s, v11.4s, v15.4s\n" + "smax v26.4s, v26.4s, v15.4s\n" + "smax v23.4s, v23.4s, v15.4s\n" + "sqadd v9.4s, v9.4s, v8.4s\n" + "uzp1 v11.16b, v11.16b, v26.16b\n" + "and v1.16b, v12.16b, v18.16b\n" + "uzp1 v11.16b, v11.16b, v11.16b\n" + "srshl v9.4s, v9.4s, v20.4s\n" + "sshr v1.4s, v1.4s, #0x1f\n" + "sqrdmulh v22.4s, v22.4s, v16.4s\n" + "sqrdmulh v24.4s, v24.4s, v25.4s\n" + "add v9.4s, v9.4s, v13.4s\n" + "sqadd v12.4s, v12.4s, v1.4s\n" + "and v0.16b, v22.16b, v20.16b\n" + "smin v9.4s, v9.4s, v14.4s\n" + "and v26.16b, v24.16b, v18.16b\n" + "srshl v12.4s, v12.4s, v18.4s\n" + "smax v9.4s, v9.4s, v15.4s\n" + "sshr v0.4s, v0.4s, #0x1f\n" + "sshr v26.4s, v26.4s, #0x1f\n" + "uzp1 v23.16b, v23.16b, v9.16b\n" + "add v12.4s, v12.4s, v13.4s\n" + "uzp1 v23.16b, v23.16b, v23.16b\n" + "sqadd v22.4s, v22.4s, v0.4s\n" + "smin v12.4s, v12.4s, v14.4s\n" + "sqadd v24.4s, v24.4s, v26.4s\n" + "sqrdmulh v10.4s, v10.4s, v16.4s\n" + "smax v12.4s, v12.4s, v15.4s\n" + "srshl v22.4s, v22.4s, v20.4s\n" + "srshl v24.4s, v24.4s, v18.4s\n" + "and v16.16b, v10.16b, v20.16b\n" + "add v22.4s, v22.4s, v13.4s\n" + "add v24.4s, v24.4s, v13.4s\n" + "sshr v16.4s, v16.4s, #0x1f\n" + "smin v22.4s, v22.4s, v14.4s\n" + "smin v24.4s, v24.4s, v14.4s\n" + "sqadd v10.4s, v10.4s, v16.4s\n" + "smax v22.4s, v22.4s, v15.4s\n" + "smax v24.4s, v24.4s, v15.4s\n" + "srshl v10.4s, v10.4s, v20.4s\n" + "uzp1 v12.16b, v12.16b, v22.16b\n" + "uzp1 v12.16b, v12.16b, v12.16b\n" + "add v10.4s, v10.4s, v13.4s\n" + "smin v10.4s, v10.4s, v14.4s\n" + "smax v10.4s, v10.4s, v15.4s\n" + "uzp1 v24.16b, v24.16b, v10.16b\n" + "uzp1 v24.16b, v24.16b, v24.16b\n" + "tbz x8, #2, 61f\n" + "st1 { v11.s }[0], [x10], #0x4\n" + "st1 { v23.s }[0], [x9], #0x4\n" + "st1 { v12.s }[0], [x28], #0x4\n" + "st1 { v24.s }[0], [x27], #0x4\n" + "tbz x8, #1, 60f\n" + "st1 { v11.h }[2], [x10], #0x2\n" + "st1 { v23.h }[2], [x9], #0x2\n" + "st1 { v12.h }[2], [x28], #0x2\n" + "st1 { v24.h }[2], [x27], #0x2\n" + "tbz x8, #0, 63f\n" + "st1 { v11.b }[6], 
[x10], #0x1\n"
+    "st1 { v23.b }[6], [x9], #0x1\n"
+    "st1 { v12.b }[6], [x28], #0x1\n"
+    "st1 { v24.b }[6], [x27], #0x1\n"
+    "b 63f\n"
+    "60:" // Oddments: Bit 2: Bit 1: Unset
+    "tbz x8, #0, 63f\n"
+    "st1 { v11.b }[4], [x10], #0x1\n"
+    "st1 { v23.b }[4], [x9], #0x1\n"
+    "st1 { v12.b }[4], [x28], #0x1\n"
+    "st1 { v24.b }[4], [x27], #0x1\n"
+    "b 63f\n"
+    "61:" // Oddments: Bit 2: Unset
+    "tbz x8, #1, 62f\n"
+    "st1 { v11.h }[0], [x10], #0x2\n"
+    "st1 { v23.h }[0], [x9], #0x2\n"
+    "st1 { v12.h }[0], [x28], #0x2\n"
+    "st1 { v24.h }[0], [x27], #0x2\n"
+    "tbz x8, #0, 63f\n"
+    "st1 { v11.b }[2], [x10], #0x1\n"
+    "st1 { v23.b }[2], [x9], #0x1\n"
+    "st1 { v12.b }[2], [x28], #0x1\n"
+    "st1 { v24.b }[2], [x27], #0x1\n"
+    "b 63f\n"
+    "62:" // Oddments: Bit 2: Unset: Bit 1: Unset
+    "tbz x8, #0, 63f\n"
+    "st1 { v11.b }[0], [x10], #0x1\n"
+    "st1 { v23.b }[0], [x9], #0x1\n"
+    "st1 { v12.b }[0], [x28], #0x1\n"
+    "st1 { v24.b }[0], [x27], #0x1\n"
+    "63:" // Oddments: Bit 2: End
+
+    "64:" // End
+
+    :
+    : [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
+    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+  );
+}
+
+} // namespace depthwise
+} // namespace arm_conv
+
+#endif // defined(__aarch64__)
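
Editor's note: for readers tracing the Loop/Tail/Oddments blocks above, each 32-bit accumulator goes through the same requantization pipeline before being stored: SQRDMULH against a lane of requant_muls, a sign-corrected rounding right shift (the AND/SSHR/SQADD fixup followed by SRSHL against requant_shifts), the Requantize32 c_offset (broadcast into v13), a clamp to [minval, maxval] (v15/v14), and a UZP1 narrowing to bytes. Below is a minimal scalar sketch of that pipeline, not library code; the function and parameter names are illustrative, and per-lane mul/shift values are assumed to come from requant_muls / requant_shifts.

```cpp
#include <algorithm>
#include <cstdint>

// Scalar model (illustrative only) of the per-lane requantization done by
// the kernel above. 'mul' and 'shift' stand for one lane of requant_muls /
// requant_shifts; c_offset, minval and maxval mirror the Requantize32
// fields broadcast into v13, v15 and v14.
static uint8_t requantize_lane(int32_t acc, int32_t mul, int32_t shift,
                               int32_t c_offset, int32_t minval, int32_t maxval)
{
    // SQRDMULH: saturating rounding "doubling multiply, keep the high half".
    int32_t x;
    if (acc == INT32_MIN && mul == INT32_MIN)
    {
        x = INT32_MAX; // the one input pair where the doubled product saturates
    }
    else
    {
        const int64_t prod = (int64_t)acc * (int64_t)mul;
        x = (int32_t)((2 * prod + (1LL << 31)) >> 32);
    }

    // AND/SSHR/SQADD fixup + SRSHL: rounding right shift where negative
    // values get a -1 nudge so ties round away from zero. The shifts are
    // stored as negative left-shift amounts, hence the negation.
    const int s = -shift;
    if (s > 0)
    {
        x = (int32_t)(((int64_t)x - (x < 0 ? 1 : 0) + (1LL << (s - 1))) >> s);
    }

    // ADD v13 / SMIN v14 / SMAX v15 / UZP1 in the assembly: add the output
    // offset, clamp to the quantized range, narrow to u8.
    x += c_offset;
    x = std::max(minval, std::min(maxval, x));
    return (uint8_t)x;
}
```

This is the standard gemmlowp-style fixed-point requantization; the kernel performs the same steps four lanes at a time for each of the four 2x2 output accumulators.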