Diffstat (limited to 'src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp'):
 src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp | 1166
 1 file changed, 1166 insertions(+), 0 deletions(-)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
new file mode 100644
index 0000000000..f1c1b2315c
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
@@ -0,0 +1,1166 @@
+/*
+ * Copyright (c) 2021-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_gemm.hpp"
+
+#include <cstddef>
+#include <cstdint>
+
+#if defined(__aarch64__)
+
+namespace arm_conv {
+namespace depthwise {
+
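+// 3x3 stride-1 depthwise convolution, NHWC layout, computing a 2x2 output
+// tile per step: uint8 input * int8 weights -> uint8 output, requantised
+// via arm_gemm::Requantize32 (MLA-based, depth-first).
+//
+// A scalar sketch of the per-channel arithmetic (names are illustrative,
+// not taken from this file):
+//
+//   int32_t acc = bias[c];
+//   for (int kh = 0; kh < 3; kh++)
+//     for (int kw = 0; kw < 3; kw++)
+//       acc += (int32_t(in(kh, kw, c)) - qp.a_offset)
+//            * (int32_t(wgt(kh, kw, c)) - qp.b_offset);
+//   acc = rounding_rshift(sqrdmulh(acc, requant_muls[c]), requant_shifts[c]);
+//   out(c) = clamp(acc + qp.c_offset, qp.minval, qp.maxval);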
+void a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
+ const unsigned int n_channels,
+ const uint8_t *const *const inptrs,
+ const int8_t *const weights,
+ const int32_t *const bias,
+ const arm_gemm::Requantize32 &qp,
+ const int32_t *const requant_muls,
+ const int32_t *const requant_shifts,
+ uint8_t *const *const outptrs
+)
+{
+ struct Params
+ {
+ uint64_t n_channels;
+ const void *weights;
+ const int32_t *bias;
+ const arm_gemm::Requantize32 *requant;
+ const int32_t *const requant_muls;
+ const int32_t *const requant_shifts;
+ uint8_t *const *const outptrs;
+ const uint8_t *inptrs[16];
+
+ Params(
+ long unsigned int n_channels,
+ const uint8_t *const *inptrs_raw,
+ const void *const weights,
+ const int32_t *const bias,
+ const arm_gemm::Requantize32 &qp,
+ const int32_t *const requant_muls,
+ const int32_t *const requant_shifts,
+ uint8_t *const *outptrs
+ ) : n_channels(n_channels), weights(weights), bias(bias),
+ requant(&qp), requant_muls(requant_muls),
+ requant_shifts(requant_shifts), outptrs(outptrs)
+ {
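+      // Reorder the raw input pointers into the sequence in which the
+      // assembly below reads them.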
+ inptrs[0] = inptrs_raw[5];
+ inptrs[1] = inptrs_raw[0];
+ inptrs[2] = inptrs_raw[3];
+ inptrs[3] = inptrs_raw[6];
+ inptrs[4] = inptrs_raw[9];
+ inptrs[5] = inptrs_raw[12];
+ inptrs[6] = inptrs_raw[15];
+ inptrs[7] = inptrs_raw[1];
+ inptrs[8] = inptrs_raw[2];
+ inptrs[9] = inptrs_raw[10];
+ inptrs[10] = inptrs_raw[4];
+ inptrs[11] = inptrs_raw[7];
+ inptrs[12] = inptrs_raw[8];
+ inptrs[13] = inptrs_raw[11];
+ inptrs[14] = inptrs_raw[13];
+ inptrs[15] = inptrs_raw[14];
+
+ }
+ };
+
+ const Params params(n_channels, inptrs, weights, bias, qp,
+ requant_muls, requant_shifts, outptrs);
+
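+  // Assembly layout: label 1 is the main loop over full blocks of 8
+  // channels, label 2 the last full block (tail), label 3 the oddments
+  // path for the remaining 1-7 channels, label 64 the end. Accumulation
+  // is in int32; results are requantised by a fixed-point multiply
+  // (sqrdmulh), a rounding right shift (srshl), adding the output offset,
+  // clamping to [minval, maxval] and narrowing to uint8.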
+ __asm__ __volatile__(
+ "ldr x7, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "ldr x23, [%x[params], %[offsetof_Params_requant]]\n"
+ "lsr x8, x7, #0x3\n"
+ "add x20, x23, %[offsetof_Requantize32_a_offset]\n"
+ "ld1r { v14.16b }, [x20]\n"
+ "ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
+ "add x21, x23, %[offsetof_Requantize32_b_offset]\n"
+ "add x20, x23, %[offsetof_Requantize32_c_offset]\n"
+ "ld1r { v19.16b }, [x21]\n"
+ "ld1r { v13.8h }, [x20]\n"
+ "add x21, x23, %[offsetof_Requantize32_minval]\n"
+ "add x20, x23, %[offsetof_Requantize32_maxval]\n"
+ "ld1r { v29.8h }, [x21]\n"
+ "ld1r { v12.8h }, [x20]\n"
+ "mov x17, #0x0\n"
+ "mov x16, #0x0\n"
+ "add x15, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x14, [%x[params], %[offsetof_Params_weights]]\n"
+ "ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "ldr x12, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "ldp x11, x10, [x22, #0x0]\n"
+ "ldp x9, x28, [x22, #0x10]\n"
+ "cbz x8, 3f\n"
+ "ldr d23, [x14, #0x0]\n"
+ "ldr d16, [x14, #0x8]\n"
+ "subs x8, x8, #0x1\n"
+ "ssubl v23.8h, v23.8b, v19.8b\n"
+ "ldr d1, [x14, #0x10]\n"
+ "ldr d5, [x14, #0x18]\n"
+ "ssubl v16.8h, v16.8b, v19.8b\n"
+ "ssubl v1.8h, v1.8b, v19.8b\n"
+ "ldr d26, [x14, #0x20]\n"
+ "ldr d18, [x14, #0x28]\n"
+ "ssubl v5.8h, v5.8b, v19.8b\n"
+ "ssubl v26.8h, v26.8b, v19.8b\n"
+ "ldr d31, [x14, #0x30]\n"
+ "ldr d25, [x14, #0x38]\n"
+ "ssubl v18.8h, v18.8b, v19.8b\n"
+ "ssubl v31.8h, v31.8b, v19.8b\n"
+ "ldr d20, [x14, #0x40]\n"
+ "ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
+ "ssubl v25.8h, v25.8b, v19.8b\n"
+ "ssubl v20.8h, v20.8b, v19.8b\n"
+ "ldr q9, [x20, #0x0]\n"
+ "ldr q24, [x20, #0x10]\n"
+ "add x20, x20, #0x20\n"
+ "str x20, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldp x23, x22, [x15, #0x0]\n"
+ "ldp x21, x20, [x15, #0x10]\n"
+ "mov v7.16b, v9.16b\n"
+ "mov v0.16b, v24.16b\n"
+ "ldr d22, [x23, x17]\n"
+ "ldr d4, [x22, x17]\n"
+ "mov v2.16b, v9.16b\n"
+ "mov v30.16b, v24.16b\n"
+ "ldr d8, [x21, x17]\n"
+ "ldr d27, [x20, x17]\n"
+ "mov v10.16b, v9.16b\n"
+ "mov v6.16b, v24.16b\n"
+ "ldr x20, [x15, #0x20]\n"
+ "ldr d15, [x20, x17]\n"
+ "usubl v22.8h, v22.8b, v14.8b\n"
+ "usubl v4.8h, v4.8b, v14.8b\n"
+ "usubl v8.8h, v8.8b, v14.8b\n"
+ "usubl v27.8h, v27.8b, v14.8b\n"
+ "usubl v15.8h, v15.8b, v14.8b\n"
+ "beq 2f\n"
+ "1:" // Loop
+ "ldr q3, [x13, #0x0]\n"
+ "ldr q17, [x12, #0x0]\n"
+ "smlal v9.4s, v22.4h, v26.4h\n"
+ "smlal2 v24.4s, v22.8h, v26.8h\n"
+ "ldr q21, [x13, #0x10]\n"
+ "ldr q28, [x12, #0x10]\n"
+ "smlal v9.4s, v4.4h, v23.4h\n"
+ "smlal v7.4s, v22.4h, v5.4h\n"
+ "ldr x20, [x15, #0x28]\n"
+ "ldr d11, [x20, x17]\n"
+ "smlal v2.4s, v22.4h, v16.4h\n"
+ "smlal v10.4s, v22.4h, v23.4h\n"
+ "smlal2 v24.4s, v4.8h, v23.8h\n"
+ "ldr x20, [x15, #0x38]\n"
+ "ldr d4, [x20, x17]\n"
+ "smlal v9.4s, v27.4h, v18.4h\n"
+ "smlal2 v0.4s, v22.8h, v5.8h\n"
+ "smlal2 v30.4s, v22.8h, v16.8h\n"
+ "ldr x20, [x15, #0x30]\n"
+ "usubl v11.8h, v11.8b, v14.8b\n"
+ "smlal2 v6.4s, v22.8h, v23.8h\n"
+ "ldr d22, [x20, x17]\n"
+ "smlal v7.4s, v8.4h, v1.4h\n"
+ "ldr x20, [x15, #0x40]\n"
+ "smlal v2.4s, v27.4h, v1.4h\n"
+ "smlal v10.4s, v27.4h, v16.4h\n"
+ "usubl v4.8h, v4.8b, v14.8b\n"
+ "ldr x27, [x15, #0x48]\n"
+ "smlal2 v24.4s, v27.8h, v18.8h\n"
+ "smlal v9.4s, v15.4h, v25.4h\n"
+ "usubl v22.8h, v22.8b, v14.8b\n"
+ "ldr x26, [x15, #0x50]\n"
+ "smlal2 v0.4s, v8.8h, v1.8h\n"
+ "ldr d8, [x20, x17]\n"
+ "smlal2 v30.4s, v27.8h, v1.8h\n"
+ "usubl v8.8h, v8.8b, v14.8b\n"
+ "smlal2 v6.4s, v27.8h, v16.8h\n"
+ "smlal v7.4s, v27.4h, v26.4h\n"
+ "ldr x25, [x15, #0x58]\n"
+ "ldr x24, [x15, #0x60]\n"
+ "smlal v2.4s, v11.4h, v31.4h\n"
+ "smlal v10.4s, v15.4h, v5.4h\n"
+ "ldr x23, [x15, #0x68]\n"
+ "ldr x22, [x15, #0x70]\n"
+ "smlal2 v24.4s, v15.8h, v25.8h\n"
+ "smlal v9.4s, v4.4h, v16.4h\n"
+ "ldr x21, [x15, #0x78]\n"
+ "ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
+ "smlal2 v0.4s, v27.8h, v26.8h\n"
+ "ldr d27, [x27, x17]\n"
+ "smlal2 v30.4s, v11.8h, v31.8h\n"
+ "ldr d11, [x26, x17]\n"
+ "smlal2 v6.4s, v15.8h, v5.8h\n"
+ "smlal v7.4s, v15.4h, v31.4h\n"
+ "usubl v27.8h, v27.8b, v14.8b\n"
+ "add x14, x14, #0x48\n"
+ "smlal v2.4s, v15.4h, v26.4h\n"
+ "smlal v10.4s, v22.4h, v20.4h\n"
+ "usubl v11.8h, v11.8b, v14.8b\n"
+ "subs x8, x8, #0x1\n"
+ "smlal2 v24.4s, v4.8h, v16.8h\n"
+ "smlal v9.4s, v8.4h, v1.4h\n"
+ "add x13, x13, #0x20\n"
+ "add x12, x12, #0x20\n"
+ "smlal2 v0.4s, v15.8h, v31.8h\n"
+ "smlal2 v30.4s, v15.8h, v26.8h\n"
+ "ldr d15, [x25, x17]\n"
+ "usubl v15.8h, v15.8b, v14.8b\n"
+ "smlal2 v6.4s, v22.8h, v20.8h\n"
+ "ldr d22, [x24, x17]\n"
+ "smlal v7.4s, v4.4h, v23.4h\n"
+ "usubl v22.8h, v22.8b, v14.8b\n"
+ "smlal v2.4s, v27.4h, v18.4h\n"
+ "smlal v10.4s, v27.4h, v26.4h\n"
+ "smlal2 v24.4s, v8.8h, v1.8h\n"
+ "smlal v9.4s, v27.4h, v20.4h\n"
+ "smlal2 v0.4s, v4.8h, v23.8h\n"
+ "ldr d4, [x23, x17]\n"
+ "smlal2 v30.4s, v27.8h, v18.8h\n"
+ "usubl v4.8h, v4.8b, v14.8b\n"
+ "smlal2 v6.4s, v27.8h, v26.8h\n"
+ "ldr d26, [x22, x17]\n"
+ "smlal v7.4s, v8.4h, v16.4h\n"
+ "usubl v26.8h, v26.8b, v14.8b\n"
+ "smlal v2.4s, v11.4h, v23.4h\n"
+ "smlal v10.4s, v15.4h, v1.4h\n"
+ "smlal2 v24.4s, v27.8h, v20.8h\n"
+ "smlal v9.4s, v11.4h, v5.4h\n"
+ "smlal2 v0.4s, v8.8h, v16.8h\n"
+ "ldr d8, [x21, x17]\n"
+ "smlal2 v30.4s, v11.8h, v23.8h\n"
+ "usubl v8.8h, v8.8b, v14.8b\n"
+ "smlal2 v6.4s, v15.8h, v1.8h\n"
+ "smlal v7.4s, v27.4h, v25.4h\n"
+ "add x17, x17, #0x8\n"
+ "smlal v2.4s, v22.4h, v5.4h\n"
+ "smlal v10.4s, v4.4h, v18.4h\n"
+ "smlal2 v24.4s, v11.8h, v5.8h\n"
+ "smlal v9.4s, v22.4h, v31.4h\n"
+ "sqrdmulh v9.4s, v9.4s, v3.4s\n"
+ "smlal2 v0.4s, v27.8h, v25.8h\n"
+ "smlal2 v30.4s, v22.8h, v5.8h\n"
+ "and v27.16b, v9.16b, v17.16b\n"
+ "smlal2 v6.4s, v4.8h, v18.8h\n"
+ "smlal v7.4s, v15.4h, v18.4h\n"
+ "sshr v27.4s, v27.4s, #0x1f\n"
+ "smlal v2.4s, v26.4h, v25.4h\n"
+ "smlal v10.4s, v26.4h, v31.4h\n"
+ "sqadd v9.4s, v9.4s, v27.4s\n"
+ "smlal2 v24.4s, v22.8h, v31.8h\n"
+ "smlal2 v0.4s, v15.8h, v18.8h\n"
+ "sqrdmulh v24.4s, v24.4s, v21.4s\n"
+ "smlal2 v30.4s, v26.8h, v25.8h\n"
+ "smlal2 v6.4s, v26.8h, v31.8h\n"
+ "and v31.16b, v24.16b, v28.16b\n"
+ "smlal v7.4s, v4.4h, v20.4h\n"
+ "smlal v2.4s, v8.4h, v20.4h\n"
+ "sqrdmulh v7.4s, v7.4s, v3.4s\n"
+ "smlal v10.4s, v8.4h, v25.4h\n"
+ "smlal2 v0.4s, v4.8h, v20.8h\n"
+ "sqrdmulh v2.4s, v2.4s, v3.4s\n"
+ "smlal2 v30.4s, v8.8h, v20.8h\n"
+ "smlal2 v6.4s, v8.8h, v25.8h\n"
+ "sqrdmulh v10.4s, v10.4s, v3.4s\n"
+ "sshr v31.4s, v31.4s, #0x1f\n"
+ "and v22.16b, v7.16b, v17.16b\n"
+ "sqrdmulh v0.4s, v0.4s, v21.4s\n"
+ "and v3.16b, v2.16b, v17.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v21.4s\n"
+ "and v11.16b, v10.16b, v17.16b\n"
+ "sqrdmulh v6.4s, v6.4s, v21.4s\n"
+ "sqadd v24.4s, v24.4s, v31.4s\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "and v20.16b, v0.16b, v28.16b\n"
+ "sshr v3.4s, v3.4s, #0x1f\n"
+ "and v31.16b, v30.16b, v28.16b\n"
+ "sshr v11.4s, v11.4s, #0x1f\n"
+ "and v18.16b, v6.16b, v28.16b\n"
+ "sqadd v7.4s, v7.4s, v22.4s\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "sqadd v2.4s, v2.4s, v3.4s\n"
+ "sshr v31.4s, v31.4s, #0x1f\n"
+ "sqadd v10.4s, v10.4s, v11.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "srshl v9.4s, v9.4s, v17.4s\n"
+ "srshl v7.4s, v7.4s, v17.4s\n"
+ "sqadd v0.4s, v0.4s, v20.4s\n"
+ "srshl v2.4s, v2.4s, v17.4s\n"
+ "sqadd v30.4s, v30.4s, v31.4s\n"
+ "srshl v10.4s, v10.4s, v17.4s\n"
+ "sqadd v6.4s, v6.4s, v18.4s\n"
+ "srshl v24.4s, v24.4s, v28.4s\n"
+ "sqxtn v9.4h, v9.4s\n"
+ "srshl v0.4s, v0.4s, v28.4s\n"
+ "sqxtn v7.4h, v7.4s\n"
+ "srshl v30.4s, v30.4s, v28.4s\n"
+ "sqxtn v2.4h, v2.4s\n"
+ "srshl v6.4s, v6.4s, v28.4s\n"
+ "sqxtn v10.4h, v10.4s\n"
+ "sqxtn2 v9.8h, v24.4s\n"
+ "sqxtn2 v7.8h, v0.4s\n"
+ "sqxtn2 v2.8h, v30.4s\n"
+ "sqxtn2 v10.8h, v6.4s\n"
+ "sqadd v9.8h, v9.8h, v13.8h\n"
+ "sqadd v7.8h, v7.8h, v13.8h\n"
+ "sqadd v2.8h, v2.8h, v13.8h\n"
+ "sqadd v10.8h, v10.8h, v13.8h\n"
+ "smax v9.8h, v9.8h, v29.8h\n"
+ "smax v7.8h, v7.8h, v29.8h\n"
+ "smax v2.8h, v2.8h, v29.8h\n"
+ "smax v10.8h, v10.8h, v29.8h\n"
+ "smin v9.8h, v9.8h, v12.8h\n"
+ "smin v7.8h, v7.8h, v12.8h\n"
+ "smin v2.8h, v2.8h, v12.8h\n"
+ "smin v10.8h, v10.8h, v12.8h\n"
+ "uzp1 v9.16b, v9.16b, v9.16b\n"
+ "str d9, [x11, x16]\n"
+ "uzp1 v7.16b, v7.16b, v7.16b\n"
+ "uzp1 v2.16b, v2.16b, v2.16b\n"
+ "str d7, [x10, x16]\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "str d2, [x9, x16]\n"
+ "str d10, [x28, x16]\n"
+ "ldr q9, [x20, #0x0]\n"
+ "ldr q24, [x20, #0x10]\n"
+ "add x20, x20, #0x20\n"
+ "ldr d23, [x14, #0x0]\n"
+ "ldr d16, [x14, #0x8]\n"
+ "add x16, x16, #0x8\n"
+ "str x20, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr d1, [x14, #0x10]\n"
+ "ldr d5, [x14, #0x18]\n"
+ "mov v7.16b, v9.16b\n"
+ "mov v0.16b, v24.16b\n"
+ "ldr d26, [x14, #0x20]\n"
+ "ldr d18, [x14, #0x28]\n"
+ "mov v2.16b, v9.16b\n"
+ "mov v30.16b, v24.16b\n"
+ "ldr d31, [x14, #0x30]\n"
+ "ldr d25, [x14, #0x38]\n"
+ "mov v10.16b, v9.16b\n"
+ "mov v6.16b, v24.16b\n"
+ "ldr d20, [x14, #0x40]\n"
+ "ldp x23, x22, [x15, #0x0]\n"
+ "ssubl v23.8h, v23.8b, v19.8b\n"
+ "ssubl v16.8h, v16.8b, v19.8b\n"
+ "ldp x21, x20, [x15, #0x10]\n"
+ "ldr d22, [x23, x17]\n"
+ "ssubl v1.8h, v1.8b, v19.8b\n"
+ "ssubl v5.8h, v5.8b, v19.8b\n"
+ "ldr d4, [x22, x17]\n"
+ "ldr d8, [x21, x17]\n"
+ "ssubl v26.8h, v26.8b, v19.8b\n"
+ "ssubl v18.8h, v18.8b, v19.8b\n"
+ "ldr d27, [x20, x17]\n"
+ "ldr x20, [x15, #0x20]\n"
+ "ssubl v31.8h, v31.8b, v19.8b\n"
+ "ssubl v25.8h, v25.8b, v19.8b\n"
+ "ldr d15, [x20, x17]\n"
+ "ssubl v20.8h, v20.8b, v19.8b\n"
+ "usubl v22.8h, v22.8b, v14.8b\n"
+ "usubl v4.8h, v4.8b, v14.8b\n"
+ "usubl v8.8h, v8.8b, v14.8b\n"
+ "usubl v27.8h, v27.8b, v14.8b\n"
+ "usubl v15.8h, v15.8b, v14.8b\n"
+ "bgt 1b\n"
+ "2:" // Tail
+ "ldr q28, [x13, #0x0]\n"
+ "ldr q17, [x12, #0x0]\n"
+ "smlal v9.4s, v22.4h, v26.4h\n"
+ "smlal2 v24.4s, v22.8h, v26.8h\n"
+ "ldr q21, [x13, #0x10]\n"
+ "ldr q3, [x12, #0x10]\n"
+ "smlal v9.4s, v4.4h, v23.4h\n"
+ "smlal v7.4s, v22.4h, v5.4h\n"
+ "ldr x20, [x15, #0x28]\n"
+ "ldr d11, [x20, x17]\n"
+ "smlal v2.4s, v22.4h, v16.4h\n"
+ "smlal v10.4s, v22.4h, v23.4h\n"
+ "smlal2 v24.4s, v4.8h, v23.8h\n"
+ "ldr x20, [x15, #0x38]\n"
+ "ldr d4, [x20, x17]\n"
+ "smlal v9.4s, v27.4h, v18.4h\n"
+ "smlal2 v0.4s, v22.8h, v5.8h\n"
+ "smlal2 v30.4s, v22.8h, v16.8h\n"
+ "ldr x20, [x15, #0x30]\n"
+ "usubl v11.8h, v11.8b, v14.8b\n"
+ "smlal2 v6.4s, v22.8h, v23.8h\n"
+ "ldr d22, [x20, x17]\n"
+ "smlal v7.4s, v8.4h, v1.4h\n"
+ "ldr x20, [x15, #0x40]\n"
+ "smlal v2.4s, v27.4h, v1.4h\n"
+ "smlal v10.4s, v27.4h, v16.4h\n"
+ "usubl v4.8h, v4.8b, v14.8b\n"
+ "ldr x26, [x15, #0x48]\n"
+ "smlal2 v24.4s, v27.8h, v18.8h\n"
+ "smlal v9.4s, v15.4h, v25.4h\n"
+ "usubl v22.8h, v22.8b, v14.8b\n"
+ "ldr x25, [x15, #0x50]\n"
+ "smlal2 v0.4s, v8.8h, v1.8h\n"
+ "ldr d8, [x20, x17]\n"
+ "smlal2 v30.4s, v27.8h, v1.8h\n"
+ "usubl v8.8h, v8.8b, v14.8b\n"
+ "smlal2 v6.4s, v27.8h, v16.8h\n"
+ "smlal v7.4s, v27.4h, v26.4h\n"
+ "ldr x24, [x15, #0x58]\n"
+ "ldr x23, [x15, #0x60]\n"
+ "smlal v2.4s, v11.4h, v31.4h\n"
+ "smlal v10.4s, v15.4h, v5.4h\n"
+ "ldr x22, [x15, #0x68]\n"
+ "ldr x21, [x15, #0x70]\n"
+ "smlal2 v24.4s, v15.8h, v25.8h\n"
+ "smlal v9.4s, v4.4h, v16.4h\n"
+ "ldr x20, [x15, #0x78]\n"
+ "tst x7, #0x7\n"
+ "smlal2 v0.4s, v27.8h, v26.8h\n"
+ "ldr d27, [x26, x17]\n"
+ "smlal2 v30.4s, v11.8h, v31.8h\n"
+ "ldr d11, [x25, x17]\n"
+ "smlal2 v6.4s, v15.8h, v5.8h\n"
+ "smlal v7.4s, v15.4h, v31.4h\n"
+ "usubl v27.8h, v27.8b, v14.8b\n"
+ "add x13, x13, #0x20\n"
+ "smlal v2.4s, v15.4h, v26.4h\n"
+ "smlal v10.4s, v22.4h, v20.4h\n"
+ "usubl v11.8h, v11.8b, v14.8b\n"
+ "add x12, x12, #0x20\n"
+ "smlal2 v24.4s, v4.8h, v16.8h\n"
+ "smlal v9.4s, v8.4h, v1.4h\n"
+ "smlal2 v0.4s, v15.8h, v31.8h\n"
+ "smlal2 v30.4s, v15.8h, v26.8h\n"
+ "ldr d15, [x24, x17]\n"
+ "usubl v15.8h, v15.8b, v14.8b\n"
+ "smlal2 v6.4s, v22.8h, v20.8h\n"
+ "ldr d22, [x23, x17]\n"
+ "smlal v7.4s, v4.4h, v23.4h\n"
+ "usubl v22.8h, v22.8b, v14.8b\n"
+ "smlal v2.4s, v27.4h, v18.4h\n"
+ "smlal v10.4s, v27.4h, v26.4h\n"
+ "smlal2 v24.4s, v8.8h, v1.8h\n"
+ "smlal v9.4s, v27.4h, v20.4h\n"
+ "smlal2 v0.4s, v4.8h, v23.8h\n"
+ "ldr d4, [x22, x17]\n"
+ "smlal2 v30.4s, v27.8h, v18.8h\n"
+ "usubl v4.8h, v4.8b, v14.8b\n"
+ "smlal2 v6.4s, v27.8h, v26.8h\n"
+ "ldr d26, [x21, x17]\n"
+ "smlal v7.4s, v8.4h, v16.4h\n"
+ "usubl v26.8h, v26.8b, v14.8b\n"
+ "smlal v2.4s, v11.4h, v23.4h\n"
+ "smlal v10.4s, v15.4h, v1.4h\n"
+ "smlal2 v24.4s, v27.8h, v20.8h\n"
+ "smlal v9.4s, v11.4h, v5.4h\n"
+ "smlal2 v0.4s, v8.8h, v16.8h\n"
+ "ldr d16, [x20, x17]\n"
+ "smlal2 v30.4s, v11.8h, v23.8h\n"
+ "usubl v16.8h, v16.8b, v14.8b\n"
+ "smlal2 v6.4s, v15.8h, v1.8h\n"
+ "smlal v7.4s, v27.4h, v25.4h\n"
+ "add x17, x17, #0x8\n"
+ "smlal v2.4s, v22.4h, v5.4h\n"
+ "smlal v10.4s, v4.4h, v18.4h\n"
+ "smlal2 v24.4s, v11.8h, v5.8h\n"
+ "smlal v9.4s, v22.4h, v31.4h\n"
+ "sqrdmulh v9.4s, v9.4s, v28.4s\n"
+ "smlal2 v0.4s, v27.8h, v25.8h\n"
+ "smlal2 v30.4s, v22.8h, v5.8h\n"
+ "and v1.16b, v9.16b, v17.16b\n"
+ "smlal2 v6.4s, v4.8h, v18.8h\n"
+ "smlal v7.4s, v15.4h, v18.4h\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
+ "smlal v2.4s, v26.4h, v25.4h\n"
+ "smlal v10.4s, v26.4h, v31.4h\n"
+ "sqadd v9.4s, v9.4s, v1.4s\n"
+ "smlal2 v24.4s, v22.8h, v31.8h\n"
+ "smlal2 v0.4s, v15.8h, v18.8h\n"
+ "sqrdmulh v24.4s, v24.4s, v21.4s\n"
+ "smlal2 v30.4s, v26.8h, v25.8h\n"
+ "smlal2 v6.4s, v26.8h, v31.8h\n"
+ "and v31.16b, v24.16b, v3.16b\n"
+ "smlal v7.4s, v4.4h, v20.4h\n"
+ "smlal v2.4s, v16.4h, v20.4h\n"
+ "sqrdmulh v7.4s, v7.4s, v28.4s\n"
+ "smlal v10.4s, v16.4h, v25.4h\n"
+ "smlal2 v0.4s, v4.8h, v20.8h\n"
+ "sqrdmulh v2.4s, v2.4s, v28.4s\n"
+ "smlal2 v30.4s, v16.8h, v20.8h\n"
+ "smlal2 v6.4s, v16.8h, v25.8h\n"
+ "sqrdmulh v10.4s, v10.4s, v28.4s\n"
+ "sshr v31.4s, v31.4s, #0x1f\n"
+ "and v22.16b, v7.16b, v17.16b\n"
+ "sqrdmulh v0.4s, v0.4s, v21.4s\n"
+ "and v15.16b, v2.16b, v17.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v21.4s\n"
+ "and v11.16b, v10.16b, v17.16b\n"
+ "sqrdmulh v6.4s, v6.4s, v21.4s\n"
+ "sqadd v24.4s, v24.4s, v31.4s\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "and v18.16b, v0.16b, v3.16b\n"
+ "sshr v15.4s, v15.4s, #0x1f\n"
+ "and v23.16b, v30.16b, v3.16b\n"
+ "sshr v11.4s, v11.4s, #0x1f\n"
+ "and v21.16b, v6.16b, v3.16b\n"
+ "sqadd v7.4s, v7.4s, v22.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sqadd v2.4s, v2.4s, v15.4s\n"
+ "sshr v23.4s, v23.4s, #0x1f\n"
+ "sqadd v10.4s, v10.4s, v11.4s\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "srshl v9.4s, v9.4s, v17.4s\n"
+ "srshl v7.4s, v7.4s, v17.4s\n"
+ "sqadd v0.4s, v0.4s, v18.4s\n"
+ "srshl v2.4s, v2.4s, v17.4s\n"
+ "sqadd v30.4s, v30.4s, v23.4s\n"
+ "srshl v10.4s, v10.4s, v17.4s\n"
+ "sqadd v6.4s, v6.4s, v21.4s\n"
+ "srshl v24.4s, v24.4s, v3.4s\n"
+ "sqxtn v9.4h, v9.4s\n"
+ "srshl v0.4s, v0.4s, v3.4s\n"
+ "sqxtn v7.4h, v7.4s\n"
+ "srshl v30.4s, v30.4s, v3.4s\n"
+ "sqxtn v2.4h, v2.4s\n"
+ "srshl v6.4s, v6.4s, v3.4s\n"
+ "sqxtn v10.4h, v10.4s\n"
+ "sqxtn2 v9.8h, v24.4s\n"
+ "sqxtn2 v7.8h, v0.4s\n"
+ "sqxtn2 v2.8h, v30.4s\n"
+ "sqxtn2 v10.8h, v6.4s\n"
+ "sqadd v9.8h, v9.8h, v13.8h\n"
+ "sqadd v7.8h, v7.8h, v13.8h\n"
+ "sqadd v2.8h, v2.8h, v13.8h\n"
+ "sqadd v10.8h, v10.8h, v13.8h\n"
+ "smax v9.8h, v9.8h, v29.8h\n"
+ "smax v7.8h, v7.8h, v29.8h\n"
+ "smax v2.8h, v2.8h, v29.8h\n"
+ "smax v10.8h, v10.8h, v29.8h\n"
+ "smin v9.8h, v9.8h, v12.8h\n"
+ "smin v7.8h, v7.8h, v12.8h\n"
+ "smin v2.8h, v2.8h, v12.8h\n"
+ "smin v10.8h, v10.8h, v12.8h\n"
+ "uzp1 v9.16b, v9.16b, v9.16b\n"
+ "str d9, [x11, x16]\n"
+ "uzp1 v7.16b, v7.16b, v7.16b\n"
+ "uzp1 v2.16b, v2.16b, v2.16b\n"
+ "str d7, [x10, x16]\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "str d2, [x9, x16]\n"
+ "str d10, [x28, x16]\n"
+ "add x16, x16, #0x8\n"
+ "beq 64f\n"
+ "add x14, x14, #0x48\n"
+ "3:" // Oddments
+ "ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
+ "tbz x7, #2, 5f\n"
+ "ld1 { v9.4s }, [x20], #0x10\n"
+ "tbz x7, #1, 4f\n"
+ "ld1 { v24.d }[0], [x20], #0x8\n"
+ "tbz x7, #0, 7f\n"
+ "ld1 { v24.s }[2], [x20]\n"
+ "b 7f\n"
+ "4:" // Oddments: Load bias: Bit 2: Bit 1: Unset
+ "tbz x7, #0, 7f\n"
+ "ld1 { v24.s }[0], [x20]\n"
+ "b 7f\n"
+ "5:" // Oddments: Load bias: Bit 2: Unset
+ "tbz x7, #1, 6f\n"
+ "ld1 { v9.d }[0], [x20], #0x8\n"
+ "tbz x7, #0, 7f\n"
+ "ld1 { v9.s }[2], [x20]\n"
+ "b 7f\n"
+ "6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset
+ "tbz x7, #0, 7f\n"
+ "ld1 { v9.s }[0], [x20]\n"
+ "7:" // Oddments: Load bias: Bit 2: End
+ "ldr d23, [x14, #0x0]\n"
+ "ldr d16, [x14, #0x8]\n"
+ "mov v7.16b, v9.16b\n"
+ "mov v0.16b, v24.16b\n"
+ "ldr d1, [x14, #0x10]\n"
+ "ldr d5, [x14, #0x18]\n"
+ "mov v2.16b, v9.16b\n"
+ "mov v30.16b, v24.16b\n"
+ "ldr d26, [x14, #0x20]\n"
+ "ldr d18, [x14, #0x28]\n"
+ "mov v10.16b, v9.16b\n"
+ "mov v6.16b, v24.16b\n"
+ "ldr d31, [x14, #0x30]\n"
+ "ldr d25, [x14, #0x38]\n"
+ "ssubl v23.8h, v23.8b, v19.8b\n"
+ "ssubl v16.8h, v16.8b, v19.8b\n"
+ "ldr d20, [x14, #0x40]\n"
+ "ldp x24, x23, [x15, #0x0]\n"
+ "ssubl v1.8h, v1.8b, v19.8b\n"
+ "ssubl v5.8h, v5.8b, v19.8b\n"
+ "ldp x22, x21, [x15, #0x10]\n"
+ "ldr x20, [x15, #0x20]\n"
+ "ssubl v26.8h, v26.8b, v19.8b\n"
+ "ssubl v18.8h, v18.8b, v19.8b\n"
+ "ssubl v31.8h, v31.8b, v19.8b\n"
+ "ssubl v25.8h, v25.8b, v19.8b\n"
+ "ssubl v20.8h, v20.8b, v19.8b\n"
+ "add x24, x24, x17\n"
+ "add x23, x23, x17\n"
+ "add x22, x22, x17\n"
+ "add x21, x21, x17\n"
+ "add x20, x20, x17\n"
+ "tbz x7, #2, 9f\n"
+ "ld1 { v22.s }[0], [x24], #0x4\n"
+ "ld1 { v4.s }[0], [x23], #0x4\n"
+ "ld1 { v8.s }[0], [x22], #0x4\n"
+ "ld1 { v27.s }[0], [x21], #0x4\n"
+ "ld1 { v15.s }[0], [x20], #0x4\n"
+ "tbz x7, #1, 8f\n"
+ "ld1 { v22.h }[2], [x24], #0x2\n"
+ "ld1 { v4.h }[2], [x23], #0x2\n"
+ "ld1 { v8.h }[2], [x22], #0x2\n"
+ "ld1 { v27.h }[2], [x21], #0x2\n"
+ "ld1 { v15.h }[2], [x20], #0x2\n"
+ "tbz x7, #0, 11f\n"
+ "ld1 { v22.b }[6], [x24]\n"
+ "ld1 { v4.b }[6], [x23]\n"
+ "ld1 { v8.b }[6], [x22]\n"
+ "ld1 { v27.b }[6], [x21]\n"
+ "ld1 { v15.b }[6], [x20]\n"
+ "b 11f\n"
+ "8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset
+ "tbz x7, #0, 11f\n"
+ "ld1 { v22.b }[4], [x24]\n"
+ "ld1 { v4.b }[4], [x23]\n"
+ "ld1 { v8.b }[4], [x22]\n"
+ "ld1 { v27.b }[4], [x21]\n"
+ "ld1 { v15.b }[4], [x20]\n"
+ "b 11f\n"
+ "9:" // Oddments: Initial loads: Bit 2: Unset
+ "tbz x7, #1, 10f\n"
+ "ld1 { v22.h }[0], [x24], #0x2\n"
+ "ld1 { v4.h }[0], [x23], #0x2\n"
+ "ld1 { v8.h }[0], [x22], #0x2\n"
+ "ld1 { v27.h }[0], [x21], #0x2\n"
+ "ld1 { v15.h }[0], [x20], #0x2\n"
+ "tbz x7, #0, 11f\n"
+ "ld1 { v22.b }[2], [x24]\n"
+ "ld1 { v4.b }[2], [x23]\n"
+ "ld1 { v8.b }[2], [x22]\n"
+ "ld1 { v27.b }[2], [x21]\n"
+ "ld1 { v15.b }[2], [x20]\n"
+ "b 11f\n"
+ "10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset
+ "tbz x7, #0, 11f\n"
+ "ld1 { v22.b }[0], [x24]\n"
+ "ld1 { v4.b }[0], [x23]\n"
+ "ld1 { v8.b }[0], [x22]\n"
+ "ld1 { v27.b }[0], [x21]\n"
+ "ld1 { v15.b }[0], [x20]\n"
+ "11:" // Oddments: Initial loads: Bit 2: End
+ "usubl v22.8h, v22.8b, v14.8b\n"
+ "smlal v9.4s, v22.4h, v26.4h\n"
+ "smlal2 v24.4s, v22.8h, v26.8h\n"
+ "ldr x20, [x15, #0x28]\n"
+ "smlal v7.4s, v22.4h, v5.4h\n"
+ "smlal2 v0.4s, v22.8h, v5.8h\n"
+ "usubl v4.8h, v4.8b, v14.8b\n"
+ "usubl v8.8h, v8.8b, v14.8b\n"
+ "smlal v2.4s, v22.4h, v16.4h\n"
+ "smlal2 v30.4s, v22.8h, v16.8h\n"
+ "add x20, x20, x17\n"
+ "smlal v10.4s, v22.4h, v23.4h\n"
+ "smlal2 v6.4s, v22.8h, v23.8h\n"
+ "usubl v27.8h, v27.8b, v14.8b\n"
+ "smlal v9.4s, v4.4h, v23.4h\n"
+ "smlal2 v24.4s, v4.8h, v23.8h\n"
+ "usubl v15.8h, v15.8b, v14.8b\n"
+ "smlal v7.4s, v8.4h, v1.4h\n"
+ "smlal2 v0.4s, v8.8h, v1.8h\n"
+ "smlal v9.4s, v27.4h, v18.4h\n"
+ "smlal2 v24.4s, v27.8h, v18.8h\n"
+ "smlal v7.4s, v27.4h, v26.4h\n"
+ "smlal2 v0.4s, v27.8h, v26.8h\n"
+ "smlal v2.4s, v27.4h, v1.4h\n"
+ "smlal2 v30.4s, v27.8h, v1.8h\n"
+ "smlal v10.4s, v27.4h, v16.4h\n"
+ "smlal2 v6.4s, v27.8h, v16.8h\n"
+ "tbz x7, #2, 13f\n"
+ "ld1 { v21.s }[0], [x20], #0x4\n"
+ "tbz x7, #1, 12f\n"
+ "ld1 { v21.h }[2], [x20], #0x2\n"
+ "tbz x7, #0, 15f\n"
+ "ld1 { v21.b }[6], [x20]\n"
+ "b 15f\n"
+ "12:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset
+ "tbz x7, #0, 15f\n"
+ "ld1 { v21.b }[4], [x20]\n"
+ "b 15f\n"
+ "13:" // Oddments: Load (3, 0): Bit 2: Unset
+ "tbz x7, #1, 14f\n"
+ "ld1 { v21.h }[0], [x20], #0x2\n"
+ "tbz x7, #0, 15f\n"
+ "ld1 { v21.b }[2], [x20]\n"
+ "b 15f\n"
+ "14:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset
+ "tbz x7, #0, 15f\n"
+ "ld1 { v21.b }[0], [x20]\n"
+ "15:" // Oddments: Load (3, 0): Bit 2: End
+ "usubl v21.8h, v21.8b, v14.8b\n"
+ "smlal v2.4s, v21.4h, v31.4h\n"
+ "smlal2 v30.4s, v21.8h, v31.8h\n"
+ "ldr x20, [x15, #0x30]\n"
+ "smlal v9.4s, v15.4h, v25.4h\n"
+ "smlal2 v24.4s, v15.8h, v25.8h\n"
+ "add x20, x20, x17\n"
+ "smlal v7.4s, v15.4h, v31.4h\n"
+ "smlal2 v0.4s, v15.8h, v31.8h\n"
+ "smlal v2.4s, v15.4h, v26.4h\n"
+ "smlal2 v30.4s, v15.8h, v26.8h\n"
+ "smlal v10.4s, v15.4h, v5.4h\n"
+ "smlal2 v6.4s, v15.8h, v5.8h\n"
+ "tbz x7, #2, 17f\n"
+ "ld1 { v28.s }[0], [x20], #0x4\n"
+ "tbz x7, #1, 16f\n"
+ "ld1 { v28.h }[2], [x20], #0x2\n"
+ "tbz x7, #0, 19f\n"
+ "ld1 { v28.b }[6], [x20]\n"
+ "b 19f\n"
+ "16:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset
+ "tbz x7, #0, 19f\n"
+ "ld1 { v28.b }[4], [x20]\n"
+ "b 19f\n"
+ "17:" // Oddments: Load (3, 3): Bit 2: Unset
+ "tbz x7, #1, 18f\n"
+ "ld1 { v28.h }[0], [x20], #0x2\n"
+ "tbz x7, #0, 19f\n"
+ "ld1 { v28.b }[2], [x20]\n"
+ "b 19f\n"
+ "18:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset
+ "tbz x7, #0, 19f\n"
+ "ld1 { v28.b }[0], [x20]\n"
+ "19:" // Oddments: Load (3, 3): Bit 2: End
+ "usubl v28.8h, v28.8b, v14.8b\n"
+ "ldr x20, [x15, #0x38]\n"
+ "smlal v10.4s, v28.4h, v20.4h\n"
+ "smlal2 v6.4s, v28.8h, v20.8h\n"
+ "add x20, x20, x17\n"
+ "tbz x7, #2, 21f\n"
+ "ld1 { v22.s }[0], [x20], #0x4\n"
+ "tbz x7, #1, 20f\n"
+ "ld1 { v22.h }[2], [x20], #0x2\n"
+ "tbz x7, #0, 23f\n"
+ "ld1 { v22.b }[6], [x20]\n"
+ "b 23f\n"
+ "20:" // Oddments: Load (0, 1): Bit 2: Bit 1: Unset
+ "tbz x7, #0, 23f\n"
+ "ld1 { v22.b }[4], [x20]\n"
+ "b 23f\n"
+ "21:" // Oddments: Load (0, 1): Bit 2: Unset
+ "tbz x7, #1, 22f\n"
+ "ld1 { v22.h }[0], [x20], #0x2\n"
+ "tbz x7, #0, 23f\n"
+ "ld1 { v22.b }[2], [x20]\n"
+ "b 23f\n"
+ "22:" // Oddments: Load (0, 1): Bit 2: Unset: Bit 1: Unset
+ "tbz x7, #0, 23f\n"
+ "ld1 { v22.b }[0], [x20]\n"
+ "23:" // Oddments: Load (0, 1): Bit 2: End
+ "usubl v22.8h, v22.8b, v14.8b\n"
+ "ldr x20, [x15, #0x40]\n"
+ "smlal v9.4s, v22.4h, v16.4h\n"
+ "smlal2 v24.4s, v22.8h, v16.8h\n"
+ "smlal v7.4s, v22.4h, v23.4h\n"
+ "smlal2 v0.4s, v22.8h, v23.8h\n"
+ "add x20, x20, x17\n"
+ "tbz x7, #2, 25f\n"
+ "ld1 { v21.s }[0], [x20], #0x4\n"
+ "tbz x7, #1, 24f\n"
+ "ld1 { v21.h }[2], [x20], #0x2\n"
+ "tbz x7, #0, 27f\n"
+ "ld1 { v21.b }[6], [x20]\n"
+ "b 27f\n"
+ "24:" // Oddments: Load (0, 2): Bit 2: Bit 1: Unset
+ "tbz x7, #0, 27f\n"
+ "ld1 { v21.b }[4], [x20]\n"
+ "b 27f\n"
+ "25:" // Oddments: Load (0, 2): Bit 2: Unset
+ "tbz x7, #1, 26f\n"
+ "ld1 { v21.h }[0], [x20], #0x2\n"
+ "tbz x7, #0, 27f\n"
+ "ld1 { v21.b }[2], [x20]\n"
+ "b 27f\n"
+ "26:" // Oddments: Load (0, 2): Bit 2: Unset: Bit 1: Unset
+ "tbz x7, #0, 27f\n"
+ "ld1 { v21.b }[0], [x20]\n"
+ "27:" // Oddments: Load (0, 2): Bit 2: End
+ "usubl v21.8h, v21.8b, v14.8b\n"
+ "ldr x20, [x15, #0x48]\n"
+ "smlal v9.4s, v21.4h, v1.4h\n"
+ "smlal2 v24.4s, v21.8h, v1.8h\n"
+ "smlal v7.4s, v21.4h, v16.4h\n"
+ "smlal2 v0.4s, v21.8h, v16.8h\n"
+ "add x20, x20, x17\n"
+ "tbz x7, #2, 29f\n"
+ "ld1 { v28.s }[0], [x20], #0x4\n"
+ "tbz x7, #1, 28f\n"
+ "ld1 { v28.h }[2], [x20], #0x2\n"
+ "tbz x7, #0, 31f\n"
+ "ld1 { v28.b }[6], [x20]\n"
+ "b 31f\n"
+ "28:" // Oddments: Load (2, 2): Bit 2: Bit 1: Unset
+ "tbz x7, #0, 31f\n"
+ "ld1 { v28.b }[4], [x20]\n"
+ "b 31f\n"
+ "29:" // Oddments: Load (2, 2): Bit 2: Unset
+ "tbz x7, #1, 30f\n"
+ "ld1 { v28.h }[0], [x20], #0x2\n"
+ "tbz x7, #0, 31f\n"
+ "ld1 { v28.b }[2], [x20]\n"
+ "b 31f\n"
+ "30:" // Oddments: Load (2, 2): Bit 2: Unset: Bit 1: Unset
+ "tbz x7, #0, 31f\n"
+ "ld1 { v28.b }[0], [x20]\n"
+ "31:" // Oddments: Load (2, 2): Bit 2: End
+ "usubl v28.8h, v28.8b, v14.8b\n"
+ "ldr x20, [x15, #0x50]\n"
+ "smlal v9.4s, v28.4h, v20.4h\n"
+ "smlal2 v24.4s, v28.8h, v20.8h\n"
+ "smlal v7.4s, v28.4h, v25.4h\n"
+ "smlal2 v0.4s, v28.8h, v25.8h\n"
+ "add x20, x20, x17\n"
+ "smlal v2.4s, v28.4h, v18.4h\n"
+ "smlal2 v30.4s, v28.8h, v18.8h\n"
+ "smlal v10.4s, v28.4h, v26.4h\n"
+ "smlal2 v6.4s, v28.8h, v26.8h\n"
+ "tbz x7, #2, 33f\n"
+ "ld1 { v8.s }[0], [x20], #0x4\n"
+ "tbz x7, #1, 32f\n"
+ "ld1 { v8.h }[2], [x20], #0x2\n"
+ "tbz x7, #0, 35f\n"
+ "ld1 { v8.b }[6], [x20]\n"
+ "b 35f\n"
+ "32:" // Oddments: Load (1, 0): Bit 2: Bit 1: Unset
+ "tbz x7, #0, 35f\n"
+ "ld1 { v8.b }[4], [x20]\n"
+ "b 35f\n"
+ "33:" // Oddments: Load (1, 0): Bit 2: Unset
+ "tbz x7, #1, 34f\n"
+ "ld1 { v8.h }[0], [x20], #0x2\n"
+ "tbz x7, #0, 35f\n"
+ "ld1 { v8.b }[2], [x20]\n"
+ "b 35f\n"
+ "34:" // Oddments: Load (1, 0): Bit 2: Unset: Bit 1: Unset
+ "tbz x7, #0, 35f\n"
+ "ld1 { v8.b }[0], [x20]\n"
+ "35:" // Oddments: Load (1, 0): Bit 2: End
+ "usubl v8.8h, v8.8b, v14.8b\n"
+ "ldr x20, [x15, #0x58]\n"
+ "smlal v9.4s, v8.4h, v5.4h\n"
+ "smlal2 v24.4s, v8.8h, v5.8h\n"
+ "smlal v2.4s, v8.4h, v23.4h\n"
+ "smlal2 v30.4s, v8.8h, v23.8h\n"
+ "add x20, x20, x17\n"
+ "tbz x7, #2, 37f\n"
+ "ld1 { v8.s }[0], [x20], #0x4\n"
+ "tbz x7, #1, 36f\n"
+ "ld1 { v8.h }[2], [x20], #0x2\n"
+ "tbz x7, #0, 39f\n"
+ "ld1 { v8.b }[6], [x20]\n"
+ "b 39f\n"
+ "36:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset
+ "tbz x7, #0, 39f\n"
+ "ld1 { v8.b }[4], [x20]\n"
+ "b 39f\n"
+ "37:" // Oddments: Load (1, 3): Bit 2: Unset
+ "tbz x7, #1, 38f\n"
+ "ld1 { v8.h }[0], [x20], #0x2\n"
+ "tbz x7, #0, 39f\n"
+ "ld1 { v8.b }[2], [x20]\n"
+ "b 39f\n"
+ "38:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset
+ "tbz x7, #0, 39f\n"
+ "ld1 { v8.b }[0], [x20]\n"
+ "39:" // Oddments: Load (1, 3): Bit 2: End
+ "usubl v8.8h, v8.8b, v14.8b\n"
+ "ldr x20, [x15, #0x60]\n"
+ "smlal v7.4s, v8.4h, v18.4h\n"
+ "smlal2 v0.4s, v8.8h, v18.8h\n"
+ "smlal v10.4s, v8.4h, v1.4h\n"
+ "smlal2 v6.4s, v8.8h, v1.8h\n"
+ "add x20, x20, x17\n"
+ "tbz x7, #2, 41f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x7, #1, 40f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x7, #0, 43f\n"
+ "ld1 { v17.b }[6], [x20]\n"
+ "b 43f\n"
+ "40:" // Oddments: Load (2, 0): Bit 2: Bit 1: Unset
+ "tbz x7, #0, 43f\n"
+ "ld1 { v17.b }[4], [x20]\n"
+ "b 43f\n"
+ "41:" // Oddments: Load (2, 0): Bit 2: Unset
+ "tbz x7, #1, 42f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x7, #0, 43f\n"
+ "ld1 { v17.b }[2], [x20]\n"
+ "b 43f\n"
+ "42:" // Oddments: Load (2, 0): Bit 2: Unset: Bit 1: Unset
+ "tbz x7, #0, 43f\n"
+ "ld1 { v17.b }[0], [x20]\n"
+ "43:" // Oddments: Load (2, 0): Bit 2: End
+ "usubl v17.8h, v17.8b, v14.8b\n"
+ "ldr x20, [x15, #0x68]\n"
+ "smlal v9.4s, v17.4h, v31.4h\n"
+ "smlal2 v24.4s, v17.8h, v31.8h\n"
+ "smlal v2.4s, v17.4h, v5.4h\n"
+ "smlal2 v30.4s, v17.8h, v5.8h\n"
+ "add x20, x20, x17\n"
+ "tbz x7, #2, 45f\n"
+ "ld1 { v23.s }[0], [x20], #0x4\n"
+ "tbz x7, #1, 44f\n"
+ "ld1 { v23.h }[2], [x20], #0x2\n"
+ "tbz x7, #0, 47f\n"
+ "ld1 { v23.b }[6], [x20]\n"
+ "b 47f\n"
+ "44:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset
+ "tbz x7, #0, 47f\n"
+ "ld1 { v23.b }[4], [x20]\n"
+ "b 47f\n"
+ "45:" // Oddments: Load (2, 3): Bit 2: Unset
+ "tbz x7, #1, 46f\n"
+ "ld1 { v23.h }[0], [x20], #0x2\n"
+ "tbz x7, #0, 47f\n"
+ "ld1 { v23.b }[2], [x20]\n"
+ "b 47f\n"
+ "46:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset
+ "tbz x7, #0, 47f\n"
+ "ld1 { v23.b }[0], [x20]\n"
+ "47:" // Oddments: Load (2, 3): Bit 2: End
+ "usubl v23.8h, v23.8b, v14.8b\n"
+ "ldr x20, [x15, #0x70]\n"
+ "smlal v7.4s, v23.4h, v20.4h\n"
+ "smlal2 v0.4s, v23.8h, v20.8h\n"
+ "smlal v10.4s, v23.4h, v18.4h\n"
+ "smlal2 v6.4s, v23.8h, v18.8h\n"
+ "add x20, x20, x17\n"
+ "tbz x7, #2, 49f\n"
+ "ld1 { v5.s }[0], [x20], #0x4\n"
+ "tbz x7, #1, 48f\n"
+ "ld1 { v5.h }[2], [x20], #0x2\n"
+ "tbz x7, #0, 51f\n"
+ "ld1 { v5.b }[6], [x20]\n"
+ "b 51f\n"
+ "48:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset
+ "tbz x7, #0, 51f\n"
+ "ld1 { v5.b }[4], [x20]\n"
+ "b 51f\n"
+ "49:" // Oddments: Load (3, 1): Bit 2: Unset
+ "tbz x7, #1, 50f\n"
+ "ld1 { v5.h }[0], [x20], #0x2\n"
+ "tbz x7, #0, 51f\n"
+ "ld1 { v5.b }[2], [x20]\n"
+ "b 51f\n"
+ "50:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset
+ "tbz x7, #0, 51f\n"
+ "ld1 { v5.b }[0], [x20]\n"
+ "51:" // Oddments: Load (3, 1): Bit 2: End
+ "usubl v5.8h, v5.8b, v14.8b\n"
+ "ldr x20, [x15, #0x78]\n"
+ "smlal v2.4s, v5.4h, v25.4h\n"
+ "smlal2 v30.4s, v5.8h, v25.8h\n"
+ "smlal v10.4s, v5.4h, v31.4h\n"
+ "smlal2 v6.4s, v5.8h, v31.8h\n"
+ "add x20, x20, x17\n"
+ "tbz x7, #2, 53f\n"
+ "ld1 { v23.s }[0], [x20], #0x4\n"
+ "tbz x7, #1, 52f\n"
+ "ld1 { v23.h }[2], [x20], #0x2\n"
+ "tbz x7, #0, 55f\n"
+ "ld1 { v23.b }[6], [x20]\n"
+ "b 55f\n"
+ "52:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset
+ "tbz x7, #0, 55f\n"
+ "ld1 { v23.b }[4], [x20]\n"
+ "b 55f\n"
+ "53:" // Oddments: Load (3, 2): Bit 2: Unset
+ "tbz x7, #1, 54f\n"
+ "ld1 { v23.h }[0], [x20], #0x2\n"
+ "tbz x7, #0, 55f\n"
+ "ld1 { v23.b }[2], [x20]\n"
+ "b 55f\n"
+ "54:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset
+ "tbz x7, #0, 55f\n"
+ "ld1 { v23.b }[0], [x20]\n"
+ "55:" // Oddments: Load (3, 2): Bit 2: End
+ "usubl v23.8h, v23.8b, v14.8b\n"
+ "smlal v2.4s, v23.4h, v20.4h\n"
+ "smlal2 v30.4s, v23.8h, v20.8h\n"
+ "smlal v10.4s, v23.4h, v25.4h\n"
+ "smlal2 v6.4s, v23.8h, v25.8h\n"
+ "tbz x7, #2, 57f\n"
+ "ld1 { v15.4s }, [x13], #0x10\n"
+ "ld1 { v19.4s }, [x12], #0x10\n"
+ "tbz x7, #1, 56f\n"
+ "ld1 { v18.d }[0], [x13], #0x8\n"
+ "ld1 { v22.d }[0], [x12], #0x8\n"
+ "tbz x7, #0, 59f\n"
+ "ld1 { v18.s }[2], [x13]\n"
+ "ld1 { v22.s }[2], [x12]\n"
+ "b 59f\n"
+ "56:" // Oddments: Load requant params: Bit 2: Bit 1: Unset
+ "tbz x7, #0, 59f\n"
+ "ld1 { v18.s }[0], [x13]\n"
+ "ld1 { v22.s }[0], [x12]\n"
+ "b 59f\n"
+ "57:" // Oddments: Load requant params: Bit 2: Unset
+ "tbz x7, #1, 58f\n"
+ "ld1 { v15.d }[0], [x13], #0x8\n"
+ "ld1 { v19.d }[0], [x12], #0x8\n"
+ "tbz x7, #0, 59f\n"
+ "ld1 { v15.s }[2], [x13]\n"
+ "ld1 { v19.s }[2], [x12]\n"
+ "b 59f\n"
+ "58:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset
+ "tbz x7, #0, 59f\n"
+ "ld1 { v15.s }[0], [x13]\n"
+ "ld1 { v19.s }[0], [x12]\n"
+ "59:" // Oddments: Load requant params: Bit 2: End
+ "sqrdmulh v9.4s, v9.4s, v15.4s\n"
+ "and v17.16b, v9.16b, v19.16b\n"
+ "add x11, x11, x16\n"
+ "add x10, x10, x16\n"
+ "sqrdmulh v24.4s, v24.4s, v18.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "add x9, x9, x16\n"
+ "add x28, x28, x16\n"
+ "and v20.16b, v24.16b, v22.16b\n"
+ "sqrdmulh v7.4s, v7.4s, v15.4s\n"
+ "sqrdmulh v2.4s, v2.4s, v15.4s\n"
+ "sqrdmulh v10.4s, v10.4s, v15.4s\n"
+ "sqadd v9.4s, v9.4s, v17.4s\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v21.16b, v7.16b, v19.16b\n"
+ "sqrdmulh v0.4s, v0.4s, v18.4s\n"
+ "and v15.16b, v2.16b, v19.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v18.4s\n"
+ "and v23.16b, v10.16b, v19.16b\n"
+ "sqrdmulh v6.4s, v6.4s, v18.4s\n"
+ "sqadd v24.4s, v24.4s, v20.4s\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "and v18.16b, v0.16b, v22.16b\n"
+ "sshr v15.4s, v15.4s, #0x1f\n"
+ "and v17.16b, v30.16b, v22.16b\n"
+ "sshr v23.4s, v23.4s, #0x1f\n"
+ "and v28.16b, v6.16b, v22.16b\n"
+ "sqadd v7.4s, v7.4s, v21.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sqadd v2.4s, v2.4s, v15.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v10.4s, v10.4s, v23.4s\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "srshl v9.4s, v9.4s, v19.4s\n"
+ "srshl v7.4s, v7.4s, v19.4s\n"
+ "sqadd v0.4s, v0.4s, v18.4s\n"
+ "srshl v2.4s, v2.4s, v19.4s\n"
+ "sqadd v30.4s, v30.4s, v17.4s\n"
+ "srshl v10.4s, v10.4s, v19.4s\n"
+ "sqadd v6.4s, v6.4s, v28.4s\n"
+ "srshl v24.4s, v24.4s, v22.4s\n"
+ "sqxtn v9.4h, v9.4s\n"
+ "srshl v0.4s, v0.4s, v22.4s\n"
+ "sqxtn v7.4h, v7.4s\n"
+ "srshl v30.4s, v30.4s, v22.4s\n"
+ "sqxtn v2.4h, v2.4s\n"
+ "srshl v6.4s, v6.4s, v22.4s\n"
+ "sqxtn v10.4h, v10.4s\n"
+ "sqxtn2 v9.8h, v24.4s\n"
+ "sqxtn2 v7.8h, v0.4s\n"
+ "sqxtn2 v2.8h, v30.4s\n"
+ "sqxtn2 v10.8h, v6.4s\n"
+ "sqadd v9.8h, v9.8h, v13.8h\n"
+ "sqadd v7.8h, v7.8h, v13.8h\n"
+ "sqadd v2.8h, v2.8h, v13.8h\n"
+ "sqadd v10.8h, v10.8h, v13.8h\n"
+ "smax v9.8h, v9.8h, v29.8h\n"
+ "smax v7.8h, v7.8h, v29.8h\n"
+ "smax v2.8h, v2.8h, v29.8h\n"
+ "smax v10.8h, v10.8h, v29.8h\n"
+ "smin v9.8h, v9.8h, v12.8h\n"
+ "smin v7.8h, v7.8h, v12.8h\n"
+ "smin v2.8h, v2.8h, v12.8h\n"
+ "smin v10.8h, v10.8h, v12.8h\n"
+ "uzp1 v9.16b, v9.16b, v9.16b\n"
+ "uzp1 v7.16b, v7.16b, v7.16b\n"
+ "uzp1 v2.16b, v2.16b, v2.16b\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "tbz x7, #2, 61f\n"
+ "st1 { v9.s }[0], [x11], #0x4\n"
+ "st1 { v7.s }[0], [x10], #0x4\n"
+ "st1 { v2.s }[0], [x9], #0x4\n"
+ "st1 { v10.s }[0], [x28], #0x4\n"
+ "tbz x7, #1, 60f\n"
+ "st1 { v9.h }[2], [x11], #0x2\n"
+ "st1 { v7.h }[2], [x10], #0x2\n"
+ "st1 { v2.h }[2], [x9], #0x2\n"
+ "st1 { v10.h }[2], [x28], #0x2\n"
+ "tbz x7, #0, 63f\n"
+ "st1 { v9.b }[6], [x11], #0x1\n"
+ "st1 { v7.b }[6], [x10], #0x1\n"
+ "st1 { v2.b }[6], [x9], #0x1\n"
+ "st1 { v10.b }[6], [x28], #0x1\n"
+ "b 63f\n"
+ "60:" // Oddments: Bit 2: Bit 1: Unset
+ "tbz x7, #0, 63f\n"
+ "st1 { v9.b }[4], [x11], #0x1\n"
+ "st1 { v7.b }[4], [x10], #0x1\n"
+ "st1 { v2.b }[4], [x9], #0x1\n"
+ "st1 { v10.b }[4], [x28], #0x1\n"
+ "b 63f\n"
+ "61:" // Oddments: Bit 2: Unset
+ "tbz x7, #1, 62f\n"
+ "st1 { v9.h }[0], [x11], #0x2\n"
+ "st1 { v7.h }[0], [x10], #0x2\n"
+ "st1 { v2.h }[0], [x9], #0x2\n"
+ "st1 { v10.h }[0], [x28], #0x2\n"
+ "tbz x7, #0, 63f\n"
+ "st1 { v9.b }[2], [x11], #0x1\n"
+ "st1 { v7.b }[2], [x10], #0x1\n"
+ "st1 { v2.b }[2], [x9], #0x1\n"
+ "st1 { v10.b }[2], [x28], #0x1\n"
+ "b 63f\n"
+ "62:" // Oddments: Bit 2: Unset: Bit 1: Unset
+ "tbz x7, #0, 63f\n"
+ "st1 { v9.b }[0], [x11], #0x1\n"
+ "st1 { v7.b }[0], [x10], #0x1\n"
+ "st1 { v2.b }[0], [x9], #0x1\n"
+ "st1 { v10.b }[0], [x28], #0x1\n"
+ "63:" // Oddments: Bit 2: End
+ "64:" // End
+ :
+ : [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ );
+}
+
+} // namespace depthwise
+} // namespace arm_conv
+
+#endif // defined(__aarch64__)