Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/kernels')
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x8.hpp           89
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x8/generic.cpp  1923
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4.hpp           2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4/a55.cpp      375
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4/generic.cpp  321
5 files changed, 2471 insertions(+), 239 deletions(-)
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x8.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x8.hpp
new file mode 100644
index 0000000000..da5beef48c
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x8.hpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2018-2019 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+
+#include "../std_transforms_fixed.hpp"
+
+namespace arm_gemm
+{
+
+// Actual kernel implementations
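+// Arguments: A, lda, packed B panel, C, ldc, M, N, K, bias pointer (may be
+// null), activation to apply, and an append flag requesting accumulation
+// into the existing C values.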
+void a64_hybrid_fp32_mla_4x8(const float *, int, const float *, float *, int, int, int, int, const float *, Activation, bool);
+
+class hybrid_fp32_mla_4x8
+{
+public:
+ typedef float operand_type;
+ typedef float result_type;
+
+ typedef void (*kern_type)(const float *, int, const float *, float *, int, int, int, int, const float *, Activation, bool);
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 8;
+ }
+
+ static unsigned int out_width()
+ {
+ return 4;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 1;
+ }
+
+ static constexpr bool supports_append()
+ {
+ return false;
+ }
+
+ static constexpr bool supports_bias()
+ {
+ return true;
+ }
+
+ static constexpr bool supports_activation()
+ {
+ return true;
+ }
+
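+ // The <8, 4, 1> arguments mirror out_height(), out_width() and k_unroll()
+ // above, so the standard fixed-format transforms block the data identically.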
+ StdTransformsFixed<operand_type, result_type, 8, 4, 1> transforms = {};
+
+ // Default to the generic kernel
+ kern_type kernel=a64_hybrid_fp32_mla_4x8;
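+
+ // Minimal usage sketch (hypothetical caller, not part of this file):
+ //   hybrid_fp32_mla_4x8 strat(ci);
+ //   strat.kernel(A, lda, B, C, ldc, M, N, K, bias, Activation(), false);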
+
+ hybrid_fp32_mla_4x8(const CPUInfo *ci)
+ {
+ UNUSED(ci);
+ }
+};
+
+} // namespace arm_gemm
+
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x8/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x8/generic.cpp
new file mode 100644
index 0000000000..db7eb83160
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x8/generic.cpp
@@ -0,0 +1,1923 @@
+/*
+ * Copyright (c) 2018-2019 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef __aarch64__
+
+#include <algorithm>
+#include <cstring>
+#include <limits>
+
+#include "arm_gemm.hpp"
+
+#include "../../asmlib.hpp"
+#include "../../utils.hpp"
+
+namespace arm_gemm {
+
+void a64_hybrid_fp32_mla_4x8(const float *A, int lda, const float *B, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool append) {
+ const int K_stride = K;
+ const long loops_count = ((K + 4) / 8) - 1;
+ K -= loops_count * 8;
+ const long regs_count = (K / 4) - 1;
+ K -= (regs_count + 1) * 4;
+ const long blocks_count = K / 1;
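+ // K is split as loops_count iterations of 8, one pipelined tail of 8
+ // (regs_count == 1) or 4 (regs_count == 0), and blocks_count (= K mod 4)
+ // single leftover elements.  E.g. K = 21: loops = 2, regs = 0, blocks = 1.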
+ float nullbias[4];
+ if (!append && !bias) {
+ memset(nullbias, 0, (4 * sizeof(float)));
+ }
+ float minval = - static_cast<float>(std::numeric_limits<float>::infinity());
+ float maxval = static_cast<float>(std::numeric_limits<float>::infinity());
+ const float * const minptr = &minval;
+ const float * const maxptr = &maxval;
+
+ switch(act.type)
+ {
+ default:
+ case Activation::Type::None:
+ break;
+ case Activation::Type::BoundedReLU:
+ maxval = static_cast<float>(act.param1);
+ /* fall through */
+ case Activation::Type::ReLU:
+ minval = 0.0f;
+ break;
+ }
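+ // minval/maxval are broadcast inside the assembly (ld1r) and applied to
+ // every accumulator with fmax/fmin just before the stores.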
+
+ for (int y=0; y<M; y+=8) {
+ const float * const a_ptr0_base = A + (y * lda);
+ const unsigned long ldab = lda * sizeof(float);
+
+ float *c_ptr0 = C + (y * ldc);
+
+ for (int x0=0; x0<N; x0+=4ul) {
+ const long width = std::min((unsigned long)N-x0, 4ul);
+ long loops = loops_count;
+ long regs = regs_count;
+ long blocks = blocks_count;
+ const float *a_ptr0 = a_ptr0_base;
+ const float *b_ptr0 = B + (K_stride * x0);
+ const bool use_result_buffer = (width < 4);
+ float result_buffer[32];
+ const unsigned long ldcb = (use_result_buffer ? 4 : ldc) * sizeof(float);
+ float *c_ptr_real = c_ptr0;
+ if (use_result_buffer && append) {
+ for(int cy=0; cy<std::min(M-y, 8); cy++) {
+ for(unsigned int cx=0; cx<width; cx++) {
+ result_buffer[cy * 4 + cx] = c_ptr_real[cy * ldc + cx];
+ }
+ }
+ }
+ if (use_result_buffer) {
+ c_ptr0 = result_buffer;
+ }
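+ // Narrow output tiles (width < 4) are redirected into result_buffer (row
+ // stride of 4 floats) because the assembly always stores full q registers;
+ // only the valid lanes are copied back to C afterwards.  The copy-in above
+ // serves the append case, which this kernel does not advertise
+ // (supports_append() is false).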
+ const float *biasptr = bias ? bias+x0 : nullbias;
+
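+ // One hand-unrolled assembly block per count of live rows (M-y); eight or
+ // more rows take the default 8-row case.  All blocks share one label scheme:
+ // "2:" is the main K loop (8 A elements per row per iteration), "1:" drains
+ // the software pipeline, then [regs] selects an inline 8-element tail or the
+ // 4-element tail at "3:", "6:" is the scalar loop over [blocks], and "5:"
+ // clamps and stores the results.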
+ switch(M-y) {
+ case 1:
+ __asm __volatile (
+ "ldr q24, [%[biasptr]]\n"
+ "ldr q0, [%[a_ptr0]]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "ldr q17, [%[b_ptr0], #0x10]\n"
+ "ldr q18, [%[b_ptr0], #0x20]\n"
+ "add %[b_ptr0], %[b_ptr0], #0x40\n"
+ "cbz %[loops], 1f\n"
+ "2:\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "ldr q19, [%[b_ptr0], #-0x10]\n"
+ "ldr q8, [%[a_ptr0]]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "prfm PLDL1KEEP, [%[a_ptr0], #0x40]\n"
+ "fmla v24.4s, v17.4s, v0.s[1]\n"
+ "ldr q17, [%[b_ptr0], #0x10]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x20\n"
+ "fmla v24.4s, v18.4s, v0.s[2]\n"
+ "ldr q18, [%[b_ptr0], #0x20]\n"
+ "fmla v24.4s, v19.4s, v0.s[3]\n"
+ "ldr q19, [%[b_ptr0], #0x30]\n"
+ "ldr q0, [%[a_ptr0], #-0x10]\n"
+ "fmla v24.4s, v16.4s, v8.s[0]\n"
+ "ldr q16, [%[b_ptr0], #0x40]\n"
+ "fmla v24.4s, v17.4s, v8.s[1]\n"
+ "ldr q17, [%[b_ptr0], #0x50]\n"
+ "fmla v24.4s, v18.4s, v8.s[2]\n"
+ "ldr q18, [%[b_ptr0], #0x60]\n"
+ "add %[b_ptr0], %[b_ptr0], #0x80\n"
+ "fmla v24.4s, v19.4s, v8.s[3]\n"
+ "b.ne 2b\n"
+ "1:\n"
+ "ldr q19, [%[b_ptr0], #-0x10]\n"
+ "prfm PSTL1KEEP, [%[c_ptr0]]\n"
+ "cbz %[regs], 3f\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "ldr q8, [%[a_ptr0]]\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
+ "fmla v24.4s, v17.4s, v0.s[1]\n"
+ "ldr q17, [%[b_ptr0], #0x10]\n"
+ "fmla v24.4s, v18.4s, v0.s[2]\n"
+ "ldr q18, [%[b_ptr0], #0x20]\n"
+ "fmla v24.4s, v19.4s, v0.s[3]\n"
+ "ldr q19, [%[b_ptr0], #0x30]\n"
+ "add %[b_ptr0], %[b_ptr0], #0x40\n"
+ "fmla v24.4s, v16.4s, v8.s[0]\n"
+ "fmla v24.4s, v17.4s, v8.s[1]\n"
+ "fmla v24.4s, v18.4s, v8.s[2]\n"
+ "fmla v24.4s, v19.4s, v8.s[3]\n"
+ "b 4f\n"
+ "3:\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "fmla v24.4s, v17.4s, v0.s[1]\n"
+ "fmla v24.4s, v18.4s, v0.s[2]\n"
+ "fmla v24.4s, v19.4s, v0.s[3]\n"
+ "4:\n"
+ "cbz %[blocks], 5f\n"
+ "6:\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "subs %[blocks], %[blocks], #0x1\n"
+ "add %[b_ptr0], %[b_ptr0], #0x10\n"
+ "ldr s0, [%[a_ptr0]]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x4\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "b.ne 6b\n"
+ "5:\n"
+ "ld1r {v22.4s}, [%[minptr]]\n"
+ "ld1r {v23.4s}, [%[maxptr]]\n"
+ "fmax v24.4s, v24.4s, v22.4s\n"
+ "fmin v24.4s, v24.4s, v23.4s\n"
+ "str q24, [%[c_ptr0]]\n"
+ "add %[c_ptr0], %[c_ptr0], #0x10\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
+ : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "cc", "memory"
+ );
+ break;
+ case 2:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "c_ptr1 .req X1\n"
+ "ldr q24, [%[biasptr]]\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "ldr q0, [%[a_ptr0]]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "mov v25.16b, v24.16b\n"
+ "ldr q1, [a_ptr1]\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
+ "ldr q17, [%[b_ptr0], #0x10]\n"
+ "add a_ptr1, a_ptr1, #0x10\n"
+ "ldr q18, [%[b_ptr0], #0x20]\n"
+ "add %[b_ptr0], %[b_ptr0], #0x40\n"
+ "cbz %[loops], 1f\n"
+ "2:\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "ldr q19, [%[b_ptr0], #-0x10]\n"
+ "fmla v25.4s, v16.4s, v1.s[0]\n"
+ "ldr q8, [%[a_ptr0]]\n"
+ "ldr q9, [a_ptr1]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "fmla v24.4s, v17.4s, v0.s[1]\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "fmla v25.4s, v17.4s, v1.s[1]\n"
+ "ldr q17, [%[b_ptr0], #0x10]\n"
+ "prfm PLDL1KEEP, [%[a_ptr0], #0x40]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x20\n"
+ "fmla v24.4s, v18.4s, v0.s[2]\n"
+ "add a_ptr1, a_ptr1, #0x20\n"
+ "fmla v25.4s, v18.4s, v1.s[2]\n"
+ "ldr q18, [%[b_ptr0], #0x20]\n"
+ "prfm PLDL1KEEP, [a_ptr1, #0x40]\n"
+ "fmla v24.4s, v19.4s, v0.s[3]\n"
+ "ldr q0, [%[a_ptr0], #-0x10]\n"
+ "fmla v25.4s, v19.4s, v1.s[3]\n"
+ "ldr q19, [%[b_ptr0], #0x30]\n"
+ "ldr q1, [a_ptr1, #-0x10]\n"
+ "fmla v24.4s, v16.4s, v8.s[0]\n"
+ "fmla v25.4s, v16.4s, v9.s[0]\n"
+ "ldr q16, [%[b_ptr0], #0x40]\n"
+ "fmla v24.4s, v17.4s, v8.s[1]\n"
+ "fmla v25.4s, v17.4s, v9.s[1]\n"
+ "ldr q17, [%[b_ptr0], #0x50]\n"
+ "fmla v24.4s, v18.4s, v8.s[2]\n"
+ "fmla v25.4s, v18.4s, v9.s[2]\n"
+ "ldr q18, [%[b_ptr0], #0x60]\n"
+ "add %[b_ptr0], %[b_ptr0], #0x80\n"
+ "fmla v24.4s, v19.4s, v8.s[3]\n"
+ "fmla v25.4s, v19.4s, v9.s[3]\n"
+ "b.ne 2b\n"
+ "1:\n"
+ "ldr q19, [%[b_ptr0], #-0x10]\n"
+ "prfm PSTL1KEEP, [%[c_ptr0]]\n"
+ "prfm PSTL1KEEP, [c_ptr1]\n"
+ "cbz %[regs], 3f\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "ldr q8, [%[a_ptr0]]\n"
+ "fmla v25.4s, v16.4s, v1.s[0]\n"
+ "ldr q9, [a_ptr1]\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
+ "fmla v24.4s, v17.4s, v0.s[1]\n"
+ "add a_ptr1, a_ptr1, #0x10\n"
+ "fmla v25.4s, v17.4s, v1.s[1]\n"
+ "ldr q17, [%[b_ptr0], #0x10]\n"
+ "fmla v24.4s, v18.4s, v0.s[2]\n"
+ "fmla v25.4s, v18.4s, v1.s[2]\n"
+ "ldr q18, [%[b_ptr0], #0x20]\n"
+ "fmla v24.4s, v19.4s, v0.s[3]\n"
+ "fmla v25.4s, v19.4s, v1.s[3]\n"
+ "ldr q19, [%[b_ptr0], #0x30]\n"
+ "add %[b_ptr0], %[b_ptr0], #0x40\n"
+ "fmla v24.4s, v16.4s, v8.s[0]\n"
+ "fmla v25.4s, v16.4s, v9.s[0]\n"
+ "fmla v24.4s, v17.4s, v8.s[1]\n"
+ "fmla v25.4s, v17.4s, v9.s[1]\n"
+ "fmla v24.4s, v18.4s, v8.s[2]\n"
+ "fmla v25.4s, v18.4s, v9.s[2]\n"
+ "fmla v24.4s, v19.4s, v8.s[3]\n"
+ "fmla v25.4s, v19.4s, v9.s[3]\n"
+ "b 4f\n"
+ "3:\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "fmla v25.4s, v16.4s, v1.s[0]\n"
+ "fmla v24.4s, v17.4s, v0.s[1]\n"
+ "fmla v25.4s, v17.4s, v1.s[1]\n"
+ "fmla v24.4s, v18.4s, v0.s[2]\n"
+ "fmla v25.4s, v18.4s, v1.s[2]\n"
+ "fmla v24.4s, v19.4s, v0.s[3]\n"
+ "fmla v25.4s, v19.4s, v1.s[3]\n"
+ "4:\n"
+ "cbz %[blocks], 5f\n"
+ "6:\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "subs %[blocks], %[blocks], #0x1\n"
+ "add %[b_ptr0], %[b_ptr0], #0x10\n"
+ "ldr s0, [%[a_ptr0]]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x4\n"
+ "ldr s1, [a_ptr1]\n"
+ "add a_ptr1, a_ptr1, #0x4\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "fmla v25.4s, v16.4s, v1.s[0]\n"
+ "b.ne 6b\n"
+ "5:\n"
+ "ld1r {v22.4s}, [%[minptr]]\n"
+ "ld1r {v23.4s}, [%[maxptr]]\n"
+ "fmax v24.4s, v24.4s, v22.4s\n"
+ "fmax v25.4s, v25.4s, v22.4s\n"
+ "fmin v24.4s, v24.4s, v23.4s\n"
+ "fmin v25.4s, v25.4s, v23.4s\n"
+ "str q24, [%[c_ptr0]]\n"
+ "add %[c_ptr0], %[c_ptr0], #0x10\n"
+ "str q25, [c_ptr1]\n"
+ ".unreq a_ptr1\n"
+ ".unreq c_ptr1\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
+ : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "cc", "memory"
+ );
+ break;
+ case 3:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "c_ptr1 .req X2\n"
+ "c_ptr2 .req X3\n"
+ "ldr q24, [%[biasptr]]\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "ldr q0, [%[a_ptr0]]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "mov v25.16b, v24.16b\n"
+ "ldr q1, [a_ptr1]\n"
+ "mov v26.16b, v24.16b\n"
+ "ldr q2, [a_ptr2]\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "ldr q17, [%[b_ptr0], #0x10]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "ldr q18, [%[b_ptr0], #0x20]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
+ "add a_ptr1, a_ptr1, #0x10\n"
+ "add a_ptr2, a_ptr2, #0x10\n"
+ "add %[b_ptr0], %[b_ptr0], #0x40\n"
+ "cbz %[loops], 1f\n"
+ "2:\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "ldr q19, [%[b_ptr0], #-0x10]\n"
+ "fmla v25.4s, v16.4s, v1.s[0]\n"
+ "ldr q8, [%[a_ptr0]]\n"
+ "fmla v26.4s, v16.4s, v2.s[0]\n"
+ "ldr q9, [a_ptr1]\n"
+ "ldr q10, [a_ptr2]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "fmla v24.4s, v17.4s, v0.s[1]\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "fmla v25.4s, v17.4s, v1.s[1]\n"
+ "prfm PLDL1KEEP, [%[a_ptr0], #0x40]\n"
+ "fmla v26.4s, v17.4s, v2.s[1]\n"
+ "ldr q17, [%[b_ptr0], #0x10]\n"
+ "fmla v24.4s, v18.4s, v0.s[2]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x20\n"
+ "fmla v25.4s, v18.4s, v1.s[2]\n"
+ "add a_ptr1, a_ptr1, #0x20\n"
+ "fmla v26.4s, v18.4s, v2.s[2]\n"
+ "ldr q18, [%[b_ptr0], #0x20]\n"
+ "fmla v24.4s, v19.4s, v0.s[3]\n"
+ "ldr q0, [%[a_ptr0], #-0x10]\n"
+ "fmla v25.4s, v19.4s, v1.s[3]\n"
+ "ldr q1, [a_ptr1, #-0x10]\n"
+ "add a_ptr2, a_ptr2, #0x20\n"
+ "fmla v26.4s, v19.4s, v2.s[3]\n"
+ "ldr q19, [%[b_ptr0], #0x30]\n"
+ "fmla v24.4s, v16.4s, v8.s[0]\n"
+ "ldr q2, [a_ptr2, #-0x10]\n"
+ "fmla v25.4s, v16.4s, v9.s[0]\n"
+ "prfm PLDL1KEEP, [a_ptr1, #0x40]\n"
+ "fmla v26.4s, v16.4s, v10.s[0]\n"
+ "ldr q16, [%[b_ptr0], #0x40]\n"
+ "fmla v24.4s, v17.4s, v8.s[1]\n"
+ "prfm PLDL1KEEP, [a_ptr2, #0x40]\n"
+ "fmla v25.4s, v17.4s, v9.s[1]\n"
+ "fmla v26.4s, v17.4s, v10.s[1]\n"
+ "ldr q17, [%[b_ptr0], #0x50]\n"
+ "fmla v24.4s, v18.4s, v8.s[2]\n"
+ "fmla v25.4s, v18.4s, v9.s[2]\n"
+ "fmla v26.4s, v18.4s, v10.s[2]\n"
+ "ldr q18, [%[b_ptr0], #0x60]\n"
+ "fmla v24.4s, v19.4s, v8.s[3]\n"
+ "add %[b_ptr0], %[b_ptr0], #0x80\n"
+ "fmla v25.4s, v19.4s, v9.s[3]\n"
+ "fmla v26.4s, v19.4s, v10.s[3]\n"
+ "b.ne 2b\n"
+ "1:\n"
+ "ldr q19, [%[b_ptr0], #-0x10]\n"
+ "prfm PSTL1KEEP, [%[c_ptr0]]\n"
+ "prfm PSTL1KEEP, [c_ptr1]\n"
+ "prfm PSTL1KEEP, [c_ptr2]\n"
+ "cbz %[regs], 3f\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "ldr q8, [%[a_ptr0]]\n"
+ "fmla v25.4s, v16.4s, v1.s[0]\n"
+ "ldr q9, [a_ptr1]\n"
+ "fmla v26.4s, v16.4s, v2.s[0]\n"
+ "ldr q10, [a_ptr2]\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
+ "fmla v24.4s, v17.4s, v0.s[1]\n"
+ "add a_ptr1, a_ptr1, #0x10\n"
+ "fmla v25.4s, v17.4s, v1.s[1]\n"
+ "add a_ptr2, a_ptr2, #0x10\n"
+ "fmla v26.4s, v17.4s, v2.s[1]\n"
+ "ldr q17, [%[b_ptr0], #0x10]\n"
+ "fmla v24.4s, v18.4s, v0.s[2]\n"
+ "fmla v25.4s, v18.4s, v1.s[2]\n"
+ "fmla v26.4s, v18.4s, v2.s[2]\n"
+ "ldr q18, [%[b_ptr0], #0x20]\n"
+ "fmla v24.4s, v19.4s, v0.s[3]\n"
+ "fmla v25.4s, v19.4s, v1.s[3]\n"
+ "fmla v26.4s, v19.4s, v2.s[3]\n"
+ "ldr q19, [%[b_ptr0], #0x30]\n"
+ "fmla v24.4s, v16.4s, v8.s[0]\n"
+ "add %[b_ptr0], %[b_ptr0], #0x40\n"
+ "fmla v25.4s, v16.4s, v9.s[0]\n"
+ "fmla v26.4s, v16.4s, v10.s[0]\n"
+ "fmla v24.4s, v17.4s, v8.s[1]\n"
+ "fmla v25.4s, v17.4s, v9.s[1]\n"
+ "fmla v26.4s, v17.4s, v10.s[1]\n"
+ "fmla v24.4s, v18.4s, v8.s[2]\n"
+ "fmla v25.4s, v18.4s, v9.s[2]\n"
+ "fmla v26.4s, v18.4s, v10.s[2]\n"
+ "fmla v24.4s, v19.4s, v8.s[3]\n"
+ "fmla v25.4s, v19.4s, v9.s[3]\n"
+ "fmla v26.4s, v19.4s, v10.s[3]\n"
+ "b 4f\n"
+ "3:\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "fmla v25.4s, v16.4s, v1.s[0]\n"
+ "fmla v26.4s, v16.4s, v2.s[0]\n"
+ "fmla v24.4s, v17.4s, v0.s[1]\n"
+ "fmla v25.4s, v17.4s, v1.s[1]\n"
+ "fmla v26.4s, v17.4s, v2.s[1]\n"
+ "fmla v24.4s, v18.4s, v0.s[2]\n"
+ "fmla v25.4s, v18.4s, v1.s[2]\n"
+ "fmla v26.4s, v18.4s, v2.s[2]\n"
+ "fmla v24.4s, v19.4s, v0.s[3]\n"
+ "fmla v25.4s, v19.4s, v1.s[3]\n"
+ "fmla v26.4s, v19.4s, v2.s[3]\n"
+ "4:\n"
+ "cbz %[blocks], 5f\n"
+ "6:\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "subs %[blocks], %[blocks], #0x1\n"
+ "add %[b_ptr0], %[b_ptr0], #0x10\n"
+ "ldr s0, [%[a_ptr0]]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x4\n"
+ "ldr s1, [a_ptr1]\n"
+ "add a_ptr1, a_ptr1, #0x4\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "ldr s2, [a_ptr2]\n"
+ "fmla v25.4s, v16.4s, v1.s[0]\n"
+ "add a_ptr2, a_ptr2, #0x4\n"
+ "fmla v26.4s, v16.4s, v2.s[0]\n"
+ "b.ne 6b\n"
+ "5:\n"
+ "ld1r {v22.4s}, [%[minptr]]\n"
+ "ld1r {v23.4s}, [%[maxptr]]\n"
+ "fmax v24.4s, v24.4s, v22.4s\n"
+ "fmax v25.4s, v25.4s, v22.4s\n"
+ "fmax v26.4s, v26.4s, v22.4s\n"
+ "fmin v24.4s, v24.4s, v23.4s\n"
+ "fmin v25.4s, v25.4s, v23.4s\n"
+ "fmin v26.4s, v26.4s, v23.4s\n"
+ "str q24, [%[c_ptr0]]\n"
+ "add %[c_ptr0], %[c_ptr0], #0x10\n"
+ "str q25, [c_ptr1]\n"
+ "str q26, [c_ptr2]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
+ : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "cc", "memory"
+ );
+ break;
+ case 4:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "c_ptr1 .req X3\n"
+ "c_ptr2 .req X4\n"
+ "c_ptr3 .req X5\n"
+ "ldr q24, [%[biasptr]]\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "ldr q0, [%[a_ptr0]]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "mov v25.16b, v24.16b\n"
+ "ldr q1, [a_ptr1]\n"
+ "mov v26.16b, v24.16b\n"
+ "ldr q2, [a_ptr2]\n"
+ "mov v27.16b, v24.16b\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "ldr q17, [%[b_ptr0], #0x10]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "ldr q18, [%[b_ptr0], #0x20]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "ldr q3, [a_ptr3]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add a_ptr1, a_ptr1, #0x10\n"
+ "add a_ptr2, a_ptr2, #0x10\n"
+ "add a_ptr3, a_ptr3, #0x10\n"
+ "add %[b_ptr0], %[b_ptr0], #0x40\n"
+ "cbz %[loops], 1f\n"
+ "2:\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "ldr q19, [%[b_ptr0], #-0x10]\n"
+ "fmla v25.4s, v16.4s, v1.s[0]\n"
+ "ldr q8, [%[a_ptr0]]\n"
+ "fmla v26.4s, v16.4s, v2.s[0]\n"
+ "ldr q9, [a_ptr1]\n"
+ "fmla v27.4s, v16.4s, v3.s[0]\n"
+ "ldr q10, [a_ptr2]\n"
+ "fmla v24.4s, v17.4s, v0.s[1]\n"
+ "ldr q11, [a_ptr3]\n"
+ "fmla v25.4s, v17.4s, v1.s[1]\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "fmla v26.4s, v17.4s, v2.s[1]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "fmla v27.4s, v17.4s, v3.s[1]\n"
+ "ldr q17, [%[b_ptr0], #0x10]\n"
+ "fmla v24.4s, v18.4s, v0.s[2]\n"
+ "prfm PLDL1KEEP, [%[a_ptr0], #0x40]\n"
+ "fmla v25.4s, v18.4s, v1.s[2]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x20\n"
+ "fmla v26.4s, v18.4s, v2.s[2]\n"
+ "add a_ptr1, a_ptr1, #0x20\n"
+ "fmla v27.4s, v18.4s, v3.s[2]\n"
+ "ldr q18, [%[b_ptr0], #0x20]\n"
+ "fmla v24.4s, v19.4s, v0.s[3]\n"
+ "ldr q0, [%[a_ptr0], #-0x10]\n"
+ "fmla v25.4s, v19.4s, v1.s[3]\n"
+ "ldr q1, [a_ptr1, #-0x10]\n"
+ "fmla v26.4s, v19.4s, v2.s[3]\n"
+ "add a_ptr2, a_ptr2, #0x20\n"
+ "fmla v27.4s, v19.4s, v3.s[3]\n"
+ "ldr q19, [%[b_ptr0], #0x30]\n"
+ "fmla v24.4s, v16.4s, v8.s[0]\n"
+ "ldr q2, [a_ptr2, #-0x10]\n"
+ "fmla v25.4s, v16.4s, v9.s[0]\n"
+ "add a_ptr3, a_ptr3, #0x20\n"
+ "fmla v26.4s, v16.4s, v10.s[0]\n"
+ "ldr q3, [a_ptr3, #-0x10]\n"
+ "fmla v27.4s, v16.4s, v11.s[0]\n"
+ "ldr q16, [%[b_ptr0], #0x40]\n"
+ "fmla v24.4s, v17.4s, v8.s[1]\n"
+ "prfm PLDL1KEEP, [a_ptr1, #0x40]\n"
+ "fmla v25.4s, v17.4s, v9.s[1]\n"
+ "prfm PLDL1KEEP, [a_ptr2, #0x40]\n"
+ "fmla v26.4s, v17.4s, v10.s[1]\n"
+ "prfm PLDL1KEEP, [a_ptr3, #0x40]\n"
+ "fmla v27.4s, v17.4s, v11.s[1]\n"
+ "ldr q17, [%[b_ptr0], #0x50]\n"
+ "fmla v24.4s, v18.4s, v8.s[2]\n"
+ "fmla v25.4s, v18.4s, v9.s[2]\n"
+ "fmla v26.4s, v18.4s, v10.s[2]\n"
+ "fmla v27.4s, v18.4s, v11.s[2]\n"
+ "ldr q18, [%[b_ptr0], #0x60]\n"
+ "fmla v24.4s, v19.4s, v8.s[3]\n"
+ "add %[b_ptr0], %[b_ptr0], #0x80\n"
+ "fmla v25.4s, v19.4s, v9.s[3]\n"
+ "fmla v26.4s, v19.4s, v10.s[3]\n"
+ "fmla v27.4s, v19.4s, v11.s[3]\n"
+ "b.ne 2b\n"
+ "1:\n"
+ "ldr q19, [%[b_ptr0], #-0x10]\n"
+ "prfm PSTL1KEEP, [%[c_ptr0]]\n"
+ "prfm PSTL1KEEP, [c_ptr1]\n"
+ "prfm PSTL1KEEP, [c_ptr2]\n"
+ "prfm PSTL1KEEP, [c_ptr3]\n"
+ "cbz %[regs], 3f\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "ldr q8, [%[a_ptr0]]\n"
+ "fmla v25.4s, v16.4s, v1.s[0]\n"
+ "ldr q9, [a_ptr1]\n"
+ "fmla v26.4s, v16.4s, v2.s[0]\n"
+ "ldr q10, [a_ptr2]\n"
+ "fmla v27.4s, v16.4s, v3.s[0]\n"
+ "ldr q11, [a_ptr3]\n"
+ "fmla v24.4s, v17.4s, v0.s[1]\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "fmla v25.4s, v17.4s, v1.s[1]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
+ "fmla v26.4s, v17.4s, v2.s[1]\n"
+ "add a_ptr1, a_ptr1, #0x10\n"
+ "fmla v27.4s, v17.4s, v3.s[1]\n"
+ "ldr q17, [%[b_ptr0], #0x10]\n"
+ "fmla v24.4s, v18.4s, v0.s[2]\n"
+ "add a_ptr2, a_ptr2, #0x10\n"
+ "fmla v25.4s, v18.4s, v1.s[2]\n"
+ "add a_ptr3, a_ptr3, #0x10\n"
+ "fmla v26.4s, v18.4s, v2.s[2]\n"
+ "fmla v27.4s, v18.4s, v3.s[2]\n"
+ "ldr q18, [%[b_ptr0], #0x20]\n"
+ "fmla v24.4s, v19.4s, v0.s[3]\n"
+ "fmla v25.4s, v19.4s, v1.s[3]\n"
+ "fmla v26.4s, v19.4s, v2.s[3]\n"
+ "fmla v27.4s, v19.4s, v3.s[3]\n"
+ "ldr q19, [%[b_ptr0], #0x30]\n"
+ "fmla v24.4s, v16.4s, v8.s[0]\n"
+ "add %[b_ptr0], %[b_ptr0], #0x40\n"
+ "fmla v25.4s, v16.4s, v9.s[0]\n"
+ "fmla v26.4s, v16.4s, v10.s[0]\n"
+ "fmla v27.4s, v16.4s, v11.s[0]\n"
+ "fmla v24.4s, v17.4s, v8.s[1]\n"
+ "fmla v25.4s, v17.4s, v9.s[1]\n"
+ "fmla v26.4s, v17.4s, v10.s[1]\n"
+ "fmla v27.4s, v17.4s, v11.s[1]\n"
+ "fmla v24.4s, v18.4s, v8.s[2]\n"
+ "fmla v25.4s, v18.4s, v9.s[2]\n"
+ "fmla v26.4s, v18.4s, v10.s[2]\n"
+ "fmla v27.4s, v18.4s, v11.s[2]\n"
+ "fmla v24.4s, v19.4s, v8.s[3]\n"
+ "fmla v25.4s, v19.4s, v9.s[3]\n"
+ "fmla v26.4s, v19.4s, v10.s[3]\n"
+ "fmla v27.4s, v19.4s, v11.s[3]\n"
+ "b 4f\n"
+ "3:\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "fmla v25.4s, v16.4s, v1.s[0]\n"
+ "fmla v26.4s, v16.4s, v2.s[0]\n"
+ "fmla v27.4s, v16.4s, v3.s[0]\n"
+ "fmla v24.4s, v17.4s, v0.s[1]\n"
+ "fmla v25.4s, v17.4s, v1.s[1]\n"
+ "fmla v26.4s, v17.4s, v2.s[1]\n"
+ "fmla v27.4s, v17.4s, v3.s[1]\n"
+ "fmla v24.4s, v18.4s, v0.s[2]\n"
+ "fmla v25.4s, v18.4s, v1.s[2]\n"
+ "fmla v26.4s, v18.4s, v2.s[2]\n"
+ "fmla v27.4s, v18.4s, v3.s[2]\n"
+ "fmla v24.4s, v19.4s, v0.s[3]\n"
+ "fmla v25.4s, v19.4s, v1.s[3]\n"
+ "fmla v26.4s, v19.4s, v2.s[3]\n"
+ "fmla v27.4s, v19.4s, v3.s[3]\n"
+ "4:\n"
+ "cbz %[blocks], 5f\n"
+ "6:\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "subs %[blocks], %[blocks], #0x1\n"
+ "add %[b_ptr0], %[b_ptr0], #0x10\n"
+ "ldr s0, [%[a_ptr0]]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x4\n"
+ "ldr s1, [a_ptr1]\n"
+ "add a_ptr1, a_ptr1, #0x4\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "ldr s2, [a_ptr2]\n"
+ "fmla v25.4s, v16.4s, v1.s[0]\n"
+ "add a_ptr2, a_ptr2, #0x4\n"
+ "ldr s3, [a_ptr3]\n"
+ "fmla v26.4s, v16.4s, v2.s[0]\n"
+ "add a_ptr3, a_ptr3, #0x4\n"
+ "fmla v27.4s, v16.4s, v3.s[0]\n"
+ "b.ne 6b\n"
+ "5:\n"
+ "ld1r {v22.4s}, [%[minptr]]\n"
+ "ld1r {v23.4s}, [%[maxptr]]\n"
+ "fmax v24.4s, v24.4s, v22.4s\n"
+ "fmax v25.4s, v25.4s, v22.4s\n"
+ "fmax v26.4s, v26.4s, v22.4s\n"
+ "fmax v27.4s, v27.4s, v22.4s\n"
+ "fmin v24.4s, v24.4s, v23.4s\n"
+ "fmin v25.4s, v25.4s, v23.4s\n"
+ "fmin v26.4s, v26.4s, v23.4s\n"
+ "fmin v27.4s, v27.4s, v23.4s\n"
+ "str q24, [%[c_ptr0]]\n"
+ "add %[c_ptr0], %[c_ptr0], #0x10\n"
+ "str q25, [c_ptr1]\n"
+ "str q26, [c_ptr2]\n"
+ "str q27, [c_ptr3]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
+ : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "cc", "memory"
+ );
+ break;
+ case 5:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "c_ptr1 .req X4\n"
+ "c_ptr2 .req X5\n"
+ "c_ptr3 .req X6\n"
+ "c_ptr4 .req X7\n"
+ "ldr q24, [%[biasptr]]\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "ldr q0, [%[a_ptr0]]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "mov v25.16b, v24.16b\n"
+ "ldr q1, [a_ptr1]\n"
+ "mov v26.16b, v24.16b\n"
+ "ldr q2, [a_ptr2]\n"
+ "mov v27.16b, v24.16b\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "mov v28.16b, v24.16b\n"
+ "ldr q17, [%[b_ptr0], #0x10]\n"
+ "ldr q18, [%[b_ptr0], #0x20]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "ldr q3, [a_ptr3]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "ldr q4, [a_ptr4]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add a_ptr1, a_ptr1, #0x10\n"
+ "add a_ptr2, a_ptr2, #0x10\n"
+ "add a_ptr3, a_ptr3, #0x10\n"
+ "add a_ptr4, a_ptr4, #0x10\n"
+ "add %[b_ptr0], %[b_ptr0], #0x40\n"
+ "cbz %[loops], 1f\n"
+ "2:\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "ldr q19, [%[b_ptr0], #-0x10]\n"
+ "fmla v25.4s, v16.4s, v1.s[0]\n"
+ "ldr q8, [%[a_ptr0]]\n"
+ "fmla v26.4s, v16.4s, v2.s[0]\n"
+ "ldr q9, [a_ptr1]\n"
+ "fmla v27.4s, v16.4s, v3.s[0]\n"
+ "ldr q10, [a_ptr2]\n"
+ "fmla v28.4s, v16.4s, v4.s[0]\n"
+ "ldr q11, [a_ptr3]\n"
+ "fmla v24.4s, v17.4s, v0.s[1]\n"
+ "ldr q12, [a_ptr4]\n"
+ "fmla v25.4s, v17.4s, v1.s[1]\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "fmla v26.4s, v17.4s, v2.s[1]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "fmla v27.4s, v17.4s, v3.s[1]\n"
+ "prfm PLDL1KEEP, [%[a_ptr0], #0x40]\n"
+ "fmla v28.4s, v17.4s, v4.s[1]\n"
+ "ldr q17, [%[b_ptr0], #0x10]\n"
+ "fmla v24.4s, v18.4s, v0.s[2]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x20\n"
+ "fmla v25.4s, v18.4s, v1.s[2]\n"
+ "add a_ptr1, a_ptr1, #0x20\n"
+ "fmla v26.4s, v18.4s, v2.s[2]\n"
+ "add a_ptr2, a_ptr2, #0x20\n"
+ "fmla v27.4s, v18.4s, v3.s[2]\n"
+ "add a_ptr3, a_ptr3, #0x20\n"
+ "fmla v28.4s, v18.4s, v4.s[2]\n"
+ "ldr q18, [%[b_ptr0], #0x20]\n"
+ "fmla v24.4s, v19.4s, v0.s[3]\n"
+ "ldr q0, [%[a_ptr0], #-0x10]\n"
+ "fmla v25.4s, v19.4s, v1.s[3]\n"
+ "ldr q1, [a_ptr1, #-0x10]\n"
+ "fmla v26.4s, v19.4s, v2.s[3]\n"
+ "ldr q2, [a_ptr2, #-0x10]\n"
+ "fmla v27.4s, v19.4s, v3.s[3]\n"
+ "ldr q3, [a_ptr3, #-0x10]\n"
+ "fmla v28.4s, v19.4s, v4.s[3]\n"
+ "ldr q19, [%[b_ptr0], #0x30]\n"
+ "fmla v24.4s, v16.4s, v8.s[0]\n"
+ "add a_ptr4, a_ptr4, #0x20\n"
+ "fmla v25.4s, v16.4s, v9.s[0]\n"
+ "ldr q4, [a_ptr4, #-0x10]\n"
+ "fmla v26.4s, v16.4s, v10.s[0]\n"
+ "prfm PLDL1KEEP, [a_ptr1, #0x40]\n"
+ "fmla v27.4s, v16.4s, v11.s[0]\n"
+ "prfm PLDL1KEEP, [a_ptr2, #0x40]\n"
+ "fmla v28.4s, v16.4s, v12.s[0]\n"
+ "ldr q16, [%[b_ptr0], #0x40]\n"
+ "fmla v24.4s, v17.4s, v8.s[1]\n"
+ "prfm PLDL1KEEP, [a_ptr3, #0x40]\n"
+ "fmla v25.4s, v17.4s, v9.s[1]\n"
+ "fmla v26.4s, v17.4s, v10.s[1]\n"
+ "fmla v27.4s, v17.4s, v11.s[1]\n"
+ "fmla v28.4s, v17.4s, v12.s[1]\n"
+ "ldr q17, [%[b_ptr0], #0x50]\n"
+ "fmla v24.4s, v18.4s, v8.s[2]\n"
+ "fmla v25.4s, v18.4s, v9.s[2]\n"
+ "fmla v26.4s, v18.4s, v10.s[2]\n"
+ "fmla v27.4s, v18.4s, v11.s[2]\n"
+ "fmla v28.4s, v18.4s, v12.s[2]\n"
+ "ldr q18, [%[b_ptr0], #0x60]\n"
+ "fmla v24.4s, v19.4s, v8.s[3]\n"
+ "add %[b_ptr0], %[b_ptr0], #0x80\n"
+ "fmla v25.4s, v19.4s, v9.s[3]\n"
+ "fmla v26.4s, v19.4s, v10.s[3]\n"
+ "fmla v27.4s, v19.4s, v11.s[3]\n"
+ "fmla v28.4s, v19.4s, v12.s[3]\n"
+ "b.ne 2b\n"
+ "1:\n"
+ "ldr q19, [%[b_ptr0], #-0x10]\n"
+ "prfm PSTL1KEEP, [%[c_ptr0]]\n"
+ "prfm PSTL1KEEP, [c_ptr1]\n"
+ "prfm PSTL1KEEP, [c_ptr2]\n"
+ "prfm PSTL1KEEP, [c_ptr3]\n"
+ "prfm PSTL1KEEP, [c_ptr4]\n"
+ "cbz %[regs], 3f\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "ldr q8, [%[a_ptr0]]\n"
+ "fmla v25.4s, v16.4s, v1.s[0]\n"
+ "ldr q9, [a_ptr1]\n"
+ "fmla v26.4s, v16.4s, v2.s[0]\n"
+ "ldr q10, [a_ptr2]\n"
+ "fmla v27.4s, v16.4s, v3.s[0]\n"
+ "ldr q11, [a_ptr3]\n"
+ "fmla v28.4s, v16.4s, v4.s[0]\n"
+ "ldr q12, [a_ptr4]\n"
+ "fmla v24.4s, v17.4s, v0.s[1]\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "fmla v25.4s, v17.4s, v1.s[1]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
+ "fmla v26.4s, v17.4s, v2.s[1]\n"
+ "add a_ptr1, a_ptr1, #0x10\n"
+ "fmla v27.4s, v17.4s, v3.s[1]\n"
+ "add a_ptr2, a_ptr2, #0x10\n"
+ "fmla v28.4s, v17.4s, v4.s[1]\n"
+ "ldr q17, [%[b_ptr0], #0x10]\n"
+ "fmla v24.4s, v18.4s, v0.s[2]\n"
+ "add a_ptr3, a_ptr3, #0x10\n"
+ "fmla v25.4s, v18.4s, v1.s[2]\n"
+ "add a_ptr4, a_ptr4, #0x10\n"
+ "fmla v26.4s, v18.4s, v2.s[2]\n"
+ "fmla v27.4s, v18.4s, v3.s[2]\n"
+ "fmla v28.4s, v18.4s, v4.s[2]\n"
+ "ldr q18, [%[b_ptr0], #0x20]\n"
+ "fmla v24.4s, v19.4s, v0.s[3]\n"
+ "fmla v25.4s, v19.4s, v1.s[3]\n"
+ "fmla v26.4s, v19.4s, v2.s[3]\n"
+ "fmla v27.4s, v19.4s, v3.s[3]\n"
+ "fmla v28.4s, v19.4s, v4.s[3]\n"
+ "ldr q19, [%[b_ptr0], #0x30]\n"
+ "fmla v24.4s, v16.4s, v8.s[0]\n"
+ "add %[b_ptr0], %[b_ptr0], #0x40\n"
+ "fmla v25.4s, v16.4s, v9.s[0]\n"
+ "fmla v26.4s, v16.4s, v10.s[0]\n"
+ "fmla v27.4s, v16.4s, v11.s[0]\n"
+ "fmla v28.4s, v16.4s, v12.s[0]\n"
+ "fmla v24.4s, v17.4s, v8.s[1]\n"
+ "fmla v25.4s, v17.4s, v9.s[1]\n"
+ "fmla v26.4s, v17.4s, v10.s[1]\n"
+ "fmla v27.4s, v17.4s, v11.s[1]\n"
+ "fmla v28.4s, v17.4s, v12.s[1]\n"
+ "fmla v24.4s, v18.4s, v8.s[2]\n"
+ "fmla v25.4s, v18.4s, v9.s[2]\n"
+ "fmla v26.4s, v18.4s, v10.s[2]\n"
+ "fmla v27.4s, v18.4s, v11.s[2]\n"
+ "fmla v28.4s, v18.4s, v12.s[2]\n"
+ "fmla v24.4s, v19.4s, v8.s[3]\n"
+ "fmla v25.4s, v19.4s, v9.s[3]\n"
+ "fmla v26.4s, v19.4s, v10.s[3]\n"
+ "fmla v27.4s, v19.4s, v11.s[3]\n"
+ "fmla v28.4s, v19.4s, v12.s[3]\n"
+ "b 4f\n"
+ "3:\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "fmla v25.4s, v16.4s, v1.s[0]\n"
+ "fmla v26.4s, v16.4s, v2.s[0]\n"
+ "fmla v27.4s, v16.4s, v3.s[0]\n"
+ "fmla v28.4s, v16.4s, v4.s[0]\n"
+ "fmla v24.4s, v17.4s, v0.s[1]\n"
+ "fmla v25.4s, v17.4s, v1.s[1]\n"
+ "fmla v26.4s, v17.4s, v2.s[1]\n"
+ "fmla v27.4s, v17.4s, v3.s[1]\n"
+ "fmla v28.4s, v17.4s, v4.s[1]\n"
+ "fmla v24.4s, v18.4s, v0.s[2]\n"
+ "fmla v25.4s, v18.4s, v1.s[2]\n"
+ "fmla v26.4s, v18.4s, v2.s[2]\n"
+ "fmla v27.4s, v18.4s, v3.s[2]\n"
+ "fmla v28.4s, v18.4s, v4.s[2]\n"
+ "fmla v24.4s, v19.4s, v0.s[3]\n"
+ "fmla v25.4s, v19.4s, v1.s[3]\n"
+ "fmla v26.4s, v19.4s, v2.s[3]\n"
+ "fmla v27.4s, v19.4s, v3.s[3]\n"
+ "fmla v28.4s, v19.4s, v4.s[3]\n"
+ "4:\n"
+ "cbz %[blocks], 5f\n"
+ "6:\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "subs %[blocks], %[blocks], #0x1\n"
+ "add %[b_ptr0], %[b_ptr0], #0x10\n"
+ "ldr s0, [%[a_ptr0]]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x4\n"
+ "ldr s1, [a_ptr1]\n"
+ "add a_ptr1, a_ptr1, #0x4\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "ldr s2, [a_ptr2]\n"
+ "fmla v25.4s, v16.4s, v1.s[0]\n"
+ "add a_ptr2, a_ptr2, #0x4\n"
+ "ldr s3, [a_ptr3]\n"
+ "fmla v26.4s, v16.4s, v2.s[0]\n"
+ "add a_ptr3, a_ptr3, #0x4\n"
+ "ldr s4, [a_ptr4]\n"
+ "fmla v27.4s, v16.4s, v3.s[0]\n"
+ "add a_ptr4, a_ptr4, #0x4\n"
+ "fmla v28.4s, v16.4s, v4.s[0]\n"
+ "b.ne 6b\n"
+ "5:\n"
+ "ld1r {v22.4s}, [%[minptr]]\n"
+ "ld1r {v23.4s}, [%[maxptr]]\n"
+ "fmax v24.4s, v24.4s, v22.4s\n"
+ "fmax v25.4s, v25.4s, v22.4s\n"
+ "fmax v26.4s, v26.4s, v22.4s\n"
+ "fmax v27.4s, v27.4s, v22.4s\n"
+ "fmin v24.4s, v24.4s, v23.4s\n"
+ "fmin v25.4s, v25.4s, v23.4s\n"
+ "fmin v26.4s, v26.4s, v23.4s\n"
+ "fmin v27.4s, v27.4s, v23.4s\n"
+ "str q24, [%[c_ptr0]]\n"
+ "fmax v28.4s, v28.4s, v22.4s\n"
+ "add %[c_ptr0], %[c_ptr0], #0x10\n"
+ "str q25, [c_ptr1]\n"
+ "fmin v28.4s, v28.4s, v23.4s\n"
+ "str q26, [c_ptr2]\n"
+ "str q27, [c_ptr3]\n"
+ "str q28, [c_ptr4]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
+ : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "cc", "memory"
+ );
+ break;
+ case 6:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "c_ptr1 .req X5\n"
+ "c_ptr2 .req X6\n"
+ "c_ptr3 .req X7\n"
+ "c_ptr4 .req X8\n"
+ "c_ptr5 .req X9\n"
+ "ldr q24, [%[biasptr]]\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "ldr q0, [%[a_ptr0]]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "mov v25.16b, v24.16b\n"
+ "ldr q1, [a_ptr1]\n"
+ "mov v26.16b, v24.16b\n"
+ "ldr q2, [a_ptr2]\n"
+ "mov v27.16b, v24.16b\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "mov v28.16b, v24.16b\n"
+ "ldr q17, [%[b_ptr0], #0x10]\n"
+ "mov v29.16b, v24.16b\n"
+ "ldr q18, [%[b_ptr0], #0x20]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "ldr q3, [a_ptr3]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "ldr q4, [a_ptr4]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "ldr q5, [a_ptr5]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add a_ptr1, a_ptr1, #0x10\n"
+ "add a_ptr2, a_ptr2, #0x10\n"
+ "add a_ptr3, a_ptr3, #0x10\n"
+ "add a_ptr4, a_ptr4, #0x10\n"
+ "add a_ptr5, a_ptr5, #0x10\n"
+ "add %[b_ptr0], %[b_ptr0], #0x40\n"
+ "cbz %[loops], 1f\n"
+ "2:\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "ldr q19, [%[b_ptr0], #-0x10]\n"
+ "fmla v25.4s, v16.4s, v1.s[0]\n"
+ "ldr q8, [%[a_ptr0]]\n"
+ "fmla v26.4s, v16.4s, v2.s[0]\n"
+ "ldr q9, [a_ptr1]\n"
+ "fmla v27.4s, v16.4s, v3.s[0]\n"
+ "ldr q10, [a_ptr2]\n"
+ "fmla v28.4s, v16.4s, v4.s[0]\n"
+ "ldr q11, [a_ptr3]\n"
+ "fmla v29.4s, v16.4s, v5.s[0]\n"
+ "ldr q12, [a_ptr4]\n"
+ "fmla v24.4s, v17.4s, v0.s[1]\n"
+ "ldr q13, [a_ptr5]\n"
+ "fmla v25.4s, v17.4s, v1.s[1]\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "fmla v26.4s, v17.4s, v2.s[1]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "fmla v27.4s, v17.4s, v3.s[1]\n"
+ "prfm PLDL1KEEP, [%[a_ptr0], #0x40]\n"
+ "fmla v28.4s, v17.4s, v4.s[1]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x20\n"
+ "fmla v29.4s, v17.4s, v5.s[1]\n"
+ "ldr q17, [%[b_ptr0], #0x10]\n"
+ "fmla v24.4s, v18.4s, v0.s[2]\n"
+ "add a_ptr1, a_ptr1, #0x20\n"
+ "fmla v25.4s, v18.4s, v1.s[2]\n"
+ "add a_ptr2, a_ptr2, #0x20\n"
+ "fmla v26.4s, v18.4s, v2.s[2]\n"
+ "add a_ptr3, a_ptr3, #0x20\n"
+ "fmla v27.4s, v18.4s, v3.s[2]\n"
+ "add a_ptr4, a_ptr4, #0x20\n"
+ "fmla v28.4s, v18.4s, v4.s[2]\n"
+ "add a_ptr5, a_ptr5, #0x20\n"
+ "fmla v29.4s, v18.4s, v5.s[2]\n"
+ "ldr q18, [%[b_ptr0], #0x20]\n"
+ "fmla v24.4s, v19.4s, v0.s[3]\n"
+ "ldr q0, [%[a_ptr0], #-0x10]\n"
+ "fmla v25.4s, v19.4s, v1.s[3]\n"
+ "ldr q1, [a_ptr1, #-0x10]\n"
+ "fmla v26.4s, v19.4s, v2.s[3]\n"
+ "ldr q2, [a_ptr2, #-0x10]\n"
+ "fmla v27.4s, v19.4s, v3.s[3]\n"
+ "ldr q3, [a_ptr3, #-0x10]\n"
+ "fmla v28.4s, v19.4s, v4.s[3]\n"
+ "ldr q4, [a_ptr4, #-0x10]\n"
+ "fmla v29.4s, v19.4s, v5.s[3]\n"
+ "ldr q19, [%[b_ptr0], #0x30]\n"
+ "fmla v24.4s, v16.4s, v8.s[0]\n"
+ "ldr q5, [a_ptr5, #-0x10]\n"
+ "fmla v25.4s, v16.4s, v9.s[0]\n"
+ "prfm PLDL1KEEP, [a_ptr1, #0x40]\n"
+ "fmla v26.4s, v16.4s, v10.s[0]\n"
+ "prfm PLDL1KEEP, [a_ptr2, #0x40]\n"
+ "fmla v27.4s, v16.4s, v11.s[0]\n"
+ "prfm PLDL1KEEP, [a_ptr3, #0x40]\n"
+ "fmla v28.4s, v16.4s, v12.s[0]\n"
+ "fmla v29.4s, v16.4s, v13.s[0]\n"
+ "ldr q16, [%[b_ptr0], #0x40]\n"
+ "fmla v24.4s, v17.4s, v8.s[1]\n"
+ "fmla v25.4s, v17.4s, v9.s[1]\n"
+ "fmla v26.4s, v17.4s, v10.s[1]\n"
+ "fmla v27.4s, v17.4s, v11.s[1]\n"
+ "fmla v28.4s, v17.4s, v12.s[1]\n"
+ "fmla v29.4s, v17.4s, v13.s[1]\n"
+ "ldr q17, [%[b_ptr0], #0x50]\n"
+ "fmla v24.4s, v18.4s, v8.s[2]\n"
+ "fmla v25.4s, v18.4s, v9.s[2]\n"
+ "fmla v26.4s, v18.4s, v10.s[2]\n"
+ "fmla v27.4s, v18.4s, v11.s[2]\n"
+ "fmla v28.4s, v18.4s, v12.s[2]\n"
+ "fmla v29.4s, v18.4s, v13.s[2]\n"
+ "ldr q18, [%[b_ptr0], #0x60]\n"
+ "fmla v24.4s, v19.4s, v8.s[3]\n"
+ "add %[b_ptr0], %[b_ptr0], #0x80\n"
+ "fmla v25.4s, v19.4s, v9.s[3]\n"
+ "fmla v26.4s, v19.4s, v10.s[3]\n"
+ "fmla v27.4s, v19.4s, v11.s[3]\n"
+ "fmla v28.4s, v19.4s, v12.s[3]\n"
+ "fmla v29.4s, v19.4s, v13.s[3]\n"
+ "b.ne 2b\n"
+ "1:\n"
+ "ldr q19, [%[b_ptr0], #-0x10]\n"
+ "prfm PSTL1KEEP, [%[c_ptr0]]\n"
+ "prfm PSTL1KEEP, [c_ptr1]\n"
+ "prfm PSTL1KEEP, [c_ptr2]\n"
+ "prfm PSTL1KEEP, [c_ptr3]\n"
+ "prfm PSTL1KEEP, [c_ptr4]\n"
+ "prfm PSTL1KEEP, [c_ptr5]\n"
+ "cbz %[regs], 3f\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "ldr q8, [%[a_ptr0]]\n"
+ "fmla v25.4s, v16.4s, v1.s[0]\n"
+ "ldr q9, [a_ptr1]\n"
+ "fmla v26.4s, v16.4s, v2.s[0]\n"
+ "ldr q10, [a_ptr2]\n"
+ "fmla v27.4s, v16.4s, v3.s[0]\n"
+ "ldr q11, [a_ptr3]\n"
+ "fmla v28.4s, v16.4s, v4.s[0]\n"
+ "ldr q12, [a_ptr4]\n"
+ "fmla v29.4s, v16.4s, v5.s[0]\n"
+ "ldr q13, [a_ptr5]\n"
+ "fmla v24.4s, v17.4s, v0.s[1]\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "fmla v25.4s, v17.4s, v1.s[1]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
+ "fmla v26.4s, v17.4s, v2.s[1]\n"
+ "add a_ptr1, a_ptr1, #0x10\n"
+ "fmla v27.4s, v17.4s, v3.s[1]\n"
+ "add a_ptr2, a_ptr2, #0x10\n"
+ "fmla v28.4s, v17.4s, v4.s[1]\n"
+ "add a_ptr3, a_ptr3, #0x10\n"
+ "fmla v29.4s, v17.4s, v5.s[1]\n"
+ "ldr q17, [%[b_ptr0], #0x10]\n"
+ "fmla v24.4s, v18.4s, v0.s[2]\n"
+ "add a_ptr4, a_ptr4, #0x10\n"
+ "fmla v25.4s, v18.4s, v1.s[2]\n"
+ "add a_ptr5, a_ptr5, #0x10\n"
+ "fmla v26.4s, v18.4s, v2.s[2]\n"
+ "fmla v27.4s, v18.4s, v3.s[2]\n"
+ "fmla v28.4s, v18.4s, v4.s[2]\n"
+ "fmla v29.4s, v18.4s, v5.s[2]\n"
+ "ldr q18, [%[b_ptr0], #0x20]\n"
+ "fmla v24.4s, v19.4s, v0.s[3]\n"
+ "fmla v25.4s, v19.4s, v1.s[3]\n"
+ "fmla v26.4s, v19.4s, v2.s[3]\n"
+ "fmla v27.4s, v19.4s, v3.s[3]\n"
+ "fmla v28.4s, v19.4s, v4.s[3]\n"
+ "fmla v29.4s, v19.4s, v5.s[3]\n"
+ "ldr q19, [%[b_ptr0], #0x30]\n"
+ "fmla v24.4s, v16.4s, v8.s[0]\n"
+ "add %[b_ptr0], %[b_ptr0], #0x40\n"
+ "fmla v25.4s, v16.4s, v9.s[0]\n"
+ "fmla v26.4s, v16.4s, v10.s[0]\n"
+ "fmla v27.4s, v16.4s, v11.s[0]\n"
+ "fmla v28.4s, v16.4s, v12.s[0]\n"
+ "fmla v29.4s, v16.4s, v13.s[0]\n"
+ "fmla v24.4s, v17.4s, v8.s[1]\n"
+ "fmla v25.4s, v17.4s, v9.s[1]\n"
+ "fmla v26.4s, v17.4s, v10.s[1]\n"
+ "fmla v27.4s, v17.4s, v11.s[1]\n"
+ "fmla v28.4s, v17.4s, v12.s[1]\n"
+ "fmla v29.4s, v17.4s, v13.s[1]\n"
+ "fmla v24.4s, v18.4s, v8.s[2]\n"
+ "fmla v25.4s, v18.4s, v9.s[2]\n"
+ "fmla v26.4s, v18.4s, v10.s[2]\n"
+ "fmla v27.4s, v18.4s, v11.s[2]\n"
+ "fmla v28.4s, v18.4s, v12.s[2]\n"
+ "fmla v29.4s, v18.4s, v13.s[2]\n"
+ "fmla v24.4s, v19.4s, v8.s[3]\n"
+ "fmla v25.4s, v19.4s, v9.s[3]\n"
+ "fmla v26.4s, v19.4s, v10.s[3]\n"
+ "fmla v27.4s, v19.4s, v11.s[3]\n"
+ "fmla v28.4s, v19.4s, v12.s[3]\n"
+ "fmla v29.4s, v19.4s, v13.s[3]\n"
+ "b 4f\n"
+ "3:\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "fmla v25.4s, v16.4s, v1.s[0]\n"
+ "fmla v26.4s, v16.4s, v2.s[0]\n"
+ "fmla v27.4s, v16.4s, v3.s[0]\n"
+ "fmla v28.4s, v16.4s, v4.s[0]\n"
+ "fmla v29.4s, v16.4s, v5.s[0]\n"
+ "fmla v24.4s, v17.4s, v0.s[1]\n"
+ "fmla v25.4s, v17.4s, v1.s[1]\n"
+ "fmla v26.4s, v17.4s, v2.s[1]\n"
+ "fmla v27.4s, v17.4s, v3.s[1]\n"
+ "fmla v28.4s, v17.4s, v4.s[1]\n"
+ "fmla v29.4s, v17.4s, v5.s[1]\n"
+ "fmla v24.4s, v18.4s, v0.s[2]\n"
+ "fmla v25.4s, v18.4s, v1.s[2]\n"
+ "fmla v26.4s, v18.4s, v2.s[2]\n"
+ "fmla v27.4s, v18.4s, v3.s[2]\n"
+ "fmla v28.4s, v18.4s, v4.s[2]\n"
+ "fmla v29.4s, v18.4s, v5.s[2]\n"
+ "fmla v24.4s, v19.4s, v0.s[3]\n"
+ "fmla v25.4s, v19.4s, v1.s[3]\n"
+ "fmla v26.4s, v19.4s, v2.s[3]\n"
+ "fmla v27.4s, v19.4s, v3.s[3]\n"
+ "fmla v28.4s, v19.4s, v4.s[3]\n"
+ "fmla v29.4s, v19.4s, v5.s[3]\n"
+ "4:\n"
+ "cbz %[blocks], 5f\n"
+ "6:\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "subs %[blocks], %[blocks], #0x1\n"
+ "add %[b_ptr0], %[b_ptr0], #0x10\n"
+ "ldr s0, [%[a_ptr0]]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x4\n"
+ "ldr s1, [a_ptr1]\n"
+ "add a_ptr1, a_ptr1, #0x4\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "ldr s2, [a_ptr2]\n"
+ "fmla v25.4s, v16.4s, v1.s[0]\n"
+ "add a_ptr2, a_ptr2, #0x4\n"
+ "ldr s3, [a_ptr3]\n"
+ "fmla v26.4s, v16.4s, v2.s[0]\n"
+ "add a_ptr3, a_ptr3, #0x4\n"
+ "ldr s4, [a_ptr4]\n"
+ "fmla v27.4s, v16.4s, v3.s[0]\n"
+ "add a_ptr4, a_ptr4, #0x4\n"
+ "ldr s5, [a_ptr5]\n"
+ "fmla v28.4s, v16.4s, v4.s[0]\n"
+ "add a_ptr5, a_ptr5, #0x4\n"
+ "fmla v29.4s, v16.4s, v5.s[0]\n"
+ "b.ne 6b\n"
+ "5:\n"
+ "ld1r {v22.4s}, [%[minptr]]\n"
+ "ld1r {v23.4s}, [%[maxptr]]\n"
+ "fmax v24.4s, v24.4s, v22.4s\n"
+ "fmax v25.4s, v25.4s, v22.4s\n"
+ "fmax v26.4s, v26.4s, v22.4s\n"
+ "fmax v27.4s, v27.4s, v22.4s\n"
+ "fmin v24.4s, v24.4s, v23.4s\n"
+ "fmin v25.4s, v25.4s, v23.4s\n"
+ "fmin v26.4s, v26.4s, v23.4s\n"
+ "fmin v27.4s, v27.4s, v23.4s\n"
+ "str q24, [%[c_ptr0]]\n"
+ "fmax v28.4s, v28.4s, v22.4s\n"
+ "add %[c_ptr0], %[c_ptr0], #0x10\n"
+ "fmax v29.4s, v29.4s, v22.4s\n"
+ "str q25, [c_ptr1]\n"
+ "fmin v28.4s, v28.4s, v23.4s\n"
+ "fmin v29.4s, v29.4s, v23.4s\n"
+ "str q26, [c_ptr2]\n"
+ "str q27, [c_ptr3]\n"
+ "str q28, [c_ptr4]\n"
+ "str q29, [c_ptr5]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
+ : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "cc", "memory"
+ );
+ break;
+ case 7:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "c_ptr1 .req X6\n"
+ "c_ptr2 .req X7\n"
+ "c_ptr3 .req X8\n"
+ "c_ptr4 .req X9\n"
+ "c_ptr5 .req X10\n"
+ "c_ptr6 .req X11\n"
+ "ldr q24, [%[biasptr]]\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "ldr q0, [%[a_ptr0]]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "mov v25.16b, v24.16b\n"
+ "ldr q1, [a_ptr1]\n"
+ "mov v26.16b, v24.16b\n"
+ "ldr q2, [a_ptr2]\n"
+ "mov v27.16b, v24.16b\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "mov v28.16b, v24.16b\n"
+ "ldr q17, [%[b_ptr0], #0x10]\n"
+ "mov v29.16b, v24.16b\n"
+ "ldr q18, [%[b_ptr0], #0x20]\n"
+ "mov v30.16b, v24.16b\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "ldr q3, [a_ptr3]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "ldr q4, [a_ptr4]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "ldr q5, [a_ptr5]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "ldr q6, [a_ptr6]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr1, a_ptr1, #0x10\n"
+ "add a_ptr2, a_ptr2, #0x10\n"
+ "add a_ptr3, a_ptr3, #0x10\n"
+ "add a_ptr4, a_ptr4, #0x10\n"
+ "add a_ptr5, a_ptr5, #0x10\n"
+ "add a_ptr6, a_ptr6, #0x10\n"
+ "add %[b_ptr0], %[b_ptr0], #0x40\n"
+ "cbz %[loops], 1f\n"
+ "2:\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "ldr q19, [%[b_ptr0], #-0x10]\n"
+ "fmla v25.4s, v16.4s, v1.s[0]\n"
+ "ldr q8, [%[a_ptr0]]\n"
+ "fmla v26.4s, v16.4s, v2.s[0]\n"
+ "ldr q9, [a_ptr1]\n"
+ "fmla v27.4s, v16.4s, v3.s[0]\n"
+ "ldr q10, [a_ptr2]\n"
+ "fmla v28.4s, v16.4s, v4.s[0]\n"
+ "ldr q11, [a_ptr3]\n"
+ "fmla v29.4s, v16.4s, v5.s[0]\n"
+ "ldr q12, [a_ptr4]\n"
+ "fmla v30.4s, v16.4s, v6.s[0]\n"
+ "ldr q13, [a_ptr5]\n"
+ "fmla v24.4s, v17.4s, v0.s[1]\n"
+ "ldr q14, [a_ptr6]\n"
+ "fmla v25.4s, v17.4s, v1.s[1]\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "fmla v26.4s, v17.4s, v2.s[1]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "fmla v27.4s, v17.4s, v3.s[1]\n"
+ "prfm PLDL1KEEP, [%[a_ptr0], #0x40]\n"
+ "fmla v28.4s, v17.4s, v4.s[1]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x20\n"
+ "fmla v29.4s, v17.4s, v5.s[1]\n"
+ "add a_ptr1, a_ptr1, #0x20\n"
+ "fmla v30.4s, v17.4s, v6.s[1]\n"
+ "ldr q17, [%[b_ptr0], #0x10]\n"
+ "fmla v24.4s, v18.4s, v0.s[2]\n"
+ "add a_ptr2, a_ptr2, #0x20\n"
+ "fmla v25.4s, v18.4s, v1.s[2]\n"
+ "add a_ptr3, a_ptr3, #0x20\n"
+ "fmla v26.4s, v18.4s, v2.s[2]\n"
+ "add a_ptr4, a_ptr4, #0x20\n"
+ "fmla v27.4s, v18.4s, v3.s[2]\n"
+ "add a_ptr5, a_ptr5, #0x20\n"
+ "fmla v28.4s, v18.4s, v4.s[2]\n"
+ "add a_ptr6, a_ptr6, #0x20\n"
+ "fmla v29.4s, v18.4s, v5.s[2]\n"
+ "prfm PLDL1KEEP, [a_ptr1, #0x40]\n"
+ "fmla v30.4s, v18.4s, v6.s[2]\n"
+ "ldr q18, [%[b_ptr0], #0x20]\n"
+ "fmla v24.4s, v19.4s, v0.s[3]\n"
+ "ldr q0, [%[a_ptr0], #-0x10]\n"
+ "fmla v25.4s, v19.4s, v1.s[3]\n"
+ "ldr q1, [a_ptr1, #-0x10]\n"
+ "fmla v26.4s, v19.4s, v2.s[3]\n"
+ "ldr q2, [a_ptr2, #-0x10]\n"
+ "fmla v27.4s, v19.4s, v3.s[3]\n"
+ "ldr q3, [a_ptr3, #-0x10]\n"
+ "fmla v28.4s, v19.4s, v4.s[3]\n"
+ "ldr q4, [a_ptr4, #-0x10]\n"
+ "fmla v29.4s, v19.4s, v5.s[3]\n"
+ "ldr q5, [a_ptr5, #-0x10]\n"
+ "fmla v30.4s, v19.4s, v6.s[3]\n"
+ "ldr q19, [%[b_ptr0], #0x30]\n"
+ "fmla v24.4s, v16.4s, v8.s[0]\n"
+ "ldr q6, [a_ptr6, #-0x10]\n"
+ "fmla v25.4s, v16.4s, v9.s[0]\n"
+ "prfm PLDL1KEEP, [a_ptr2, #0x40]\n"
+ "fmla v26.4s, v16.4s, v10.s[0]\n"
+ "prfm PLDL1KEEP, [a_ptr3, #0x40]\n"
+ "fmla v27.4s, v16.4s, v11.s[0]\n"
+ "fmla v28.4s, v16.4s, v12.s[0]\n"
+ "fmla v29.4s, v16.4s, v13.s[0]\n"
+ "fmla v30.4s, v16.4s, v14.s[0]\n"
+ "ldr q16, [%[b_ptr0], #0x40]\n"
+ "fmla v24.4s, v17.4s, v8.s[1]\n"
+ "fmla v25.4s, v17.4s, v9.s[1]\n"
+ "fmla v26.4s, v17.4s, v10.s[1]\n"
+ "fmla v27.4s, v17.4s, v11.s[1]\n"
+ "fmla v28.4s, v17.4s, v12.s[1]\n"
+ "fmla v29.4s, v17.4s, v13.s[1]\n"
+ "fmla v30.4s, v17.4s, v14.s[1]\n"
+ "ldr q17, [%[b_ptr0], #0x50]\n"
+ "fmla v24.4s, v18.4s, v8.s[2]\n"
+ "fmla v25.4s, v18.4s, v9.s[2]\n"
+ "fmla v26.4s, v18.4s, v10.s[2]\n"
+ "fmla v27.4s, v18.4s, v11.s[2]\n"
+ "fmla v28.4s, v18.4s, v12.s[2]\n"
+ "fmla v29.4s, v18.4s, v13.s[2]\n"
+ "fmla v30.4s, v18.4s, v14.s[2]\n"
+ "ldr q18, [%[b_ptr0], #0x60]\n"
+ "fmla v24.4s, v19.4s, v8.s[3]\n"
+ "add %[b_ptr0], %[b_ptr0], #0x80\n"
+ "fmla v25.4s, v19.4s, v9.s[3]\n"
+ "fmla v26.4s, v19.4s, v10.s[3]\n"
+ "fmla v27.4s, v19.4s, v11.s[3]\n"
+ "fmla v28.4s, v19.4s, v12.s[3]\n"
+ "fmla v29.4s, v19.4s, v13.s[3]\n"
+ "fmla v30.4s, v19.4s, v14.s[3]\n"
+ "b.ne 2b\n"
+ "1:\n"
+ "ldr q19, [%[b_ptr0], #-0x10]\n"
+ "prfm PSTL1KEEP, [%[c_ptr0]]\n"
+ "prfm PSTL1KEEP, [c_ptr1]\n"
+ "prfm PSTL1KEEP, [c_ptr2]\n"
+ "prfm PSTL1KEEP, [c_ptr3]\n"
+ "prfm PSTL1KEEP, [c_ptr4]\n"
+ "prfm PSTL1KEEP, [c_ptr5]\n"
+ "prfm PSTL1KEEP, [c_ptr6]\n"
+ "cbz %[regs], 3f\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "ldr q8, [%[a_ptr0]]\n"
+ "fmla v25.4s, v16.4s, v1.s[0]\n"
+ "ldr q9, [a_ptr1]\n"
+ "fmla v26.4s, v16.4s, v2.s[0]\n"
+ "ldr q10, [a_ptr2]\n"
+ "fmla v27.4s, v16.4s, v3.s[0]\n"
+ "ldr q11, [a_ptr3]\n"
+ "fmla v28.4s, v16.4s, v4.s[0]\n"
+ "ldr q12, [a_ptr4]\n"
+ "fmla v29.4s, v16.4s, v5.s[0]\n"
+ "ldr q13, [a_ptr5]\n"
+ "fmla v30.4s, v16.4s, v6.s[0]\n"
+ "ldr q14, [a_ptr6]\n"
+ "fmla v24.4s, v17.4s, v0.s[1]\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "fmla v25.4s, v17.4s, v1.s[1]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
+ "fmla v26.4s, v17.4s, v2.s[1]\n"
+ "add a_ptr1, a_ptr1, #0x10\n"
+ "fmla v27.4s, v17.4s, v3.s[1]\n"
+ "add a_ptr2, a_ptr2, #0x10\n"
+ "fmla v28.4s, v17.4s, v4.s[1]\n"
+ "add a_ptr3, a_ptr3, #0x10\n"
+ "fmla v29.4s, v17.4s, v5.s[1]\n"
+ "add a_ptr4, a_ptr4, #0x10\n"
+ "fmla v30.4s, v17.4s, v6.s[1]\n"
+ "ldr q17, [%[b_ptr0], #0x10]\n"
+ "fmla v24.4s, v18.4s, v0.s[2]\n"
+ "add a_ptr5, a_ptr5, #0x10\n"
+ "fmla v25.4s, v18.4s, v1.s[2]\n"
+ "add a_ptr6, a_ptr6, #0x10\n"
+ "fmla v26.4s, v18.4s, v2.s[2]\n"
+ "fmla v27.4s, v18.4s, v3.s[2]\n"
+ "fmla v28.4s, v18.4s, v4.s[2]\n"
+ "fmla v29.4s, v18.4s, v5.s[2]\n"
+ "fmla v30.4s, v18.4s, v6.s[2]\n"
+ "ldr q18, [%[b_ptr0], #0x20]\n"
+ "fmla v24.4s, v19.4s, v0.s[3]\n"
+ "fmla v25.4s, v19.4s, v1.s[3]\n"
+ "fmla v26.4s, v19.4s, v2.s[3]\n"
+ "fmla v27.4s, v19.4s, v3.s[3]\n"
+ "fmla v28.4s, v19.4s, v4.s[3]\n"
+ "fmla v29.4s, v19.4s, v5.s[3]\n"
+ "fmla v30.4s, v19.4s, v6.s[3]\n"
+ "ldr q19, [%[b_ptr0], #0x30]\n"
+ "fmla v24.4s, v16.4s, v8.s[0]\n"
+ "add %[b_ptr0], %[b_ptr0], #0x40\n"
+ "fmla v25.4s, v16.4s, v9.s[0]\n"
+ "fmla v26.4s, v16.4s, v10.s[0]\n"
+ "fmla v27.4s, v16.4s, v11.s[0]\n"
+ "fmla v28.4s, v16.4s, v12.s[0]\n"
+ "fmla v29.4s, v16.4s, v13.s[0]\n"
+ "fmla v30.4s, v16.4s, v14.s[0]\n"
+ "fmla v24.4s, v17.4s, v8.s[1]\n"
+ "fmla v25.4s, v17.4s, v9.s[1]\n"
+ "fmla v26.4s, v17.4s, v10.s[1]\n"
+ "fmla v27.4s, v17.4s, v11.s[1]\n"
+ "fmla v28.4s, v17.4s, v12.s[1]\n"
+ "fmla v29.4s, v17.4s, v13.s[1]\n"
+ "fmla v30.4s, v17.4s, v14.s[1]\n"
+ "fmla v24.4s, v18.4s, v8.s[2]\n"
+ "fmla v25.4s, v18.4s, v9.s[2]\n"
+ "fmla v26.4s, v18.4s, v10.s[2]\n"
+ "fmla v27.4s, v18.4s, v11.s[2]\n"
+ "fmla v28.4s, v18.4s, v12.s[2]\n"
+ "fmla v29.4s, v18.4s, v13.s[2]\n"
+ "fmla v30.4s, v18.4s, v14.s[2]\n"
+ "fmla v24.4s, v19.4s, v8.s[3]\n"
+ "fmla v25.4s, v19.4s, v9.s[3]\n"
+ "fmla v26.4s, v19.4s, v10.s[3]\n"
+ "fmla v27.4s, v19.4s, v11.s[3]\n"
+ "fmla v28.4s, v19.4s, v12.s[3]\n"
+ "fmla v29.4s, v19.4s, v13.s[3]\n"
+ "fmla v30.4s, v19.4s, v14.s[3]\n"
+ "b 4f\n"
+ "3:\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "fmla v25.4s, v16.4s, v1.s[0]\n"
+ "fmla v26.4s, v16.4s, v2.s[0]\n"
+ "fmla v27.4s, v16.4s, v3.s[0]\n"
+ "fmla v28.4s, v16.4s, v4.s[0]\n"
+ "fmla v29.4s, v16.4s, v5.s[0]\n"
+ "fmla v30.4s, v16.4s, v6.s[0]\n"
+ "fmla v24.4s, v17.4s, v0.s[1]\n"
+ "fmla v25.4s, v17.4s, v1.s[1]\n"
+ "fmla v26.4s, v17.4s, v2.s[1]\n"
+ "fmla v27.4s, v17.4s, v3.s[1]\n"
+ "fmla v28.4s, v17.4s, v4.s[1]\n"
+ "fmla v29.4s, v17.4s, v5.s[1]\n"
+ "fmla v30.4s, v17.4s, v6.s[1]\n"
+ "fmla v24.4s, v18.4s, v0.s[2]\n"
+ "fmla v25.4s, v18.4s, v1.s[2]\n"
+ "fmla v26.4s, v18.4s, v2.s[2]\n"
+ "fmla v27.4s, v18.4s, v3.s[2]\n"
+ "fmla v28.4s, v18.4s, v4.s[2]\n"
+ "fmla v29.4s, v18.4s, v5.s[2]\n"
+ "fmla v30.4s, v18.4s, v6.s[2]\n"
+ "fmla v24.4s, v19.4s, v0.s[3]\n"
+ "fmla v25.4s, v19.4s, v1.s[3]\n"
+ "fmla v26.4s, v19.4s, v2.s[3]\n"
+ "fmla v27.4s, v19.4s, v3.s[3]\n"
+ "fmla v28.4s, v19.4s, v4.s[3]\n"
+ "fmla v29.4s, v19.4s, v5.s[3]\n"
+ "fmla v30.4s, v19.4s, v6.s[3]\n"
+ "4:\n"
+ "cbz %[blocks], 5f\n"
+ "6:\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "subs %[blocks], %[blocks], #0x1\n"
+ "add %[b_ptr0], %[b_ptr0], #0x10\n"
+ "ldr s0, [%[a_ptr0]]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x4\n"
+ "ldr s1, [a_ptr1]\n"
+ "add a_ptr1, a_ptr1, #0x4\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "ldr s2, [a_ptr2]\n"
+ "fmla v25.4s, v16.4s, v1.s[0]\n"
+ "add a_ptr2, a_ptr2, #0x4\n"
+ "ldr s3, [a_ptr3]\n"
+ "fmla v26.4s, v16.4s, v2.s[0]\n"
+ "add a_ptr3, a_ptr3, #0x4\n"
+ "ldr s4, [a_ptr4]\n"
+ "fmla v27.4s, v16.4s, v3.s[0]\n"
+ "add a_ptr4, a_ptr4, #0x4\n"
+ "ldr s5, [a_ptr5]\n"
+ "fmla v28.4s, v16.4s, v4.s[0]\n"
+ "add a_ptr5, a_ptr5, #0x4\n"
+ "ldr s6, [a_ptr6]\n"
+ "fmla v29.4s, v16.4s, v5.s[0]\n"
+ "add a_ptr6, a_ptr6, #0x4\n"
+ "fmla v30.4s, v16.4s, v6.s[0]\n"
+ "b.ne 6b\n"
+ "5:\n"
+ "ld1r {v22.4s}, [%[minptr]]\n"
+ "ld1r {v23.4s}, [%[maxptr]]\n"
+ "fmax v24.4s, v24.4s, v22.4s\n"
+ "fmax v25.4s, v25.4s, v22.4s\n"
+ "fmax v26.4s, v26.4s, v22.4s\n"
+ "fmax v27.4s, v27.4s, v22.4s\n"
+ "fmin v24.4s, v24.4s, v23.4s\n"
+ "fmin v25.4s, v25.4s, v23.4s\n"
+ "fmin v26.4s, v26.4s, v23.4s\n"
+ "fmin v27.4s, v27.4s, v23.4s\n"
+ "str q24, [%[c_ptr0]]\n"
+ "fmax v28.4s, v28.4s, v22.4s\n"
+ "add %[c_ptr0], %[c_ptr0], #0x10\n"
+ "fmax v29.4s, v29.4s, v22.4s\n"
+ "str q25, [c_ptr1]\n"
+ "fmax v30.4s, v30.4s, v22.4s\n"
+ "fmin v28.4s, v28.4s, v23.4s\n"
+ "fmin v29.4s, v29.4s, v23.4s\n"
+ "str q26, [c_ptr2]\n"
+ "fmin v30.4s, v30.4s, v23.4s\n"
+ "str q27, [c_ptr3]\n"
+ "str q28, [c_ptr4]\n"
+ "str q29, [c_ptr5]\n"
+ "str q30, [c_ptr6]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
+ : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "cc", "memory"
+ );
+ break;
+ default:
+ case 8:
+ __asm __volatile (
+ "a_ptr1 .req X0\n"
+ "a_ptr2 .req X1\n"
+ "a_ptr3 .req X2\n"
+ "a_ptr4 .req X3\n"
+ "a_ptr5 .req X4\n"
+ "a_ptr6 .req X5\n"
+ "a_ptr7 .req X6\n"
+ "c_ptr1 .req X7\n"
+ "c_ptr2 .req X8\n"
+ "c_ptr3 .req X9\n"
+ "c_ptr4 .req X10\n"
+ "c_ptr5 .req X11\n"
+ "c_ptr6 .req X12\n"
+ "c_ptr7 .req X13\n"
+ "ldr q24, [%[biasptr]]\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "ldr q0, [%[a_ptr0]]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "mov v25.16b, v24.16b\n"
+ "ldr q1, [a_ptr1]\n"
+ "mov v26.16b, v24.16b\n"
+ "ldr q2, [a_ptr2]\n"
+ "mov v27.16b, v24.16b\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "mov v28.16b, v24.16b\n"
+ "ldr q17, [%[b_ptr0], #0x10]\n"
+ "mov v29.16b, v24.16b\n"
+ "ldr q18, [%[b_ptr0], #0x20]\n"
+ "mov v30.16b, v24.16b\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "mov v31.16b, v24.16b\n"
+ "ldr q3, [a_ptr3]\n"
+ "add a_ptr4, a_ptr3, %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "ldr q4, [a_ptr4]\n"
+ "add a_ptr5, a_ptr4, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "ldr q5, [a_ptr5]\n"
+ "add a_ptr6, a_ptr5, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "ldr q6, [a_ptr6]\n"
+ "add a_ptr7, a_ptr6, %[lda]\n"
+ "add c_ptr4, c_ptr3, %[ldc]\n"
+ "ldr q7, [a_ptr7]\n"
+ "add c_ptr5, c_ptr4, %[ldc]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
+ "add c_ptr6, c_ptr5, %[ldc]\n"
+ "add a_ptr1, a_ptr1, #0x10\n"
+ "add c_ptr7, c_ptr6, %[ldc]\n"
+ "add a_ptr2, a_ptr2, #0x10\n"
+ "add a_ptr3, a_ptr3, #0x10\n"
+ "add a_ptr4, a_ptr4, #0x10\n"
+ "add a_ptr5, a_ptr5, #0x10\n"
+ "add a_ptr6, a_ptr6, #0x10\n"
+ "add a_ptr7, a_ptr7, #0x10\n"
+ "add %[b_ptr0], %[b_ptr0], #0x40\n"
+ "cbz %[loops], 1f\n"
+ "2:\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "ldr q19, [%[b_ptr0], #-0x10]\n"
+ "fmla v25.4s, v16.4s, v1.s[0]\n"
+ "ldr q8, [%[a_ptr0]]\n"
+ "fmla v26.4s, v16.4s, v2.s[0]\n"
+ "ldr q9, [a_ptr1]\n"
+ "fmla v27.4s, v16.4s, v3.s[0]\n"
+ "ldr q10, [a_ptr2]\n"
+ "fmla v28.4s, v16.4s, v4.s[0]\n"
+ "ldr q11, [a_ptr3]\n"
+ "fmla v29.4s, v16.4s, v5.s[0]\n"
+ "ldr q12, [a_ptr4]\n"
+ "fmla v30.4s, v16.4s, v6.s[0]\n"
+ "ldr q13, [a_ptr5]\n"
+ "fmla v31.4s, v16.4s, v7.s[0]\n"
+ "ldr q14, [a_ptr6]\n"
+ "fmla v24.4s, v17.4s, v0.s[1]\n"
+ "ldr q15, [a_ptr7]\n"
+ "fmla v25.4s, v17.4s, v1.s[1]\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "fmla v26.4s, v17.4s, v2.s[1]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "fmla v27.4s, v17.4s, v3.s[1]\n"
+ "prfm PLDL1KEEP, [%[a_ptr0], #0x40]\n"
+ "fmla v28.4s, v17.4s, v4.s[1]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x20\n"
+ "fmla v29.4s, v17.4s, v5.s[1]\n"
+ "add a_ptr1, a_ptr1, #0x20\n"
+ "fmla v30.4s, v17.4s, v6.s[1]\n"
+ "add a_ptr2, a_ptr2, #0x20\n"
+ "fmla v31.4s, v17.4s, v7.s[1]\n"
+ "ldr q17, [%[b_ptr0], #0x10]\n"
+ "fmla v24.4s, v18.4s, v0.s[2]\n"
+ "add a_ptr3, a_ptr3, #0x20\n"
+ "fmla v25.4s, v18.4s, v1.s[2]\n"
+ "add a_ptr4, a_ptr4, #0x20\n"
+ "fmla v26.4s, v18.4s, v2.s[2]\n"
+ "add a_ptr5, a_ptr5, #0x20\n"
+ "fmla v27.4s, v18.4s, v3.s[2]\n"
+ "add a_ptr6, a_ptr6, #0x20\n"
+ "fmla v28.4s, v18.4s, v4.s[2]\n"
+ "add a_ptr7, a_ptr7, #0x20\n"
+ "fmla v29.4s, v18.4s, v5.s[2]\n"
+ "prfm PLDL1KEEP, [a_ptr1, #0x40]\n"
+ "fmla v30.4s, v18.4s, v6.s[2]\n"
+ "prfm PLDL1KEEP, [a_ptr2, #0x40]\n"
+ "fmla v31.4s, v18.4s, v7.s[2]\n"
+ "ldr q18, [%[b_ptr0], #0x20]\n"
+ "fmla v24.4s, v19.4s, v0.s[3]\n"
+ "ldr q0, [%[a_ptr0], #-0x10]\n"
+ "fmla v25.4s, v19.4s, v1.s[3]\n"
+ "ldr q1, [a_ptr1, #-0x10]\n"
+ "fmla v26.4s, v19.4s, v2.s[3]\n"
+ "ldr q2, [a_ptr2, #-0x10]\n"
+ "fmla v27.4s, v19.4s, v3.s[3]\n"
+ "ldr q3, [a_ptr3, #-0x10]\n"
+ "fmla v28.4s, v19.4s, v4.s[3]\n"
+ "ldr q4, [a_ptr4, #-0x10]\n"
+ "fmla v29.4s, v19.4s, v5.s[3]\n"
+ "ldr q5, [a_ptr5, #-0x10]\n"
+ "fmla v30.4s, v19.4s, v6.s[3]\n"
+ "ldr q6, [a_ptr6, #-0x10]\n"
+ "fmla v31.4s, v19.4s, v7.s[3]\n"
+ "ldr q19, [%[b_ptr0], #0x30]\n"
+ "fmla v24.4s, v16.4s, v8.s[0]\n"
+ "ldr q7, [a_ptr7, #-0x10]\n"
+ "fmla v25.4s, v16.4s, v9.s[0]\n"
+ "prfm PLDL1KEEP, [a_ptr3, #0x40]\n"
+ "fmla v26.4s, v16.4s, v10.s[0]\n"
+ "fmla v27.4s, v16.4s, v11.s[0]\n"
+ "fmla v28.4s, v16.4s, v12.s[0]\n"
+ "fmla v29.4s, v16.4s, v13.s[0]\n"
+ "fmla v30.4s, v16.4s, v14.s[0]\n"
+ "fmla v31.4s, v16.4s, v15.s[0]\n"
+ "ldr q16, [%[b_ptr0], #0x40]\n"
+ "fmla v24.4s, v17.4s, v8.s[1]\n"
+ "fmla v25.4s, v17.4s, v9.s[1]\n"
+ "fmla v26.4s, v17.4s, v10.s[1]\n"
+ "fmla v27.4s, v17.4s, v11.s[1]\n"
+ "fmla v28.4s, v17.4s, v12.s[1]\n"
+ "fmla v29.4s, v17.4s, v13.s[1]\n"
+ "fmla v30.4s, v17.4s, v14.s[1]\n"
+ "fmla v31.4s, v17.4s, v15.s[1]\n"
+ "ldr q17, [%[b_ptr0], #0x50]\n"
+ "fmla v24.4s, v18.4s, v8.s[2]\n"
+ "fmla v25.4s, v18.4s, v9.s[2]\n"
+ "fmla v26.4s, v18.4s, v10.s[2]\n"
+ "fmla v27.4s, v18.4s, v11.s[2]\n"
+ "fmla v28.4s, v18.4s, v12.s[2]\n"
+ "fmla v29.4s, v18.4s, v13.s[2]\n"
+ "fmla v30.4s, v18.4s, v14.s[2]\n"
+ "fmla v31.4s, v18.4s, v15.s[2]\n"
+ "ldr q18, [%[b_ptr0], #0x60]\n"
+ "fmla v24.4s, v19.4s, v8.s[3]\n"
+ "add %[b_ptr0], %[b_ptr0], #0x80\n"
+ "fmla v25.4s, v19.4s, v9.s[3]\n"
+ "fmla v26.4s, v19.4s, v10.s[3]\n"
+ "fmla v27.4s, v19.4s, v11.s[3]\n"
+ "fmla v28.4s, v19.4s, v12.s[3]\n"
+ "fmla v29.4s, v19.4s, v13.s[3]\n"
+ "fmla v30.4s, v19.4s, v14.s[3]\n"
+ "fmla v31.4s, v19.4s, v15.s[3]\n"
+ "b.ne 2b\n"
+ "1:\n"
+ "ldr q19, [%[b_ptr0], #-0x10]\n"
+ "prfm PSTL1KEEP, [%[c_ptr0]]\n"
+ "prfm PSTL1KEEP, [c_ptr1]\n"
+ "prfm PSTL1KEEP, [c_ptr2]\n"
+ "prfm PSTL1KEEP, [c_ptr3]\n"
+ "prfm PSTL1KEEP, [c_ptr4]\n"
+ "prfm PSTL1KEEP, [c_ptr5]\n"
+ "prfm PSTL1KEEP, [c_ptr6]\n"
+ "prfm PSTL1KEEP, [c_ptr7]\n"
+ "cbz %[regs], 3f\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "ldr q8, [%[a_ptr0]]\n"
+ "fmla v25.4s, v16.4s, v1.s[0]\n"
+ "ldr q9, [a_ptr1]\n"
+ "fmla v26.4s, v16.4s, v2.s[0]\n"
+ "ldr q10, [a_ptr2]\n"
+ "fmla v27.4s, v16.4s, v3.s[0]\n"
+ "ldr q11, [a_ptr3]\n"
+ "fmla v28.4s, v16.4s, v4.s[0]\n"
+ "ldr q12, [a_ptr4]\n"
+ "fmla v29.4s, v16.4s, v5.s[0]\n"
+ "ldr q13, [a_ptr5]\n"
+ "fmla v30.4s, v16.4s, v6.s[0]\n"
+ "ldr q14, [a_ptr6]\n"
+ "fmla v31.4s, v16.4s, v7.s[0]\n"
+ "ldr q15, [a_ptr7]\n"
+ "fmla v24.4s, v17.4s, v0.s[1]\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "fmla v25.4s, v17.4s, v1.s[1]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
+ "fmla v26.4s, v17.4s, v2.s[1]\n"
+ "add a_ptr1, a_ptr1, #0x10\n"
+ "fmla v27.4s, v17.4s, v3.s[1]\n"
+ "add a_ptr2, a_ptr2, #0x10\n"
+ "fmla v28.4s, v17.4s, v4.s[1]\n"
+ "add a_ptr3, a_ptr3, #0x10\n"
+ "fmla v29.4s, v17.4s, v5.s[1]\n"
+ "add a_ptr4, a_ptr4, #0x10\n"
+ "fmla v30.4s, v17.4s, v6.s[1]\n"
+ "add a_ptr5, a_ptr5, #0x10\n"
+ "fmla v31.4s, v17.4s, v7.s[1]\n"
+ "ldr q17, [%[b_ptr0], #0x10]\n"
+ "fmla v24.4s, v18.4s, v0.s[2]\n"
+ "add a_ptr6, a_ptr6, #0x10\n"
+ "fmla v25.4s, v18.4s, v1.s[2]\n"
+ "add a_ptr7, a_ptr7, #0x10\n"
+ "fmla v26.4s, v18.4s, v2.s[2]\n"
+ "fmla v27.4s, v18.4s, v3.s[2]\n"
+ "fmla v28.4s, v18.4s, v4.s[2]\n"
+ "fmla v29.4s, v18.4s, v5.s[2]\n"
+ "fmla v30.4s, v18.4s, v6.s[2]\n"
+ "fmla v31.4s, v18.4s, v7.s[2]\n"
+ "ldr q18, [%[b_ptr0], #0x20]\n"
+ "fmla v24.4s, v19.4s, v0.s[3]\n"
+ "fmla v25.4s, v19.4s, v1.s[3]\n"
+ "fmla v26.4s, v19.4s, v2.s[3]\n"
+ "fmla v27.4s, v19.4s, v3.s[3]\n"
+ "fmla v28.4s, v19.4s, v4.s[3]\n"
+ "fmla v29.4s, v19.4s, v5.s[3]\n"
+ "fmla v30.4s, v19.4s, v6.s[3]\n"
+ "fmla v31.4s, v19.4s, v7.s[3]\n"
+ "ldr q19, [%[b_ptr0], #0x30]\n"
+ "fmla v24.4s, v16.4s, v8.s[0]\n"
+ "add %[b_ptr0], %[b_ptr0], #0x40\n"
+ "fmla v25.4s, v16.4s, v9.s[0]\n"
+ "fmla v26.4s, v16.4s, v10.s[0]\n"
+ "fmla v27.4s, v16.4s, v11.s[0]\n"
+ "fmla v28.4s, v16.4s, v12.s[0]\n"
+ "fmla v29.4s, v16.4s, v13.s[0]\n"
+ "fmla v30.4s, v16.4s, v14.s[0]\n"
+ "fmla v31.4s, v16.4s, v15.s[0]\n"
+ "fmla v24.4s, v17.4s, v8.s[1]\n"
+ "fmla v25.4s, v17.4s, v9.s[1]\n"
+ "fmla v26.4s, v17.4s, v10.s[1]\n"
+ "fmla v27.4s, v17.4s, v11.s[1]\n"
+ "fmla v28.4s, v17.4s, v12.s[1]\n"
+ "fmla v29.4s, v17.4s, v13.s[1]\n"
+ "fmla v30.4s, v17.4s, v14.s[1]\n"
+ "fmla v31.4s, v17.4s, v15.s[1]\n"
+ "fmla v24.4s, v18.4s, v8.s[2]\n"
+ "fmla v25.4s, v18.4s, v9.s[2]\n"
+ "fmla v26.4s, v18.4s, v10.s[2]\n"
+ "fmla v27.4s, v18.4s, v11.s[2]\n"
+ "fmla v28.4s, v18.4s, v12.s[2]\n"
+ "fmla v29.4s, v18.4s, v13.s[2]\n"
+ "fmla v30.4s, v18.4s, v14.s[2]\n"
+ "fmla v31.4s, v18.4s, v15.s[2]\n"
+ "fmla v24.4s, v19.4s, v8.s[3]\n"
+ "fmla v25.4s, v19.4s, v9.s[3]\n"
+ "fmla v26.4s, v19.4s, v10.s[3]\n"
+ "fmla v27.4s, v19.4s, v11.s[3]\n"
+ "fmla v28.4s, v19.4s, v12.s[3]\n"
+ "fmla v29.4s, v19.4s, v13.s[3]\n"
+ "fmla v30.4s, v19.4s, v14.s[3]\n"
+ "fmla v31.4s, v19.4s, v15.s[3]\n"
+ "b 4f\n"
+ "3:\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "fmla v25.4s, v16.4s, v1.s[0]\n"
+ "fmla v26.4s, v16.4s, v2.s[0]\n"
+ "fmla v27.4s, v16.4s, v3.s[0]\n"
+ "fmla v28.4s, v16.4s, v4.s[0]\n"
+ "fmla v29.4s, v16.4s, v5.s[0]\n"
+ "fmla v30.4s, v16.4s, v6.s[0]\n"
+ "fmla v31.4s, v16.4s, v7.s[0]\n"
+ "fmla v24.4s, v17.4s, v0.s[1]\n"
+ "fmla v25.4s, v17.4s, v1.s[1]\n"
+ "fmla v26.4s, v17.4s, v2.s[1]\n"
+ "fmla v27.4s, v17.4s, v3.s[1]\n"
+ "fmla v28.4s, v17.4s, v4.s[1]\n"
+ "fmla v29.4s, v17.4s, v5.s[1]\n"
+ "fmla v30.4s, v17.4s, v6.s[1]\n"
+ "fmla v31.4s, v17.4s, v7.s[1]\n"
+ "fmla v24.4s, v18.4s, v0.s[2]\n"
+ "fmla v25.4s, v18.4s, v1.s[2]\n"
+ "fmla v26.4s, v18.4s, v2.s[2]\n"
+ "fmla v27.4s, v18.4s, v3.s[2]\n"
+ "fmla v28.4s, v18.4s, v4.s[2]\n"
+ "fmla v29.4s, v18.4s, v5.s[2]\n"
+ "fmla v30.4s, v18.4s, v6.s[2]\n"
+ "fmla v31.4s, v18.4s, v7.s[2]\n"
+ "fmla v24.4s, v19.4s, v0.s[3]\n"
+ "fmla v25.4s, v19.4s, v1.s[3]\n"
+ "fmla v26.4s, v19.4s, v2.s[3]\n"
+ "fmla v27.4s, v19.4s, v3.s[3]\n"
+ "fmla v28.4s, v19.4s, v4.s[3]\n"
+ "fmla v29.4s, v19.4s, v5.s[3]\n"
+ "fmla v30.4s, v19.4s, v6.s[3]\n"
+ "fmla v31.4s, v19.4s, v7.s[3]\n"
+ "4:\n"
+ "cbz %[blocks], 5f\n"
+ "6:\n"
+ "ldr q16, [%[b_ptr0]]\n"
+ "subs %[blocks], %[blocks], #0x1\n"
+ "add %[b_ptr0], %[b_ptr0], #0x10\n"
+ "ldr s0, [%[a_ptr0]]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x4\n"
+ "ldr s1, [a_ptr1]\n"
+ "add a_ptr1, a_ptr1, #0x4\n"
+ "fmla v24.4s, v16.4s, v0.s[0]\n"
+ "ldr s2, [a_ptr2]\n"
+ "fmla v25.4s, v16.4s, v1.s[0]\n"
+ "add a_ptr2, a_ptr2, #0x4\n"
+ "ldr s3, [a_ptr3]\n"
+ "fmla v26.4s, v16.4s, v2.s[0]\n"
+ "add a_ptr3, a_ptr3, #0x4\n"
+ "ldr s4, [a_ptr4]\n"
+ "fmla v27.4s, v16.4s, v3.s[0]\n"
+ "add a_ptr4, a_ptr4, #0x4\n"
+ "ldr s5, [a_ptr5]\n"
+ "fmla v28.4s, v16.4s, v4.s[0]\n"
+ "add a_ptr5, a_ptr5, #0x4\n"
+ "ldr s6, [a_ptr6]\n"
+ "fmla v29.4s, v16.4s, v5.s[0]\n"
+ "add a_ptr6, a_ptr6, #0x4\n"
+ "ldr s7, [a_ptr7]\n"
+ "fmla v30.4s, v16.4s, v6.s[0]\n"
+ "add a_ptr7, a_ptr7, #0x4\n"
+ "fmla v31.4s, v16.4s, v7.s[0]\n"
+ "b.ne 6b\n"
+ "5:\n"
+ "ld1r {v22.4s}, [%[minptr]]\n"
+ "ld1r {v23.4s}, [%[maxptr]]\n"
+ "fmax v24.4s, v24.4s, v22.4s\n"
+ "fmax v25.4s, v25.4s, v22.4s\n"
+ "fmax v26.4s, v26.4s, v22.4s\n"
+ "fmax v27.4s, v27.4s, v22.4s\n"
+ "fmin v24.4s, v24.4s, v23.4s\n"
+ "fmin v25.4s, v25.4s, v23.4s\n"
+ "fmin v26.4s, v26.4s, v23.4s\n"
+ "fmin v27.4s, v27.4s, v23.4s\n"
+ "str q24, [%[c_ptr0]]\n"
+ "fmax v28.4s, v28.4s, v22.4s\n"
+ "add %[c_ptr0], %[c_ptr0], #0x10\n"
+ "fmax v29.4s, v29.4s, v22.4s\n"
+ "str q25, [c_ptr1]\n"
+ "fmax v30.4s, v30.4s, v22.4s\n"
+ "fmin v28.4s, v28.4s, v23.4s\n"
+ "fmax v31.4s, v31.4s, v22.4s\n"
+ "str q26, [c_ptr2]\n"
+ "fmin v29.4s, v29.4s, v23.4s\n"
+ "fmin v30.4s, v30.4s, v23.4s\n"
+ "fmin v31.4s, v31.4s, v23.4s\n"
+ "str q27, [c_ptr3]\n"
+ "str q28, [c_ptr4]\n"
+ "str q29, [c_ptr5]\n"
+ "str q30, [c_ptr6]\n"
+ "str q31, [c_ptr7]\n"
+ ".unreq a_ptr1\n"
+ ".unreq a_ptr2\n"
+ ".unreq a_ptr3\n"
+ ".unreq a_ptr4\n"
+ ".unreq a_ptr5\n"
+ ".unreq a_ptr6\n"
+ ".unreq a_ptr7\n"
+ ".unreq c_ptr1\n"
+ ".unreq c_ptr2\n"
+ ".unreq c_ptr3\n"
+ ".unreq c_ptr4\n"
+ ".unreq c_ptr5\n"
+ ".unreq c_ptr6\n"
+ ".unreq c_ptr7\n"
+ : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
+ : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "cc", "memory"
+ );
+ break;
+ }
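+ // The stores above may have targeted the local staging buffer; copy the valid width back into the real C rows.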
+ if (use_result_buffer) {
+ for(int cy=0; cy<std::min(M-y, 8); cy++) {
+ for(unsigned int cx=0; cx<width; cx++) {
+ c_ptr_real[cy * ldc + cx] = result_buffer[cy * 4 + cx];
+ }
+ }
+ }
+ }
+ }
+}
+
+} // namespace arm_gemm
+
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4.hpp
index 5a6fabcfa9..bdc62ea181 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4.hpp
@@ -61,7 +61,7 @@ public:
static constexpr bool supports_append()
{
- return false;
+ return true;
}
static constexpr bool supports_bias()
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4/a55.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4/a55.cpp
index 3ecf0151aa..7c08aa2165 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4/a55.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4/a55.cpp
@@ -35,7 +35,6 @@ namespace arm_gemm {
void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, int32_t *C, int ldc, int M, int N, int K, const int32_t *bias, Activation act, bool append) {
UNUSED(bias);
UNUSED(act);
-
const int K_stride = ((K + 3) / 4) * 4;
const long loops_count = ((K + 16) / 32) - 1;
K -= loops_count * 32;
@@ -80,6 +79,7 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
"temploadreg1 .req X1\n"
"temploadreg2 .req X2\n"
"temploadreg3 .req X3\n"
+ "cbnz %[append], 1f\n"
"movi v16.4s, #0\n"
"ldr q0, [%[a_ptr0]]\n"
"movi v17.4s, #0\n"
@@ -95,8 +95,26 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
"ldr d14, [%[b_ptr0], #0x60]\n"
"ldr temploadreg2, [%[b_ptr0], #0x68]\n"
"add %[b_ptr0], %[b_ptr0], #0x80\n"
- "cbz %[loops], 1f\n"
- "2:\n"
+ "cbz %[loops], 2f\n"
+ "b 3f\n"
+ "1:\n"
+ "ldr q16, [%[c_ptr0]]\n"
+ "ldr q17, [%[c_ptr0], #0x10]\n"
+ "ldr q18, [%[c_ptr0], #0x20]\n"
+ "ldr q19, [%[c_ptr0], #0x30]\n"
+ "ldr q0, [%[a_ptr0]]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
+ "ldr q8, [%[b_ptr0]]\n"
+ "ldr q9, [%[b_ptr0], #0x10]\n"
+ "ldr q10, [%[b_ptr0], #0x20]\n"
+ "ldr q11, [%[b_ptr0], #0x30]\n"
+ "ldr q12, [%[b_ptr0], #0x40]\n"
+ "ldr q13, [%[b_ptr0], #0x50]\n"
+ "ldr d14, [%[b_ptr0], #0x60]\n"
+ "ldr temploadreg2, [%[b_ptr0], #0x68]\n"
+ "add %[b_ptr0], %[b_ptr0], #0x80\n"
+ "cbz %[loops], 2f\n"
+ "3:\n"
".inst 0x4f80e110 // sdot v16.4s, v8.16b, v0.4b[0]\n"
"ins v14.d[1], temploadreg2\n"
".inst 0x4f80e131 // sdot v17.4s, v9.16b, v0.4b[0]\n"
@@ -236,14 +254,14 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
"ins v11.d[1], temploadreg3\n"
"ins v12.d[1], temploadreg0\n"
"ins v13.d[1], temploadreg1\n"
- "b.ne 2b\n"
- "1:\n"
+ "b.ne 3b\n"
+ "2:\n"
"ins v14.d[1], temploadreg2\n"
"prfm PSTL1KEEP, [%[c_ptr0]]\n"
"ldr d15, [%[b_ptr0], #-0x10]\n"
"ldr temploadreg3, [%[b_ptr0], #-0x8]\n"
"ins v15.d[1], temploadreg3\n"
- "cbz %[regs], 3f\n"
+ "cbz %[regs], 4f\n"
".inst 0x4f80e110 // sdot v16.4s, v8.16b, v0.4b[0]\n"
"ldr d4, [%[a_ptr0]]\n"
".inst 0x4f80e131 // sdot v17.4s, v9.16b, v0.4b[0]\n"
@@ -354,8 +372,8 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
".inst 0x4fa4e9b1 // sdot v17.4s, v13.16b, v4.4b[3]\n"
".inst 0x4fa4e9d2 // sdot v18.4s, v14.16b, v4.4b[3]\n"
".inst 0x4fa4e9f3 // sdot v19.4s, v15.16b, v4.4b[3]\n"
- "b 4f\n"
- "3:\n"
+ "b 5f\n"
+ "4:\n"
".inst 0x4f80e110 // sdot v16.4s, v8.16b, v0.4b[0]\n"
"ldr d8, [%[b_ptr0]]\n"
".inst 0x4f80e131 // sdot v17.4s, v9.16b, v0.4b[0]\n"
@@ -397,9 +415,9 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
".inst 0x4fa0e9b1 // sdot v17.4s, v13.16b, v0.4b[3]\n"
".inst 0x4fa0e9d2 // sdot v18.4s, v14.16b, v0.4b[3]\n"
".inst 0x4fa0e9f3 // sdot v19.4s, v15.16b, v0.4b[3]\n"
- "4:\n"
- "cbz %[blocks], 5f\n"
- "6:\n"
+ "5:\n"
+ "cbz %[blocks], 6f\n"
+ "7:\n"
"ldr q8, [%[b_ptr0]]\n"
"subs %[blocks], %[blocks], #0x1\n"
"ldr q9, [%[b_ptr0], #0x10]\n"
@@ -412,17 +430,17 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
".inst 0x4f80e131 // sdot v17.4s, v9.16b, v0.4b[0]\n"
".inst 0x4f80e152 // sdot v18.4s, v10.16b, v0.4b[0]\n"
".inst 0x4f80e173 // sdot v19.4s, v11.16b, v0.4b[0]\n"
- "b.ne 6b\n"
- "5:\n"
- "cbz %[odds], 7f\n"
+ "b.ne 7b\n"
+ "6:\n"
+ "cbz %[odds], 8f\n"
"ld1 {v0.b}[0], [%[a_ptr0]], #1\n"
"subs %[odds], %[odds], #0x1\n"
- "b.eq 8f\n"
+ "b.eq 9f\n"
"ld1 {v0.b}[1], [%[a_ptr0]], #1\n"
"subs %[odds], %[odds], #0x1\n"
- "b.eq 8f\n"
+ "b.eq 9f\n"
"ld1 {v0.b}[2], [%[a_ptr0]]\n"
- "8:\n"
+ "9:\n"
"ldr q8, [%[b_ptr0]]\n"
"ldr q9, [%[b_ptr0], #0x10]\n"
"ldr q10, [%[b_ptr0], #0x20]\n"
@@ -431,7 +449,7 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
".inst 0x4f80e131 // sdot v17.4s, v9.16b, v0.4b[0]\n"
".inst 0x4f80e152 // sdot v18.4s, v10.16b, v0.4b[0]\n"
".inst 0x4f80e173 // sdot v19.4s, v11.16b, v0.4b[0]\n"
- "7:\n"
+ "8:\n"
"str q16, [%[c_ptr0]]\n"
"str q17, [%[c_ptr0], #0x10]\n"
"str q18, [%[c_ptr0], #0x20]\n"
@@ -454,74 +472,99 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
"temploadreg1 .req X3\n"
"temploadreg2 .req X4\n"
"temploadreg3 .req X5\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "cbnz %[append], 1f\n"
"movi v16.4s, #0\n"
"ldr q0, [%[a_ptr0]]\n"
"movi v17.4s, #0\n"
- "ldr q8, [%[b_ptr0]]\n"
+ "ldr q1, [a_ptr1]\n"
"movi v18.4s, #0\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
+ "ldr q8, [%[b_ptr0]]\n"
"movi v19.4s, #0\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
+ "ldr q9, [%[b_ptr0], #0x10]\n"
"movi v20.4s, #0\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
+ "ldr q10, [%[b_ptr0], #0x20]\n"
"movi v21.4s, #0\n"
- "ldr q12, [%[b_ptr0], #0x40]\n"
+ "ldr q11, [%[b_ptr0], #0x30]\n"
"movi v22.4s, #0\n"
- "ldr q13, [%[b_ptr0], #0x50]\n"
+ "ldr q12, [%[b_ptr0], #0x40]\n"
"movi v23.4s, #0\n"
+ "ldr q13, [%[b_ptr0], #0x50]\n"
"ldr d14, [%[b_ptr0], #0x60]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
"ldr temploadreg2, [%[b_ptr0], #0x68]\n"
- "add a_ptr1, %[a_ptr0], %[lda]\n"
- "add c_ptr1, %[c_ptr0], %[ldc]\n"
- "ldr q1, [a_ptr1]\n"
+ "add a_ptr1, a_ptr1, #0x10\n"
+ "add %[b_ptr0], %[b_ptr0], #0x80\n"
+ "cbz %[loops], 2f\n"
+ "b 3f\n"
+ "1:\n"
+ "ldr q16, [%[c_ptr0]]\n"
+ "ldr q17, [%[c_ptr0], #0x10]\n"
+ "ldr q18, [%[c_ptr0], #0x20]\n"
+ "ldr q19, [%[c_ptr0], #0x30]\n"
+ "ldr q20, [c_ptr1]\n"
+ "ldr q21, [c_ptr1, #0x10]\n"
+ "ldr q22, [c_ptr1, #0x20]\n"
+ "ldr q23, [c_ptr1, #0x30]\n"
+ "ldr q0, [%[a_ptr0]]\n"
"add %[a_ptr0], %[a_ptr0], #0x10\n"
- "ins v14.d[1], temploadreg2\n"
+ "ldr q1, [a_ptr1]\n"
"add a_ptr1, a_ptr1, #0x10\n"
+ "ldr q8, [%[b_ptr0]]\n"
+ "ldr q9, [%[b_ptr0], #0x10]\n"
+ "ldr q10, [%[b_ptr0], #0x20]\n"
+ "ldr q11, [%[b_ptr0], #0x30]\n"
+ "ldr q12, [%[b_ptr0], #0x40]\n"
+ "ldr q13, [%[b_ptr0], #0x50]\n"
+ "ldr d14, [%[b_ptr0], #0x60]\n"
+ "ldr temploadreg2, [%[b_ptr0], #0x68]\n"
"add %[b_ptr0], %[b_ptr0], #0x80\n"
- "cbz %[loops], 1f\n"
- "2:\n"
+ "cbz %[loops], 2f\n"
+ "3:\n"
".inst 0x4f80e110 // sdot v16.4s, v8.16b, v0.4b[0]\n"
- "ldr d15, [%[b_ptr0], #-0x10]\n"
+ "ins v14.d[1], temploadreg2\n"
".inst 0x4f81e114 // sdot v20.4s, v8.16b, v1.4b[0]\n"
- "ldr temploadreg3, [%[b_ptr0], #-0x8]\n"
+ "ldr d15, [%[b_ptr0], #-0x10]\n"
".inst 0x4f80e131 // sdot v17.4s, v9.16b, v0.4b[0]\n"
- "ldr d4, [%[a_ptr0]]\n"
+ "ldr temploadreg3, [%[b_ptr0], #-0x8]\n"
".inst 0x4f81e135 // sdot v21.4s, v9.16b, v1.4b[0]\n"
- "ldr temploadreg0, [%[a_ptr0], #0x8]\n"
+ "ldr d4, [%[a_ptr0]]\n"
".inst 0x4f80e152 // sdot v18.4s, v10.16b, v0.4b[0]\n"
- "ldr d5, [a_ptr1]\n"
+ "ldr temploadreg0, [%[a_ptr0], #0x8]\n"
".inst 0x4f81e156 // sdot v22.4s, v10.16b, v1.4b[0]\n"
- "ldr temploadreg1, [a_ptr1, #0x8]\n"
+ "ldr d5, [a_ptr1]\n"
".inst 0x4f80e173 // sdot v19.4s, v11.16b, v0.4b[0]\n"
- "ldr d8, [%[b_ptr0]]\n"
+ "ldr temploadreg1, [a_ptr1, #0x8]\n"
".inst 0x4f81e177 // sdot v23.4s, v11.16b, v1.4b[0]\n"
- "ins v4.d[1], temploadreg0\n"
+ "ldr d8, [%[b_ptr0]]\n"
".inst 0x4fa0e190 // sdot v16.4s, v12.16b, v0.4b[1]\n"
- "ldr temploadreg0, [%[b_ptr0], #0x8]\n"
+ "ins v4.d[1], temploadreg0\n"
".inst 0x4fa1e194 // sdot v20.4s, v12.16b, v1.4b[1]\n"
- "ldr d9, [%[b_ptr0], #0x10]\n"
+ "ldr temploadreg0, [%[b_ptr0], #0x8]\n"
".inst 0x4fa0e1b1 // sdot v17.4s, v13.16b, v0.4b[1]\n"
- "ins v5.d[1], temploadreg1\n"
+ "ldr d9, [%[b_ptr0], #0x10]\n"
".inst 0x4fa1e1b5 // sdot v21.4s, v13.16b, v1.4b[1]\n"
- "ldr temploadreg1, [%[b_ptr0], #0x18]\n"
+ "ins v5.d[1], temploadreg1\n"
".inst 0x4fa0e1d2 // sdot v18.4s, v14.16b, v0.4b[1]\n"
- "ldr d10, [%[b_ptr0], #0x20]\n"
+ "ldr temploadreg1, [%[b_ptr0], #0x18]\n"
".inst 0x4fa1e1d6 // sdot v22.4s, v14.16b, v1.4b[1]\n"
+ "ldr d10, [%[b_ptr0], #0x20]\n"
"ldr temploadreg2, [%[b_ptr0], #0x28]\n"
- "ldr d11, [%[b_ptr0], #0x30]\n"
"subs %[loops], %[loops], #0x1\n"
- "ins v15.d[1], temploadreg3\n"
+ "ldr d11, [%[b_ptr0], #0x30]\n"
"prfm PLDL1KEEP, [%[a_ptr0], #0x40]\n"
- "ldr temploadreg3, [%[b_ptr0], #0x38]\n"
+ "ins v15.d[1], temploadreg3\n"
"add %[a_ptr0], %[a_ptr0], #0x20\n"
+ "ldr temploadreg3, [%[b_ptr0], #0x38]\n"
+ "add a_ptr1, a_ptr1, #0x20\n"
".inst 0x4fa0e1f3 // sdot v19.4s, v15.16b, v0.4b[1]\n"
"ldr d12, [%[b_ptr0], #0x40]\n"
".inst 0x4fa1e1f7 // sdot v23.4s, v15.16b, v1.4b[1]\n"
"ins v8.d[1], temploadreg0\n"
"ldr temploadreg0, [%[b_ptr0], #0x48]\n"
- "add a_ptr1, a_ptr1, #0x20\n"
- "ldr d13, [%[b_ptr0], #0x50]\n"
"prfm PLDL1KEEP, [a_ptr1, #0x40]\n"
+ "ldr d13, [%[b_ptr0], #0x50]\n"
".inst 0x4f80e910 // sdot v16.4s, v8.16b, v0.4b[2]\n"
"ins v9.d[1], temploadreg1\n"
".inst 0x4f81e914 // sdot v20.4s, v8.16b, v1.4b[2]\n"
@@ -658,15 +701,15 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
"ins v11.d[1], temploadreg3\n"
"ins v12.d[1], temploadreg0\n"
"ins v13.d[1], temploadreg1\n"
+ "b.ne 3b\n"
+ "2:\n"
"ins v14.d[1], temploadreg2\n"
- "b.ne 2b\n"
- "1:\n"
- "ldr d15, [%[b_ptr0], #-0x10]\n"
"prfm PSTL1KEEP, [%[c_ptr0]]\n"
- "ldr temploadreg3, [%[b_ptr0], #-0x8]\n"
+ "ldr d15, [%[b_ptr0], #-0x10]\n"
"prfm PSTL1KEEP, [c_ptr1]\n"
+ "ldr temploadreg3, [%[b_ptr0], #-0x8]\n"
"ins v15.d[1], temploadreg3\n"
- "cbz %[regs], 3f\n"
+ "cbz %[regs], 4f\n"
".inst 0x4f80e110 // sdot v16.4s, v8.16b, v0.4b[0]\n"
"ldr d4, [%[a_ptr0]]\n"
".inst 0x4f81e114 // sdot v20.4s, v8.16b, v1.4b[0]\n"
@@ -813,8 +856,8 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
".inst 0x4fa5e9d6 // sdot v22.4s, v14.16b, v5.4b[3]\n"
".inst 0x4fa4e9f3 // sdot v19.4s, v15.16b, v4.4b[3]\n"
".inst 0x4fa5e9f7 // sdot v23.4s, v15.16b, v5.4b[3]\n"
- "b 4f\n"
- "3:\n"
+ "b 5f\n"
+ "4:\n"
".inst 0x4f80e110 // sdot v16.4s, v8.16b, v0.4b[0]\n"
"ldr temploadreg0, [%[b_ptr0], #0x8]\n"
".inst 0x4f81e114 // sdot v20.4s, v8.16b, v1.4b[0]\n"
@@ -872,9 +915,9 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
".inst 0x4fa1e9d6 // sdot v22.4s, v14.16b, v1.4b[3]\n"
".inst 0x4fa0e9f3 // sdot v19.4s, v15.16b, v0.4b[3]\n"
".inst 0x4fa1e9f7 // sdot v23.4s, v15.16b, v1.4b[3]\n"
- "4:\n"
- "cbz %[blocks], 5f\n"
- "6:\n"
+ "5:\n"
+ "cbz %[blocks], 6f\n"
+ "7:\n"
"ldr q8, [%[b_ptr0]]\n"
"subs %[blocks], %[blocks], #0x1\n"
"ldr q9, [%[b_ptr0], #0x10]\n"
@@ -893,20 +936,20 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
".inst 0x4f81e156 // sdot v22.4s, v10.16b, v1.4b[0]\n"
".inst 0x4f80e173 // sdot v19.4s, v11.16b, v0.4b[0]\n"
".inst 0x4f81e177 // sdot v23.4s, v11.16b, v1.4b[0]\n"
- "b.ne 6b\n"
- "5:\n"
- "cbz %[odds], 7f\n"
+ "b.ne 7b\n"
+ "6:\n"
+ "cbz %[odds], 8f\n"
"ld1 {v0.b}[0], [%[a_ptr0]], #1\n"
"ld1 {v1.b}[0], [a_ptr1], #1\n"
"subs %[odds], %[odds], #0x1\n"
- "b.eq 8f\n"
+ "b.eq 9f\n"
"ld1 {v0.b}[1], [%[a_ptr0]], #1\n"
"ld1 {v1.b}[1], [a_ptr1], #1\n"
"subs %[odds], %[odds], #0x1\n"
- "b.eq 8f\n"
+ "b.eq 9f\n"
"ld1 {v0.b}[2], [%[a_ptr0]]\n"
"ld1 {v1.b}[2], [a_ptr1]\n"
- "8:\n"
+ "9:\n"
"ldr q8, [%[b_ptr0]]\n"
"ldr q9, [%[b_ptr0], #0x10]\n"
"ldr q10, [%[b_ptr0], #0x20]\n"
@@ -919,7 +962,7 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
".inst 0x4f81e156 // sdot v22.4s, v10.16b, v1.4b[0]\n"
".inst 0x4f80e173 // sdot v19.4s, v11.16b, v0.4b[0]\n"
".inst 0x4f81e177 // sdot v23.4s, v11.16b, v1.4b[0]\n"
- "7:\n"
+ "8:\n"
"str q16, [%[c_ptr0]]\n"
"str q17, [%[c_ptr0], #0x10]\n"
"str q18, [%[c_ptr0], #0x20]\n"
@@ -950,40 +993,72 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
"temploadreg1 .req X5\n"
"temploadreg2 .req X6\n"
"temploadreg3 .req X7\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "cbnz %[append], 1f\n"
"movi v16.4s, #0\n"
"ldr q0, [%[a_ptr0]]\n"
"movi v17.4s, #0\n"
- "ldr q8, [%[b_ptr0]]\n"
+ "ldr q1, [a_ptr1]\n"
"movi v18.4s, #0\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
+ "ldr q2, [a_ptr2]\n"
"movi v19.4s, #0\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
+ "ldr q8, [%[b_ptr0]]\n"
"movi v20.4s, #0\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
+ "ldr q9, [%[b_ptr0], #0x10]\n"
"movi v21.4s, #0\n"
- "ldr q12, [%[b_ptr0], #0x40]\n"
+ "ldr q10, [%[b_ptr0], #0x20]\n"
"movi v22.4s, #0\n"
- "ldr q13, [%[b_ptr0], #0x50]\n"
+ "ldr q11, [%[b_ptr0], #0x30]\n"
"movi v23.4s, #0\n"
- "ldr d14, [%[b_ptr0], #0x60]\n"
+ "ldr q12, [%[b_ptr0], #0x40]\n"
"movi v24.4s, #0\n"
- "ldr temploadreg2, [%[b_ptr0], #0x68]\n"
+ "ldr q13, [%[b_ptr0], #0x50]\n"
"movi v25.4s, #0\n"
- "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "ldr d14, [%[b_ptr0], #0x60]\n"
"movi v26.4s, #0\n"
- "ldr q1, [a_ptr1]\n"
+ "ldr temploadreg2, [%[b_ptr0], #0x68]\n"
"movi v27.4s, #0\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
+ "add a_ptr1, a_ptr1, #0x10\n"
"ins v14.d[1], temploadreg2\n"
- "add a_ptr2, a_ptr1, %[lda]\n"
- "add c_ptr1, %[c_ptr0], %[ldc]\n"
- "ldr q2, [a_ptr2]\n"
- "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr2, a_ptr2, #0x10\n"
+ "add %[b_ptr0], %[b_ptr0], #0x80\n"
+ "cbz %[loops], 2f\n"
+ "b 3f\n"
+ "1:\n"
+ "ldr q16, [%[c_ptr0]]\n"
+ "ldr q17, [%[c_ptr0], #0x10]\n"
+ "ldr q18, [%[c_ptr0], #0x20]\n"
+ "ldr q19, [%[c_ptr0], #0x30]\n"
+ "ldr q20, [c_ptr1]\n"
+ "ldr q21, [c_ptr1, #0x10]\n"
+ "ldr q22, [c_ptr1, #0x20]\n"
+ "ldr q23, [c_ptr1, #0x30]\n"
+ "ldr q24, [c_ptr2]\n"
+ "ldr q25, [c_ptr2, #0x10]\n"
+ "ldr q26, [c_ptr2, #0x20]\n"
+ "ldr q27, [c_ptr2, #0x30]\n"
+ "ldr q0, [%[a_ptr0]]\n"
"add %[a_ptr0], %[a_ptr0], #0x10\n"
+ "ldr q1, [a_ptr1]\n"
"add a_ptr1, a_ptr1, #0x10\n"
+ "ldr q2, [a_ptr2]\n"
"add a_ptr2, a_ptr2, #0x10\n"
+ "ldr q8, [%[b_ptr0]]\n"
+ "ldr q9, [%[b_ptr0], #0x10]\n"
+ "ldr q10, [%[b_ptr0], #0x20]\n"
+ "ldr q11, [%[b_ptr0], #0x30]\n"
+ "ldr q12, [%[b_ptr0], #0x40]\n"
+ "ldr q13, [%[b_ptr0], #0x50]\n"
+ "ldr d14, [%[b_ptr0], #0x60]\n"
+ "ldr temploadreg2, [%[b_ptr0], #0x68]\n"
"add %[b_ptr0], %[b_ptr0], #0x80\n"
- "cbz %[loops], 1f\n"
- "2:\n"
+ "ins v14.d[1], temploadreg2\n"
+ "cbz %[loops], 2f\n"
+ "3:\n"
".inst 0x4f80e110 // sdot v16.4s, v8.16b, v0.4b[0]\n"
"ldr d15, [%[b_ptr0], #-0x10]\n"
".inst 0x4f81e114 // sdot v20.4s, v8.16b, v1.4b[0]\n"
@@ -1203,15 +1278,15 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
"ins v12.d[1], temploadreg0\n"
"ins v13.d[1], temploadreg1\n"
"ins v14.d[1], temploadreg2\n"
- "b.ne 2b\n"
- "1:\n"
+ "b.ne 3b\n"
+ "2:\n"
"ldr d15, [%[b_ptr0], #-0x10]\n"
"prfm PSTL1KEEP, [%[c_ptr0]]\n"
"ldr temploadreg3, [%[b_ptr0], #-0x8]\n"
"prfm PSTL1KEEP, [c_ptr1]\n"
"prfm PSTL1KEEP, [c_ptr2]\n"
"ins v15.d[1], temploadreg3\n"
- "cbz %[regs], 3f\n"
+ "cbz %[regs], 4f\n"
".inst 0x4f80e110 // sdot v16.4s, v8.16b, v0.4b[0]\n"
"ldr d4, [%[a_ptr0]]\n"
".inst 0x4f81e114 // sdot v20.4s, v8.16b, v1.4b[0]\n"
@@ -1394,8 +1469,8 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
".inst 0x4fa4e9f3 // sdot v19.4s, v15.16b, v4.4b[3]\n"
".inst 0x4fa5e9f7 // sdot v23.4s, v15.16b, v5.4b[3]\n"
".inst 0x4fa6e9fb // sdot v27.4s, v15.16b, v6.4b[3]\n"
- "b 4f\n"
- "3:\n"
+ "b 5f\n"
+ "4:\n"
".inst 0x4f80e110 // sdot v16.4s, v8.16b, v0.4b[0]\n"
"ldr temploadreg0, [%[b_ptr0], #0x8]\n"
".inst 0x4f81e114 // sdot v20.4s, v8.16b, v1.4b[0]\n"
@@ -1469,9 +1544,9 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
".inst 0x4fa0e9f3 // sdot v19.4s, v15.16b, v0.4b[3]\n"
".inst 0x4fa1e9f7 // sdot v23.4s, v15.16b, v1.4b[3]\n"
".inst 0x4fa2e9fb // sdot v27.4s, v15.16b, v2.4b[3]\n"
- "4:\n"
- "cbz %[blocks], 5f\n"
- "6:\n"
+ "5:\n"
+ "cbz %[blocks], 6f\n"
+ "7:\n"
"ldr q8, [%[b_ptr0]]\n"
"subs %[blocks], %[blocks], #0x1\n"
"ldr q9, [%[b_ptr0], #0x10]\n"
@@ -1496,23 +1571,23 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
".inst 0x4f80e173 // sdot v19.4s, v11.16b, v0.4b[0]\n"
".inst 0x4f81e177 // sdot v23.4s, v11.16b, v1.4b[0]\n"
".inst 0x4f82e17b // sdot v27.4s, v11.16b, v2.4b[0]\n"
- "b.ne 6b\n"
- "5:\n"
- "cbz %[odds], 7f\n"
+ "b.ne 7b\n"
+ "6:\n"
+ "cbz %[odds], 8f\n"
"ld1 {v0.b}[0], [%[a_ptr0]], #1\n"
"ld1 {v1.b}[0], [a_ptr1], #1\n"
"ld1 {v2.b}[0], [a_ptr2], #1\n"
"subs %[odds], %[odds], #0x1\n"
- "b.eq 8f\n"
+ "b.eq 9f\n"
"ld1 {v0.b}[1], [%[a_ptr0]], #1\n"
"ld1 {v1.b}[1], [a_ptr1], #1\n"
"ld1 {v2.b}[1], [a_ptr2], #1\n"
"subs %[odds], %[odds], #0x1\n"
- "b.eq 8f\n"
+ "b.eq 9f\n"
"ld1 {v0.b}[2], [%[a_ptr0]]\n"
"ld1 {v1.b}[2], [a_ptr1]\n"
"ld1 {v2.b}[2], [a_ptr2]\n"
- "8:\n"
+ "9:\n"
"ldr q8, [%[b_ptr0]]\n"
"ldr q9, [%[b_ptr0], #0x10]\n"
"ldr q10, [%[b_ptr0], #0x20]\n"
@@ -1529,7 +1604,7 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
".inst 0x4f80e173 // sdot v19.4s, v11.16b, v0.4b[0]\n"
".inst 0x4f81e177 // sdot v23.4s, v11.16b, v1.4b[0]\n"
".inst 0x4f82e17b // sdot v27.4s, v11.16b, v2.4b[0]\n"
- "7:\n"
+ "8:\n"
"str q16, [%[c_ptr0]]\n"
"str q17, [%[c_ptr0], #0x10]\n"
"str q18, [%[c_ptr0], #0x20]\n"
@@ -1569,48 +1644,86 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
"temploadreg1 .req X7\n"
"temploadreg2 .req X8\n"
"temploadreg3 .req X9\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "cbnz %[append], 1f\n"
"movi v16.4s, #0\n"
"ldr q0, [%[a_ptr0]]\n"
"movi v17.4s, #0\n"
- "ldr q8, [%[b_ptr0]]\n"
+ "ldr q1, [a_ptr1]\n"
"movi v18.4s, #0\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
+ "ldr q2, [a_ptr2]\n"
"movi v19.4s, #0\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
+ "ldr q3, [a_ptr3]\n"
"movi v20.4s, #0\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
+ "ldr q8, [%[b_ptr0]]\n"
"movi v21.4s, #0\n"
- "ldr q12, [%[b_ptr0], #0x40]\n"
+ "ldr q9, [%[b_ptr0], #0x10]\n"
"movi v22.4s, #0\n"
- "ldr q13, [%[b_ptr0], #0x50]\n"
+ "ldr q10, [%[b_ptr0], #0x20]\n"
"movi v23.4s, #0\n"
- "ldr d14, [%[b_ptr0], #0x60]\n"
+ "ldr q11, [%[b_ptr0], #0x30]\n"
"movi v24.4s, #0\n"
- "ldr temploadreg2, [%[b_ptr0], #0x68]\n"
+ "ldr q12, [%[b_ptr0], #0x40]\n"
"movi v25.4s, #0\n"
- "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "ldr q13, [%[b_ptr0], #0x50]\n"
"movi v26.4s, #0\n"
- "ldr q1, [a_ptr1]\n"
+ "ldr d14, [%[b_ptr0], #0x60]\n"
"movi v27.4s, #0\n"
- "ins v14.d[1], temploadreg2\n"
+ "ldr temploadreg2, [%[b_ptr0], #0x68]\n"
"movi v28.4s, #0\n"
- "add a_ptr2, a_ptr1, %[lda]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
"movi v29.4s, #0\n"
- "ldr q2, [a_ptr2]\n"
+ "ins v14.d[1], temploadreg2\n"
"movi v30.4s, #0\n"
- "add a_ptr3, a_ptr2, %[lda]\n"
+ "add a_ptr1, a_ptr1, #0x10\n"
"movi v31.4s, #0\n"
- "ldr q3, [a_ptr3]\n"
- "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr2, #0x10\n"
+ "add a_ptr3, a_ptr3, #0x10\n"
+ "add %[b_ptr0], %[b_ptr0], #0x80\n"
+ "cbz %[loops], 2f\n"
+ "b 3f\n"
+ "1:\n"
+ "ldr q16, [%[c_ptr0]]\n"
+ "ldr q17, [%[c_ptr0], #0x10]\n"
+ "ldr q18, [%[c_ptr0], #0x20]\n"
+ "ldr q19, [%[c_ptr0], #0x30]\n"
+ "ldr q20, [c_ptr1]\n"
+ "ldr q21, [c_ptr1, #0x10]\n"
+ "ldr q22, [c_ptr1, #0x20]\n"
+ "ldr q23, [c_ptr1, #0x30]\n"
+ "ldr q24, [c_ptr2]\n"
+ "ldr q25, [c_ptr2, #0x10]\n"
+ "ldr q26, [c_ptr2, #0x20]\n"
+ "ldr q27, [c_ptr2, #0x30]\n"
+ "ldr q28, [c_ptr3]\n"
+ "ldr q29, [c_ptr3, #0x10]\n"
+ "ldr q30, [c_ptr3, #0x20]\n"
+ "ldr q31, [c_ptr3, #0x30]\n"
+ "ldr q0, [%[a_ptr0]]\n"
"add %[a_ptr0], %[a_ptr0], #0x10\n"
- "add c_ptr2, c_ptr1, %[ldc]\n"
+ "ldr q1, [a_ptr1]\n"
"add a_ptr1, a_ptr1, #0x10\n"
- "add c_ptr3, c_ptr2, %[ldc]\n"
+ "ldr q2, [a_ptr2]\n"
"add a_ptr2, a_ptr2, #0x10\n"
+ "ldr q3, [a_ptr3]\n"
"add a_ptr3, a_ptr3, #0x10\n"
+ "ldr q8, [%[b_ptr0]]\n"
+ "ldr q9, [%[b_ptr0], #0x10]\n"
+ "ldr q10, [%[b_ptr0], #0x20]\n"
+ "ldr q11, [%[b_ptr0], #0x30]\n"
+ "ldr q12, [%[b_ptr0], #0x40]\n"
+ "ldr q13, [%[b_ptr0], #0x50]\n"
+ "ldr d14, [%[b_ptr0], #0x60]\n"
+ "ldr temploadreg2, [%[b_ptr0], #0x68]\n"
"add %[b_ptr0], %[b_ptr0], #0x80\n"
- "cbz %[loops], 1f\n"
- "2:\n"
+ "ins v14.d[1], temploadreg2\n"
+ "cbz %[loops], 2f\n"
+ "3:\n"
".inst 0x4f80e110 // sdot v16.4s, v8.16b, v0.4b[0]\n"
"ldr d15, [%[b_ptr0], #-0x10]\n"
".inst 0x4f81e114 // sdot v20.4s, v8.16b, v1.4b[0]\n"
@@ -1870,8 +1983,8 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
"ins v13.d[1], temploadreg1\n"
"prfm PLDL1KEEP, [a_ptr3, #0x40]\n"
"ins v14.d[1], temploadreg2\n"
- "b.ne 2b\n"
- "1:\n"
+ "b.ne 3b\n"
+ "2:\n"
"ldr d15, [%[b_ptr0], #-0x10]\n"
"prfm PSTL1KEEP, [%[c_ptr0]]\n"
"ldr temploadreg3, [%[b_ptr0], #-0x8]\n"
@@ -1879,7 +1992,7 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
"prfm PSTL1KEEP, [c_ptr2]\n"
"prfm PSTL1KEEP, [c_ptr3]\n"
"ins v15.d[1], temploadreg3\n"
- "cbz %[regs], 3f\n"
+ "cbz %[regs], 4f\n"
".inst 0x4f80e110 // sdot v16.4s, v8.16b, v0.4b[0]\n"
"ldr d4, [%[a_ptr0]]\n"
".inst 0x4f81e114 // sdot v20.4s, v8.16b, v1.4b[0]\n"
@@ -2098,8 +2211,8 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
".inst 0x4fa5e9f7 // sdot v23.4s, v15.16b, v5.4b[3]\n"
".inst 0x4fa6e9fb // sdot v27.4s, v15.16b, v6.4b[3]\n"
".inst 0x4fa7e9ff // sdot v31.4s, v15.16b, v7.4b[3]\n"
- "b 4f\n"
- "3:\n"
+ "b 5f\n"
+ "4:\n"
".inst 0x4f80e110 // sdot v16.4s, v8.16b, v0.4b[0]\n"
"ldr temploadreg0, [%[b_ptr0], #0x8]\n"
".inst 0x4f81e114 // sdot v20.4s, v8.16b, v1.4b[0]\n"
@@ -2189,9 +2302,9 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
".inst 0x4fa1e9f7 // sdot v23.4s, v15.16b, v1.4b[3]\n"
".inst 0x4fa2e9fb // sdot v27.4s, v15.16b, v2.4b[3]\n"
".inst 0x4fa3e9ff // sdot v31.4s, v15.16b, v3.4b[3]\n"
- "4:\n"
- "cbz %[blocks], 5f\n"
- "6:\n"
+ "5:\n"
+ "cbz %[blocks], 6f\n"
+ "7:\n"
"ldr q8, [%[b_ptr0]]\n"
"subs %[blocks], %[blocks], #0x1\n"
"ldr q9, [%[b_ptr0], #0x10]\n"
@@ -2222,26 +2335,26 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
".inst 0x4f81e177 // sdot v23.4s, v11.16b, v1.4b[0]\n"
".inst 0x4f82e17b // sdot v27.4s, v11.16b, v2.4b[0]\n"
".inst 0x4f83e17f // sdot v31.4s, v11.16b, v3.4b[0]\n"
- "b.ne 6b\n"
- "5:\n"
- "cbz %[odds], 7f\n"
+ "b.ne 7b\n"
+ "6:\n"
+ "cbz %[odds], 8f\n"
"ld1 {v0.b}[0], [%[a_ptr0]], #1\n"
"ld1 {v1.b}[0], [a_ptr1], #1\n"
"ld1 {v2.b}[0], [a_ptr2], #1\n"
"ld1 {v3.b}[0], [a_ptr3], #1\n"
"subs %[odds], %[odds], #0x1\n"
- "b.eq 8f\n"
+ "b.eq 9f\n"
"ld1 {v0.b}[1], [%[a_ptr0]], #1\n"
"ld1 {v1.b}[1], [a_ptr1], #1\n"
"ld1 {v2.b}[1], [a_ptr2], #1\n"
"ld1 {v3.b}[1], [a_ptr3], #1\n"
"subs %[odds], %[odds], #0x1\n"
- "b.eq 8f\n"
+ "b.eq 9f\n"
"ld1 {v0.b}[2], [%[a_ptr0]]\n"
"ld1 {v1.b}[2], [a_ptr1]\n"
"ld1 {v2.b}[2], [a_ptr2]\n"
"ld1 {v3.b}[2], [a_ptr3]\n"
- "8:\n"
+ "9:\n"
"ldr q8, [%[b_ptr0]]\n"
"ldr q9, [%[b_ptr0], #0x10]\n"
"ldr q10, [%[b_ptr0], #0x20]\n"
@@ -2262,7 +2375,7 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
".inst 0x4f81e177 // sdot v23.4s, v11.16b, v1.4b[0]\n"
".inst 0x4f82e17b // sdot v27.4s, v11.16b, v2.4b[0]\n"
".inst 0x4f83e17f // sdot v31.4s, v11.16b, v3.4b[0]\n"
- "7:\n"
+ "8:\n"
"str q16, [%[c_ptr0]]\n"
"str q17, [%[c_ptr0], #0x10]\n"
"str q18, [%[c_ptr0], #0x20]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4/generic.cpp
index b48b674621..9f06a48ff5 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4/generic.cpp
@@ -35,7 +35,6 @@ namespace arm_gemm {
void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_t *C, int ldc, int M, int N, int K, const int32_t *bias, Activation act, bool append) {
UNUSED(bias);
UNUSED(act);
-
const int K_stride = ((K + 3) / 4) * 4;
const long loops_count = ((K + 16) / 32) - 1;
K -= loops_count * 32;
@@ -76,6 +75,7 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
switch(M-y) {
case 1:
__asm __volatile (
+ "cbnz %[append], 1f\n"
"movi v16.4s, #0\n"
"ldr q0, [%[a_ptr0]]\n"
"movi v17.4s, #0\n"
@@ -90,8 +90,25 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
"ldr q13, [%[b_ptr0], #0x50]\n"
"ldr q14, [%[b_ptr0], #0x60]\n"
"add %[b_ptr0], %[b_ptr0], #0x80\n"
- "cbz %[loops], 1f\n"
- "2:\n"
+ "cbz %[loops], 2f\n"
+ "b 3f\n"
+ "1:\n"
+ "ldr q16, [%[c_ptr0]]\n"
+ "ldr q17, [%[c_ptr0], #0x10]\n"
+ "ldr q18, [%[c_ptr0], #0x20]\n"
+ "ldr q19, [%[c_ptr0], #0x30]\n"
+ "ldr q0, [%[a_ptr0]]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
+ "ldr q8, [%[b_ptr0]]\n"
+ "ldr q9, [%[b_ptr0], #0x10]\n"
+ "ldr q10, [%[b_ptr0], #0x20]\n"
+ "ldr q11, [%[b_ptr0], #0x30]\n"
+ "ldr q12, [%[b_ptr0], #0x40]\n"
+ "ldr q13, [%[b_ptr0], #0x50]\n"
+ "ldr q14, [%[b_ptr0], #0x60]\n"
+ "add %[b_ptr0], %[b_ptr0], #0x80\n"
+ "cbz %[loops], 2f\n"
+ "3:\n"
".inst 0x4f80e110 // sdot v16.4s, v8.16b, v0.4b[0]\n"
"ldr q15, [%[b_ptr0], #-0x10]\n"
".inst 0x4f80e131 // sdot v17.4s, v9.16b, v0.4b[0]\n"
@@ -163,11 +180,11 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
"ldr q12, [%[b_ptr0], #-0x40]\n"
"ldr q13, [%[b_ptr0], #-0x30]\n"
"ldr q14, [%[b_ptr0], #-0x20]\n"
- "b.ne 2b\n"
- "1:\n"
+ "b.ne 3b\n"
+ "2:\n"
"ldr q15, [%[b_ptr0], #-0x10]\n"
"prfm PSTL1KEEP, [%[c_ptr0]]\n"
- "cbz %[regs], 3f\n"
+ "cbz %[regs], 4f\n"
".inst 0x4f80e110 // sdot v16.4s, v8.16b, v0.4b[0]\n"
"ldr q4, [%[a_ptr0]]\n"
".inst 0x4f80e131 // sdot v17.4s, v9.16b, v0.4b[0]\n"
@@ -228,8 +245,8 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
".inst 0x4fa4e9b1 // sdot v17.4s, v13.16b, v4.4b[3]\n"
".inst 0x4fa4e9d2 // sdot v18.4s, v14.16b, v4.4b[3]\n"
".inst 0x4fa4e9f3 // sdot v19.4s, v15.16b, v4.4b[3]\n"
- "b 4f\n"
- "3:\n"
+ "b 5f\n"
+ "4:\n"
".inst 0x4f80e110 // sdot v16.4s, v8.16b, v0.4b[0]\n"
"ldr q8, [%[b_ptr0]]\n"
".inst 0x4f80e131 // sdot v17.4s, v9.16b, v0.4b[0]\n"
@@ -255,9 +272,9 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
".inst 0x4fa0e9b1 // sdot v17.4s, v13.16b, v0.4b[3]\n"
".inst 0x4fa0e9d2 // sdot v18.4s, v14.16b, v0.4b[3]\n"
".inst 0x4fa0e9f3 // sdot v19.4s, v15.16b, v0.4b[3]\n"
- "4:\n"
- "cbz %[blocks], 5f\n"
- "6:\n"
+ "5:\n"
+ "cbz %[blocks], 6f\n"
+ "7:\n"
"ldr q8, [%[b_ptr0]]\n"
"subs %[blocks], %[blocks], #0x1\n"
"ldr q9, [%[b_ptr0], #0x10]\n"
@@ -270,17 +287,17 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
".inst 0x4f80e131 // sdot v17.4s, v9.16b, v0.4b[0]\n"
".inst 0x4f80e152 // sdot v18.4s, v10.16b, v0.4b[0]\n"
".inst 0x4f80e173 // sdot v19.4s, v11.16b, v0.4b[0]\n"
- "b.ne 6b\n"
- "5:\n"
- "cbz %[odds], 7f\n"
+ "b.ne 7b\n"
+ "6:\n"
+ "cbz %[odds], 8f\n"
"ld1 {v0.b}[0], [%[a_ptr0]], #1\n"
"subs %[odds], %[odds], #0x1\n"
- "b.eq 8f\n"
+ "b.eq 9f\n"
"ld1 {v0.b}[1], [%[a_ptr0]], #1\n"
"subs %[odds], %[odds], #0x1\n"
- "b.eq 8f\n"
+ "b.eq 9f\n"
"ld1 {v0.b}[2], [%[a_ptr0]]\n"
- "8:\n"
+ "9:\n"
"ldr q8, [%[b_ptr0]]\n"
"ldr q9, [%[b_ptr0], #0x10]\n"
"ldr q10, [%[b_ptr0], #0x20]\n"
@@ -289,7 +306,7 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
".inst 0x4f80e131 // sdot v17.4s, v9.16b, v0.4b[0]\n"
".inst 0x4f80e152 // sdot v18.4s, v10.16b, v0.4b[0]\n"
".inst 0x4f80e173 // sdot v19.4s, v11.16b, v0.4b[0]\n"
- "7:\n"
+ "8:\n"
"str q16, [%[c_ptr0]]\n"
"str q17, [%[c_ptr0], #0x10]\n"
"str q18, [%[c_ptr0], #0x20]\n"
@@ -304,30 +321,54 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
__asm __volatile (
"a_ptr1 .req X0\n"
"c_ptr1 .req X1\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "cbnz %[append], 1f\n"
"movi v16.4s, #0\n"
"ldr q0, [%[a_ptr0]]\n"
"movi v17.4s, #0\n"
- "ldr q8, [%[b_ptr0]]\n"
+ "ldr q1, [a_ptr1]\n"
"movi v18.4s, #0\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
+ "ldr q8, [%[b_ptr0]]\n"
"movi v19.4s, #0\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
+ "ldr q9, [%[b_ptr0], #0x10]\n"
"movi v20.4s, #0\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
+ "ldr q10, [%[b_ptr0], #0x20]\n"
"movi v21.4s, #0\n"
- "ldr q12, [%[b_ptr0], #0x40]\n"
+ "ldr q11, [%[b_ptr0], #0x30]\n"
"movi v22.4s, #0\n"
- "ldr q13, [%[b_ptr0], #0x50]\n"
+ "ldr q12, [%[b_ptr0], #0x40]\n"
"movi v23.4s, #0\n"
+ "ldr q13, [%[b_ptr0], #0x50]\n"
"ldr q14, [%[b_ptr0], #0x60]\n"
- "add a_ptr1, %[a_ptr0], %[lda]\n"
- "add c_ptr1, %[c_ptr0], %[ldc]\n"
- "ldr q1, [a_ptr1]\n"
"add %[a_ptr0], %[a_ptr0], #0x10\n"
"add a_ptr1, a_ptr1, #0x10\n"
"add %[b_ptr0], %[b_ptr0], #0x80\n"
- "cbz %[loops], 1f\n"
- "2:\n"
+ "cbz %[loops], 2f\n"
+ "b 3f\n"
+ "1:\n"
+ "ldr q16, [%[c_ptr0]]\n"
+ "ldr q17, [%[c_ptr0], #0x10]\n"
+ "ldr q18, [%[c_ptr0], #0x20]\n"
+ "ldr q19, [%[c_ptr0], #0x30]\n"
+ "ldr q20, [c_ptr1]\n"
+ "ldr q21, [c_ptr1, #0x10]\n"
+ "ldr q22, [c_ptr1, #0x20]\n"
+ "ldr q23, [c_ptr1, #0x30]\n"
+ "ldr q0, [%[a_ptr0]]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
+ "ldr q1, [a_ptr1]\n"
+ "add a_ptr1, a_ptr1, #0x10\n"
+ "ldr q8, [%[b_ptr0]]\n"
+ "ldr q9, [%[b_ptr0], #0x10]\n"
+ "ldr q10, [%[b_ptr0], #0x20]\n"
+ "ldr q11, [%[b_ptr0], #0x30]\n"
+ "ldr q12, [%[b_ptr0], #0x40]\n"
+ "ldr q13, [%[b_ptr0], #0x50]\n"
+ "ldr q14, [%[b_ptr0], #0x60]\n"
+ "add %[b_ptr0], %[b_ptr0], #0x80\n"
+ "cbz %[loops], 2f\n"
+ "3:\n"
".inst 0x4f80e110 // sdot v16.4s, v8.16b, v0.4b[0]\n"
"ldr q15, [%[b_ptr0], #-0x10]\n"
".inst 0x4f81e114 // sdot v20.4s, v8.16b, v1.4b[0]\n"
@@ -435,12 +476,12 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
"ldr q14, [%[b_ptr0], #-0x20]\n"
".inst 0x4fa4e9f3 // sdot v19.4s, v15.16b, v4.4b[3]\n"
".inst 0x4fa5e9f7 // sdot v23.4s, v15.16b, v5.4b[3]\n"
- "b.ne 2b\n"
- "1:\n"
+ "b.ne 3b\n"
+ "2:\n"
"ldr q15, [%[b_ptr0], #-0x10]\n"
"prfm PSTL1KEEP, [%[c_ptr0]]\n"
"prfm PSTL1KEEP, [c_ptr1]\n"
- "cbz %[regs], 3f\n"
+ "cbz %[regs], 4f\n"
".inst 0x4f80e110 // sdot v16.4s, v8.16b, v0.4b[0]\n"
"ldr q4, [%[a_ptr0]]\n"
".inst 0x4f81e114 // sdot v20.4s, v8.16b, v1.4b[0]\n"
@@ -535,8 +576,8 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
".inst 0x4fa5e9d6 // sdot v22.4s, v14.16b, v5.4b[3]\n"
".inst 0x4fa4e9f3 // sdot v19.4s, v15.16b, v4.4b[3]\n"
".inst 0x4fa5e9f7 // sdot v23.4s, v15.16b, v5.4b[3]\n"
- "b 4f\n"
- "3:\n"
+ "b 5f\n"
+ "4:\n"
".inst 0x4f80e110 // sdot v16.4s, v8.16b, v0.4b[0]\n"
".inst 0x4f81e114 // sdot v20.4s, v8.16b, v1.4b[0]\n"
"ldr q8, [%[b_ptr0]]\n"
@@ -578,9 +619,9 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
".inst 0x4fa1e9d6 // sdot v22.4s, v14.16b, v1.4b[3]\n"
".inst 0x4fa0e9f3 // sdot v19.4s, v15.16b, v0.4b[3]\n"
".inst 0x4fa1e9f7 // sdot v23.4s, v15.16b, v1.4b[3]\n"
- "4:\n"
- "cbz %[blocks], 5f\n"
- "6:\n"
+ "5:\n"
+ "cbz %[blocks], 6f\n"
+ "7:\n"
"ldr q8, [%[b_ptr0]]\n"
"subs %[blocks], %[blocks], #0x1\n"
"ldr q9, [%[b_ptr0], #0x10]\n"
@@ -599,20 +640,20 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
".inst 0x4f81e156 // sdot v22.4s, v10.16b, v1.4b[0]\n"
".inst 0x4f80e173 // sdot v19.4s, v11.16b, v0.4b[0]\n"
".inst 0x4f81e177 // sdot v23.4s, v11.16b, v1.4b[0]\n"
- "b.ne 6b\n"
- "5:\n"
- "cbz %[odds], 7f\n"
+ "b.ne 7b\n"
+ "6:\n"
+ "cbz %[odds], 8f\n"
"ld1 {v0.b}[0], [%[a_ptr0]], #1\n"
"ld1 {v1.b}[0], [a_ptr1], #1\n"
"subs %[odds], %[odds], #0x1\n"
- "b.eq 8f\n"
+ "b.eq 9f\n"
"ld1 {v0.b}[1], [%[a_ptr0]], #1\n"
"ld1 {v1.b}[1], [a_ptr1], #1\n"
"subs %[odds], %[odds], #0x1\n"
- "b.eq 8f\n"
+ "b.eq 9f\n"
"ld1 {v0.b}[2], [%[a_ptr0]]\n"
"ld1 {v1.b}[2], [a_ptr1]\n"
- "8:\n"
+ "9:\n"
"ldr q8, [%[b_ptr0]]\n"
"ldr q9, [%[b_ptr0], #0x10]\n"
"ldr q10, [%[b_ptr0], #0x20]\n"
@@ -625,7 +666,7 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
".inst 0x4f81e156 // sdot v22.4s, v10.16b, v1.4b[0]\n"
".inst 0x4f80e173 // sdot v19.4s, v11.16b, v0.4b[0]\n"
".inst 0x4f81e177 // sdot v23.4s, v11.16b, v1.4b[0]\n"
- "7:\n"
+ "8:\n"
"str q16, [%[c_ptr0]]\n"
"str q17, [%[c_ptr0], #0x10]\n"
"str q18, [%[c_ptr0], #0x20]\n"
@@ -648,38 +689,68 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
"a_ptr2 .req X1\n"
"c_ptr1 .req X2\n"
"c_ptr2 .req X3\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "cbnz %[append], 1f\n"
"movi v16.4s, #0\n"
"ldr q0, [%[a_ptr0]]\n"
"movi v17.4s, #0\n"
- "ldr q8, [%[b_ptr0]]\n"
+ "ldr q1, [a_ptr1]\n"
"movi v18.4s, #0\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
+ "ldr q2, [a_ptr2]\n"
"movi v19.4s, #0\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
+ "ldr q8, [%[b_ptr0]]\n"
"movi v20.4s, #0\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
+ "ldr q9, [%[b_ptr0], #0x10]\n"
"movi v21.4s, #0\n"
- "ldr q12, [%[b_ptr0], #0x40]\n"
+ "ldr q10, [%[b_ptr0], #0x20]\n"
"movi v22.4s, #0\n"
- "ldr q13, [%[b_ptr0], #0x50]\n"
+ "ldr q11, [%[b_ptr0], #0x30]\n"
"movi v23.4s, #0\n"
- "ldr q14, [%[b_ptr0], #0x60]\n"
+ "ldr q12, [%[b_ptr0], #0x40]\n"
"movi v24.4s, #0\n"
- "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "ldr q13, [%[b_ptr0], #0x50]\n"
"movi v25.4s, #0\n"
- "ldr q1, [a_ptr1]\n"
+ "ldr q14, [%[b_ptr0], #0x60]\n"
"movi v26.4s, #0\n"
- "add a_ptr2, a_ptr1, %[lda]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
"movi v27.4s, #0\n"
- "ldr q2, [a_ptr2]\n"
- "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr1, a_ptr1, #0x10\n"
+ "add a_ptr2, a_ptr2, #0x10\n"
+ "add %[b_ptr0], %[b_ptr0], #0x80\n"
+ "cbz %[loops], 2f\n"
+ "b 3f\n"
+ "1:\n"
+ "ldr q16, [%[c_ptr0]]\n"
+ "ldr q17, [%[c_ptr0], #0x10]\n"
+ "ldr q18, [%[c_ptr0], #0x20]\n"
+ "ldr q19, [%[c_ptr0], #0x30]\n"
+ "ldr q20, [c_ptr1]\n"
+ "ldr q21, [c_ptr1, #0x10]\n"
+ "ldr q22, [c_ptr1, #0x20]\n"
+ "ldr q23, [c_ptr1, #0x30]\n"
+ "ldr q24, [c_ptr2]\n"
+ "ldr q25, [c_ptr2, #0x10]\n"
+ "ldr q26, [c_ptr2, #0x20]\n"
+ "ldr q27, [c_ptr2, #0x30]\n"
+ "ldr q0, [%[a_ptr0]]\n"
"add %[a_ptr0], %[a_ptr0], #0x10\n"
- "add c_ptr2, c_ptr1, %[ldc]\n"
+ "ldr q1, [a_ptr1]\n"
"add a_ptr1, a_ptr1, #0x10\n"
+ "ldr q2, [a_ptr2]\n"
"add a_ptr2, a_ptr2, #0x10\n"
+ "ldr q8, [%[b_ptr0]]\n"
+ "ldr q9, [%[b_ptr0], #0x10]\n"
+ "ldr q10, [%[b_ptr0], #0x20]\n"
+ "ldr q11, [%[b_ptr0], #0x30]\n"
+ "ldr q12, [%[b_ptr0], #0x40]\n"
+ "ldr q13, [%[b_ptr0], #0x50]\n"
+ "ldr q14, [%[b_ptr0], #0x60]\n"
"add %[b_ptr0], %[b_ptr0], #0x80\n"
- "cbz %[loops], 1f\n"
- "2:\n"
+ "cbz %[loops], 2f\n"
+ "3:\n"
".inst 0x4f80e110 // sdot v16.4s, v8.16b, v0.4b[0]\n"
"ldr q15, [%[b_ptr0], #-0x10]\n"
".inst 0x4f81e114 // sdot v20.4s, v8.16b, v1.4b[0]\n"
@@ -823,13 +894,13 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
".inst 0x4fa4e9f3 // sdot v19.4s, v15.16b, v4.4b[3]\n"
".inst 0x4fa5e9f7 // sdot v23.4s, v15.16b, v5.4b[3]\n"
".inst 0x4fa6e9fb // sdot v27.4s, v15.16b, v6.4b[3]\n"
- "b.ne 2b\n"
- "1:\n"
+ "b.ne 3b\n"
+ "2:\n"
"ldr q15, [%[b_ptr0], #-0x10]\n"
"prfm PSTL1KEEP, [%[c_ptr0]]\n"
"prfm PSTL1KEEP, [c_ptr1]\n"
"prfm PSTL1KEEP, [c_ptr2]\n"
- "cbz %[regs], 3f\n"
+ "cbz %[regs], 4f\n"
".inst 0x4f80e110 // sdot v16.4s, v8.16b, v0.4b[0]\n"
"ldr q4, [%[a_ptr0]]\n"
".inst 0x4f81e114 // sdot v20.4s, v8.16b, v1.4b[0]\n"
@@ -958,8 +1029,8 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
".inst 0x4fa4e9f3 // sdot v19.4s, v15.16b, v4.4b[3]\n"
".inst 0x4fa5e9f7 // sdot v23.4s, v15.16b, v5.4b[3]\n"
".inst 0x4fa6e9fb // sdot v27.4s, v15.16b, v6.4b[3]\n"
- "b 4f\n"
- "3:\n"
+ "b 5f\n"
+ "4:\n"
".inst 0x4f80e110 // sdot v16.4s, v8.16b, v0.4b[0]\n"
".inst 0x4f81e114 // sdot v20.4s, v8.16b, v1.4b[0]\n"
".inst 0x4f82e118 // sdot v24.4s, v8.16b, v2.4b[0]\n"
@@ -1017,9 +1088,9 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
".inst 0x4fa0e9f3 // sdot v19.4s, v15.16b, v0.4b[3]\n"
".inst 0x4fa1e9f7 // sdot v23.4s, v15.16b, v1.4b[3]\n"
".inst 0x4fa2e9fb // sdot v27.4s, v15.16b, v2.4b[3]\n"
- "4:\n"
- "cbz %[blocks], 5f\n"
- "6:\n"
+ "5:\n"
+ "cbz %[blocks], 6f\n"
+ "7:\n"
"ldr q8, [%[b_ptr0]]\n"
"subs %[blocks], %[blocks], #0x1\n"
"ldr q9, [%[b_ptr0], #0x10]\n"
@@ -1044,23 +1115,23 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
".inst 0x4f80e173 // sdot v19.4s, v11.16b, v0.4b[0]\n"
".inst 0x4f81e177 // sdot v23.4s, v11.16b, v1.4b[0]\n"
".inst 0x4f82e17b // sdot v27.4s, v11.16b, v2.4b[0]\n"
- "b.ne 6b\n"
- "5:\n"
- "cbz %[odds], 7f\n"
+ "b.ne 7b\n"
+ "6:\n"
+ "cbz %[odds], 8f\n"
"ld1 {v0.b}[0], [%[a_ptr0]], #1\n"
"ld1 {v1.b}[0], [a_ptr1], #1\n"
"ld1 {v2.b}[0], [a_ptr2], #1\n"
"subs %[odds], %[odds], #0x1\n"
- "b.eq 8f\n"
+ "b.eq 9f\n"
"ld1 {v0.b}[1], [%[a_ptr0]], #1\n"
"ld1 {v1.b}[1], [a_ptr1], #1\n"
"ld1 {v2.b}[1], [a_ptr2], #1\n"
"subs %[odds], %[odds], #0x1\n"
- "b.eq 8f\n"
+ "b.eq 9f\n"
"ld1 {v0.b}[2], [%[a_ptr0]]\n"
"ld1 {v1.b}[2], [a_ptr1]\n"
"ld1 {v2.b}[2], [a_ptr2]\n"
- "8:\n"
+ "9:\n"
"ldr q8, [%[b_ptr0]]\n"
"ldr q9, [%[b_ptr0], #0x10]\n"
"ldr q10, [%[b_ptr0], #0x20]\n"
@@ -1077,7 +1148,7 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
".inst 0x4f80e173 // sdot v19.4s, v11.16b, v0.4b[0]\n"
".inst 0x4f81e177 // sdot v23.4s, v11.16b, v1.4b[0]\n"
".inst 0x4f82e17b // sdot v27.4s, v11.16b, v2.4b[0]\n"
- "7:\n"
+ "8:\n"
"str q16, [%[c_ptr0]]\n"
"str q17, [%[c_ptr0], #0x10]\n"
"str q18, [%[c_ptr0], #0x20]\n"
@@ -1109,46 +1180,82 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
"c_ptr1 .req X3\n"
"c_ptr2 .req X4\n"
"c_ptr3 .req X5\n"
+ "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr2, a_ptr1, %[lda]\n"
+ "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add a_ptr3, a_ptr2, %[lda]\n"
+ "add c_ptr3, c_ptr2, %[ldc]\n"
+ "cbnz %[append], 1f\n"
"movi v16.4s, #0\n"
"ldr q0, [%[a_ptr0]]\n"
"movi v17.4s, #0\n"
- "ldr q8, [%[b_ptr0]]\n"
+ "ldr q1, [a_ptr1]\n"
"movi v18.4s, #0\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
+ "ldr q2, [a_ptr2]\n"
"movi v19.4s, #0\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
+ "ldr q3, [a_ptr3]\n"
"movi v20.4s, #0\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
+ "ldr q8, [%[b_ptr0]]\n"
"movi v21.4s, #0\n"
- "ldr q12, [%[b_ptr0], #0x40]\n"
+ "ldr q9, [%[b_ptr0], #0x10]\n"
"movi v22.4s, #0\n"
- "ldr q13, [%[b_ptr0], #0x50]\n"
+ "ldr q10, [%[b_ptr0], #0x20]\n"
"movi v23.4s, #0\n"
- "ldr q14, [%[b_ptr0], #0x60]\n"
+ "ldr q11, [%[b_ptr0], #0x30]\n"
"movi v24.4s, #0\n"
- "add a_ptr1, %[a_ptr0], %[lda]\n"
+ "ldr q12, [%[b_ptr0], #0x40]\n"
"movi v25.4s, #0\n"
- "ldr q1, [a_ptr1]\n"
+ "ldr q13, [%[b_ptr0], #0x50]\n"
"movi v26.4s, #0\n"
- "add a_ptr2, a_ptr1, %[lda]\n"
+ "ldr q14, [%[b_ptr0], #0x60]\n"
"movi v27.4s, #0\n"
- "ldr q2, [a_ptr2]\n"
+ "add %[a_ptr0], %[a_ptr0], #0x10\n"
"movi v28.4s, #0\n"
- "add a_ptr3, a_ptr2, %[lda]\n"
+ "add a_ptr1, a_ptr1, #0x10\n"
"movi v29.4s, #0\n"
- "ldr q3, [a_ptr3]\n"
+ "add a_ptr2, a_ptr2, #0x10\n"
"movi v30.4s, #0\n"
- "add c_ptr1, %[c_ptr0], %[ldc]\n"
+ "add a_ptr3, a_ptr3, #0x10\n"
"movi v31.4s, #0\n"
- "add c_ptr2, c_ptr1, %[ldc]\n"
+ "add %[b_ptr0], %[b_ptr0], #0x80\n"
+ "cbz %[loops], 2f\n"
+ "b 3f\n"
+ "1:\n"
+ "ldr q16, [%[c_ptr0]]\n"
+ "ldr q17, [%[c_ptr0], #0x10]\n"
+ "ldr q18, [%[c_ptr0], #0x20]\n"
+ "ldr q19, [%[c_ptr0], #0x30]\n"
+ "ldr q20, [c_ptr1]\n"
+ "ldr q21, [c_ptr1, #0x10]\n"
+ "ldr q22, [c_ptr1, #0x20]\n"
+ "ldr q23, [c_ptr1, #0x30]\n"
+ "ldr q24, [c_ptr2]\n"
+ "ldr q25, [c_ptr2, #0x10]\n"
+ "ldr q26, [c_ptr2, #0x20]\n"
+ "ldr q27, [c_ptr2, #0x30]\n"
+ "ldr q28, [c_ptr3]\n"
+ "ldr q29, [c_ptr3, #0x10]\n"
+ "ldr q30, [c_ptr3, #0x20]\n"
+ "ldr q31, [c_ptr3, #0x30]\n"
+ "ldr q0, [%[a_ptr0]]\n"
"add %[a_ptr0], %[a_ptr0], #0x10\n"
- "add c_ptr3, c_ptr2, %[ldc]\n"
+ "ldr q1, [a_ptr1]\n"
"add a_ptr1, a_ptr1, #0x10\n"
+ "ldr q2, [a_ptr2]\n"
"add a_ptr2, a_ptr2, #0x10\n"
+ "ldr q3, [a_ptr3]\n"
"add a_ptr3, a_ptr3, #0x10\n"
+ "ldr q8, [%[b_ptr0]]\n"
+ "ldr q9, [%[b_ptr0], #0x10]\n"
+ "ldr q10, [%[b_ptr0], #0x20]\n"
+ "ldr q11, [%[b_ptr0], #0x30]\n"
+ "ldr q12, [%[b_ptr0], #0x40]\n"
+ "ldr q13, [%[b_ptr0], #0x50]\n"
+ "ldr q14, [%[b_ptr0], #0x60]\n"
"add %[b_ptr0], %[b_ptr0], #0x80\n"
- "cbz %[loops], 1f\n"
- "2:\n"
+ "cbz %[loops], 2f\n"
+ "3:\n"
".inst 0x4f80e110 // sdot v16.4s, v8.16b, v0.4b[0]\n"
"ldr q15, [%[b_ptr0], #-0x10]\n"
".inst 0x4f81e114 // sdot v20.4s, v8.16b, v1.4b[0]\n"
@@ -1328,14 +1435,14 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
".inst 0x4fa5e9f7 // sdot v23.4s, v15.16b, v5.4b[3]\n"
".inst 0x4fa6e9fb // sdot v27.4s, v15.16b, v6.4b[3]\n"
".inst 0x4fa7e9ff // sdot v31.4s, v15.16b, v7.4b[3]\n"
- "b.ne 2b\n"
- "1:\n"
+ "b.ne 3b\n"
+ "2:\n"
"ldr q15, [%[b_ptr0], #-0x10]\n"
"prfm PSTL1KEEP, [%[c_ptr0]]\n"
"prfm PSTL1KEEP, [c_ptr1]\n"
"prfm PSTL1KEEP, [c_ptr2]\n"
"prfm PSTL1KEEP, [c_ptr3]\n"
- "cbz %[regs], 3f\n"
+ "cbz %[regs], 4f\n"
".inst 0x4f80e110 // sdot v16.4s, v8.16b, v0.4b[0]\n"
"ldr q4, [%[a_ptr0]]\n"
".inst 0x4f81e114 // sdot v20.4s, v8.16b, v1.4b[0]\n"
@@ -1498,8 +1605,8 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
".inst 0x4fa5e9f7 // sdot v23.4s, v15.16b, v5.4b[3]\n"
".inst 0x4fa6e9fb // sdot v27.4s, v15.16b, v6.4b[3]\n"
".inst 0x4fa7e9ff // sdot v31.4s, v15.16b, v7.4b[3]\n"
- "b 4f\n"
- "3:\n"
+ "b 5f\n"
+ "4:\n"
".inst 0x4f80e110 // sdot v16.4s, v8.16b, v0.4b[0]\n"
".inst 0x4f81e114 // sdot v20.4s, v8.16b, v1.4b[0]\n"
".inst 0x4f82e118 // sdot v24.4s, v8.16b, v2.4b[0]\n"
@@ -1573,9 +1680,9 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
".inst 0x4fa1e9f7 // sdot v23.4s, v15.16b, v1.4b[3]\n"
".inst 0x4fa2e9fb // sdot v27.4s, v15.16b, v2.4b[3]\n"
".inst 0x4fa3e9ff // sdot v31.4s, v15.16b, v3.4b[3]\n"
- "4:\n"
- "cbz %[blocks], 5f\n"
- "6:\n"
+ "5:\n"
+ "cbz %[blocks], 6f\n"
+ "7:\n"
"ldr q8, [%[b_ptr0]]\n"
"subs %[blocks], %[blocks], #0x1\n"
"ldr q9, [%[b_ptr0], #0x10]\n"
@@ -1606,26 +1713,26 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
".inst 0x4f81e177 // sdot v23.4s, v11.16b, v1.4b[0]\n"
".inst 0x4f82e17b // sdot v27.4s, v11.16b, v2.4b[0]\n"
".inst 0x4f83e17f // sdot v31.4s, v11.16b, v3.4b[0]\n"
- "b.ne 6b\n"
- "5:\n"
- "cbz %[odds], 7f\n"
+ "b.ne 7b\n"
+ "6:\n"
+ "cbz %[odds], 8f\n"
"ld1 {v0.b}[0], [%[a_ptr0]], #1\n"
"ld1 {v1.b}[0], [a_ptr1], #1\n"
"ld1 {v2.b}[0], [a_ptr2], #1\n"
"ld1 {v3.b}[0], [a_ptr3], #1\n"
"subs %[odds], %[odds], #0x1\n"
- "b.eq 8f\n"
+ "b.eq 9f\n"
"ld1 {v0.b}[1], [%[a_ptr0]], #1\n"
"ld1 {v1.b}[1], [a_ptr1], #1\n"
"ld1 {v2.b}[1], [a_ptr2], #1\n"
"ld1 {v3.b}[1], [a_ptr3], #1\n"
"subs %[odds], %[odds], #0x1\n"
- "b.eq 8f\n"
+ "b.eq 9f\n"
"ld1 {v0.b}[2], [%[a_ptr0]]\n"
"ld1 {v1.b}[2], [a_ptr1]\n"
"ld1 {v2.b}[2], [a_ptr2]\n"
"ld1 {v3.b}[2], [a_ptr3]\n"
- "8:\n"
+ "9:\n"
"ldr q8, [%[b_ptr0]]\n"
"ldr q9, [%[b_ptr0], #0x10]\n"
"ldr q10, [%[b_ptr0], #0x20]\n"
@@ -1646,7 +1753,7 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
".inst 0x4f81e177 // sdot v23.4s, v11.16b, v1.4b[0]\n"
".inst 0x4f82e17b // sdot v27.4s, v11.16b, v2.4b[0]\n"
".inst 0x4f83e17f // sdot v31.4s, v11.16b, v3.4b[0]\n"
- "7:\n"
+ "8:\n"
"str q16, [%[c_ptr0]]\n"
"str q17, [%[c_ptr0], #0x10]\n"
"str q18, [%[c_ptr0], #0x20]\n"