author     Georgios Pinitas <georgios.pinitas@arm.com>    2018-10-26 19:05:32 +0100
committer  Georgios Pinitas <georgios.pinitas@arm.com>    2018-11-08 12:00:31 +0000
commit     421405b6a21b124288a750e2da26dc01eb7391cb (patch)
tree       35f5655ce9d8b5921cb03630534f532e4eb47bf5 /src/core/NEON/kernels/arm_gemm/kernels
parent     f1adf11c776aebaa8da1b8644a4ba2453afd2b81 (diff)
download   ComputeLibrary-421405b6a21b124288a750e2da26dc01eb7391cb.tar.gz
COMPMID-1675: Add SVE support
Change-Id: I86679adff556b6ffc9929b35cbf1b59b3958bdb1
Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/kernels')
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_3VLx8.hpp           72
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_3VLx8/generic.cpp  324
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_3VLx8.hpp           72
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_3VLx8/generic.cpp  333
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_3VLx8.hpp          72
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_3VLx8/generic.cpp 334
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_3VLx8.hpp          72
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_3VLx8/generic.cpp 328
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_sgemm_3VLx8.hpp                          75
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_sgemm_3VLx8/generic.cpp                 366
10 files changed, 2048 insertions, 0 deletions
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_3VLx8.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_3VLx8.hpp
new file mode 100644
index 0000000000..3fd738e673
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_3VLx8.hpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+
+#include "../std_transforms_sve.hpp"
+
+namespace arm_gemm {
+
+// Actual kernel implementations
+void sve_interleaved_fp16_mla_3VLx8(const __fp16 *, const __fp16 *, __fp16 *, int, int, int);
+
+class interleaved_fp16_mla_3VLx8 {
+public:
+ typedef __fp16 operand_type;
+ typedef __fp16 result_type;
+
+ typedef void (*kern_type)(const __fp16 *, const __fp16 *, __fp16 *, int, int, int);
+
+ /* Kernel blocking parameters */
+ static int out_width()
+ {
+ return svcnth() * 3;
+ }
+
+ static int out_height()
+ {
+ return 8;
+ }
+
+ static int k_unroll()
+ {
+ return 1;
+ }
+
+ // Use the standard fixed size transforms.
+ StdTransformsSVE<operand_type, result_type, 8, 3, 1, 1> transforms = {};
+
+ kern_type kernel=sve_interleaved_fp16_mla_3VLx8;
+
+ interleaved_fp16_mla_3VLx8(const CPUInfo *ci)
+ {
+
+ }
+};
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
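
The blocking parameters above fix the tile each kernel invocation produces: 8 rows by 3 SVE vectors of columns, so the column count scales with the hardware vector length rather than being a compile-time constant. A minimal standalone sketch (for illustration only, not part of this patch) that queries the resulting fp16 tile size via the ACLE intrinsic svcnth():

    // Build with an SVE-capable compiler, e.g. -march=armv8.2-a+sve (flag is an example).
    #include <arm_sve.h>
    #include <cstdio>

    int main() {
        // out_height() is fixed at 8; out_width() is 3 vectors of fp16 lanes,
        // e.g. 8 x 24 on 128-bit SVE hardware and 8 x 96 on 512-bit SVE hardware.
        std::printf("tile: 8 x %d\n", static_cast<int>(svcnth() * 3));
        return 0;
    }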
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_3VLx8/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_3VLx8/generic.cpp
new file mode 100644
index 0000000000..92ec888244
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_3VLx8/generic.cpp
@@ -0,0 +1,324 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+
+
+#include "../../asmlib.hpp"
+
+namespace arm_gemm {
+
+void sve_interleaved_fp16_mla_3VLx8(const __fp16 *Apanel, const __fp16 *Bpanel, __fp16 *Cpanel, int ablocks, int bblocks, int K) {
+ const __fp16 *a_ptr = Apanel;
+ __fp16 *c_ptr = Cpanel;
+
+ const long loops_count = (K / 2) - 1;
+ const long tails_count = K % 2;
+
+ for (int yb=0; yb<ablocks; yb++) {
+ const __fp16 *a_ptr0 = a_ptr;
+ const __fp16 *b_ptr = Bpanel;
+
+ for (int xb=0; xb<bblocks; xb++) {
+ a_ptr = a_ptr0;
+ long loops = loops_count;
+ long tails = tails_count;
+
+ __asm __volatile (
+ "mov z8.h, #0\n"
+ "ptrue p0.h\n"
+ "mov z9.h, #0\n"
+ "ld1rqh z0.h, p0/z, [%[a_ptr]]\n"
+ "mov z10.h, #0\n"
+ "ld1h z2.h, p0/z, [%[b_ptr]]\n"
+ "mov z11.h, #0\n"
+ "ld1h z3.h, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "mov z12.h, #0\n"
+ "ld1h z4.h, p0/z, [%[b_ptr], #2, MUL VL]\n"
+ "mov z13.h, #0\n"
+ "ld1h z5.h, p0/z, [%[b_ptr], #3, MUL VL]\n"
+ "mov z14.h, #0\n"
+ "ld1h z6.h, p0/z, [%[b_ptr], #4, MUL VL]\n"
+ "mov z15.h, #0\n"
+ "add %[a_ptr], %[a_ptr], #0x20\n"
+ "mov z16.h, #0\n"
+ "addvl %[b_ptr], %[b_ptr], #6\n"
+ "mov z17.h, #0\n"
+ "mov z18.h, #0\n"
+ "mov z19.h, #0\n"
+ "mov z20.h, #0\n"
+ "mov z21.h, #0\n"
+ "mov z22.h, #0\n"
+ "mov z23.h, #0\n"
+ "mov z24.h, #0\n"
+ "mov z25.h, #0\n"
+ "mov z26.h, #0\n"
+ "mov z27.h, #0\n"
+ "mov z28.h, #0\n"
+ "mov z29.h, #0\n"
+ "mov z30.h, #0\n"
+ "mov z31.h, #0\n"
+ "cbz %[loops], 1f\n"
+ "2:\n"
+ "fmla z8.h, z2.h, z0.h[0]\n"
+ "ld1h z7.h, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "fmla z9.h, z2.h, z0.h[1]\n"
+ "ld1rqh z1.h, p0/z, [%[a_ptr], #-0x10]\n"
+ "fmla z10.h, z2.h, z0.h[2]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "fmla z11.h, z2.h, z0.h[3]\n"
+ "fmla z12.h, z2.h, z0.h[4]\n"
+ "fmla z13.h, z2.h, z0.h[5]\n"
+ "fmla z14.h, z2.h, z0.h[6]\n"
+ "fmla z15.h, z2.h, z0.h[7]\n"
+ "ld1h z2.h, p0/z, [%[b_ptr]]\n"
+ "fmla z16.h, z3.h, z0.h[0]\n"
+ "fmla z17.h, z3.h, z0.h[1]\n"
+ "fmla z18.h, z3.h, z0.h[2]\n"
+ "fmla z19.h, z3.h, z0.h[3]\n"
+ "fmla z20.h, z3.h, z0.h[4]\n"
+ "fmla z21.h, z3.h, z0.h[5]\n"
+ "fmla z22.h, z3.h, z0.h[6]\n"
+ "fmla z23.h, z3.h, z0.h[7]\n"
+ "ld1h z3.h, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "fmla z24.h, z4.h, z0.h[0]\n"
+ "fmla z25.h, z4.h, z0.h[1]\n"
+ "fmla z26.h, z4.h, z0.h[2]\n"
+ "fmla z27.h, z4.h, z0.h[3]\n"
+ "fmla z28.h, z4.h, z0.h[4]\n"
+ "fmla z29.h, z4.h, z0.h[5]\n"
+ "fmla z30.h, z4.h, z0.h[6]\n"
+ "fmla z31.h, z4.h, z0.h[7]\n"
+ "ld1h z4.h, p0/z, [%[b_ptr], #2, MUL VL]\n"
+ "fmla z8.h, z5.h, z1.h[0]\n"
+ "ld1rqh z0.h, p0/z, [%[a_ptr]]\n"
+ "fmla z9.h, z5.h, z1.h[1]\n"
+ "add %[a_ptr], %[a_ptr], #0x20\n"
+ "fmla z10.h, z5.h, z1.h[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #6\n"
+ "fmla z11.h, z5.h, z1.h[3]\n"
+ "fmla z12.h, z5.h, z1.h[4]\n"
+ "fmla z13.h, z5.h, z1.h[5]\n"
+ "fmla z14.h, z5.h, z1.h[6]\n"
+ "fmla z15.h, z5.h, z1.h[7]\n"
+ "ld1h z5.h, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "fmla z16.h, z6.h, z1.h[0]\n"
+ "fmla z17.h, z6.h, z1.h[1]\n"
+ "fmla z18.h, z6.h, z1.h[2]\n"
+ "fmla z19.h, z6.h, z1.h[3]\n"
+ "fmla z20.h, z6.h, z1.h[4]\n"
+ "fmla z21.h, z6.h, z1.h[5]\n"
+ "fmla z22.h, z6.h, z1.h[6]\n"
+ "fmla z23.h, z6.h, z1.h[7]\n"
+ "ld1h z6.h, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "fmla z24.h, z7.h, z1.h[0]\n"
+ "fmla z25.h, z7.h, z1.h[1]\n"
+ "fmla z26.h, z7.h, z1.h[2]\n"
+ "fmla z27.h, z7.h, z1.h[3]\n"
+ "fmla z28.h, z7.h, z1.h[4]\n"
+ "fmla z29.h, z7.h, z1.h[5]\n"
+ "fmla z30.h, z7.h, z1.h[6]\n"
+ "fmla z31.h, z7.h, z1.h[7]\n"
+ "b.ne 2b\n"
+ "1:\n"
+ "cbz %[tails], 3f\n"
+ "fmla z8.h, z2.h, z0.h[0]\n"
+ "ld1h z7.h, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "fmla z9.h, z2.h, z0.h[1]\n"
+ "ld1rqh z1.h, p0/z, [%[a_ptr], #-0x10]\n"
+ "fmla z10.h, z2.h, z0.h[2]\n"
+ "fmla z11.h, z2.h, z0.h[3]\n"
+ "fmla z12.h, z2.h, z0.h[4]\n"
+ "fmla z13.h, z2.h, z0.h[5]\n"
+ "fmla z14.h, z2.h, z0.h[6]\n"
+ "fmla z15.h, z2.h, z0.h[7]\n"
+ "ld1h z2.h, p0/z, [%[b_ptr]]\n"
+ "fmla z16.h, z3.h, z0.h[0]\n"
+ "fmla z17.h, z3.h, z0.h[1]\n"
+ "fmla z18.h, z3.h, z0.h[2]\n"
+ "fmla z19.h, z3.h, z0.h[3]\n"
+ "fmla z20.h, z3.h, z0.h[4]\n"
+ "fmla z21.h, z3.h, z0.h[5]\n"
+ "fmla z22.h, z3.h, z0.h[6]\n"
+ "fmla z23.h, z3.h, z0.h[7]\n"
+ "ld1h z3.h, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "fmla z24.h, z4.h, z0.h[0]\n"
+ "fmla z25.h, z4.h, z0.h[1]\n"
+ "fmla z26.h, z4.h, z0.h[2]\n"
+ "fmla z27.h, z4.h, z0.h[3]\n"
+ "fmla z28.h, z4.h, z0.h[4]\n"
+ "fmla z29.h, z4.h, z0.h[5]\n"
+ "fmla z30.h, z4.h, z0.h[6]\n"
+ "fmla z31.h, z4.h, z0.h[7]\n"
+ "ld1h z4.h, p0/z, [%[b_ptr], #2, MUL VL]\n"
+ "fmla z8.h, z5.h, z1.h[0]\n"
+ "ld1rqh z0.h, p0/z, [%[a_ptr]]\n"
+ "fmla z9.h, z5.h, z1.h[1]\n"
+ "add %[a_ptr], %[a_ptr], #0x10\n"
+ "fmla z10.h, z5.h, z1.h[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #3\n"
+ "fmla z11.h, z5.h, z1.h[3]\n"
+ "fmla z12.h, z5.h, z1.h[4]\n"
+ "fmla z13.h, z5.h, z1.h[5]\n"
+ "fmla z14.h, z5.h, z1.h[6]\n"
+ "fmla z15.h, z5.h, z1.h[7]\n"
+ "fmla z16.h, z6.h, z1.h[0]\n"
+ "fmla z17.h, z6.h, z1.h[1]\n"
+ "fmla z18.h, z6.h, z1.h[2]\n"
+ "fmla z19.h, z6.h, z1.h[3]\n"
+ "fmla z20.h, z6.h, z1.h[4]\n"
+ "fmla z21.h, z6.h, z1.h[5]\n"
+ "fmla z22.h, z6.h, z1.h[6]\n"
+ "fmla z23.h, z6.h, z1.h[7]\n"
+ "fmla z24.h, z7.h, z1.h[0]\n"
+ "fmla z25.h, z7.h, z1.h[1]\n"
+ "fmla z26.h, z7.h, z1.h[2]\n"
+ "fmla z27.h, z7.h, z1.h[3]\n"
+ "fmla z28.h, z7.h, z1.h[4]\n"
+ "fmla z29.h, z7.h, z1.h[5]\n"
+ "fmla z30.h, z7.h, z1.h[6]\n"
+ "fmla z31.h, z7.h, z1.h[7]\n"
+ "fmla z8.h, z2.h, z0.h[0]\n"
+ "st1h z8.h, p0, [%[c_ptr]]\n"
+ "fmla z9.h, z2.h, z0.h[1]\n"
+ "fmla z10.h, z2.h, z0.h[2]\n"
+ "fmla z11.h, z2.h, z0.h[3]\n"
+ "fmla z12.h, z2.h, z0.h[4]\n"
+ "fmla z13.h, z2.h, z0.h[5]\n"
+ "fmla z14.h, z2.h, z0.h[6]\n"
+ "fmla z15.h, z2.h, z0.h[7]\n"
+ "fmla z16.h, z3.h, z0.h[0]\n"
+ "st1h z16.h, p0, [%[c_ptr], #1, MUL VL]\n"
+ "fmla z17.h, z3.h, z0.h[1]\n"
+ "fmla z18.h, z3.h, z0.h[2]\n"
+ "fmla z19.h, z3.h, z0.h[3]\n"
+ "fmla z20.h, z3.h, z0.h[4]\n"
+ "fmla z21.h, z3.h, z0.h[5]\n"
+ "fmla z22.h, z3.h, z0.h[6]\n"
+ "fmla z23.h, z3.h, z0.h[7]\n"
+ "fmla z24.h, z4.h, z0.h[0]\n"
+ "st1h z24.h, p0, [%[c_ptr], #2, MUL VL]\n"
+ "fmla z25.h, z4.h, z0.h[1]\n"
+ "st1h z9.h, p0, [%[c_ptr], #3, MUL VL]\n"
+ "fmla z26.h, z4.h, z0.h[2]\n"
+ "st1h z17.h, p0, [%[c_ptr], #4, MUL VL]\n"
+ "fmla z27.h, z4.h, z0.h[3]\n"
+ "st1h z25.h, p0, [%[c_ptr], #5, MUL VL]\n"
+ "fmla z28.h, z4.h, z0.h[4]\n"
+ "st1h z10.h, p0, [%[c_ptr], #6, MUL VL]\n"
+ "fmla z29.h, z4.h, z0.h[5]\n"
+ "st1h z18.h, p0, [%[c_ptr], #7, MUL VL]\n"
+ "fmla z30.h, z4.h, z0.h[6]\n"
+ "addvl %[c_ptr], %[c_ptr], #16\n"
+ "fmla z31.h, z4.h, z0.h[7]\n"
+ "b 4f\n"
+ "3:\n"
+ "fmla z8.h, z2.h, z0.h[0]\n"
+ "ld1h z7.h, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "fmla z9.h, z2.h, z0.h[1]\n"
+ "ld1rqh z1.h, p0/z, [%[a_ptr], #-0x10]\n"
+ "fmla z10.h, z2.h, z0.h[2]\n"
+ "fmla z11.h, z2.h, z0.h[3]\n"
+ "fmla z12.h, z2.h, z0.h[4]\n"
+ "fmla z13.h, z2.h, z0.h[5]\n"
+ "fmla z14.h, z2.h, z0.h[6]\n"
+ "fmla z15.h, z2.h, z0.h[7]\n"
+ "fmla z16.h, z3.h, z0.h[0]\n"
+ "fmla z17.h, z3.h, z0.h[1]\n"
+ "fmla z18.h, z3.h, z0.h[2]\n"
+ "fmla z19.h, z3.h, z0.h[3]\n"
+ "fmla z20.h, z3.h, z0.h[4]\n"
+ "fmla z21.h, z3.h, z0.h[5]\n"
+ "fmla z22.h, z3.h, z0.h[6]\n"
+ "fmla z23.h, z3.h, z0.h[7]\n"
+ "fmla z24.h, z4.h, z0.h[0]\n"
+ "fmla z25.h, z4.h, z0.h[1]\n"
+ "fmla z26.h, z4.h, z0.h[2]\n"
+ "fmla z27.h, z4.h, z0.h[3]\n"
+ "fmla z28.h, z4.h, z0.h[4]\n"
+ "fmla z29.h, z4.h, z0.h[5]\n"
+ "fmla z30.h, z4.h, z0.h[6]\n"
+ "fmla z31.h, z4.h, z0.h[7]\n"
+ "fmla z8.h, z5.h, z1.h[0]\n"
+ "st1h z8.h, p0, [%[c_ptr]]\n"
+ "fmla z9.h, z5.h, z1.h[1]\n"
+ "fmla z10.h, z5.h, z1.h[2]\n"
+ "fmla z11.h, z5.h, z1.h[3]\n"
+ "fmla z12.h, z5.h, z1.h[4]\n"
+ "fmla z13.h, z5.h, z1.h[5]\n"
+ "fmla z14.h, z5.h, z1.h[6]\n"
+ "fmla z15.h, z5.h, z1.h[7]\n"
+ "fmla z16.h, z6.h, z1.h[0]\n"
+ "st1h z16.h, p0, [%[c_ptr], #1, MUL VL]\n"
+ "fmla z17.h, z6.h, z1.h[1]\n"
+ "fmla z18.h, z6.h, z1.h[2]\n"
+ "fmla z19.h, z6.h, z1.h[3]\n"
+ "fmla z20.h, z6.h, z1.h[4]\n"
+ "fmla z21.h, z6.h, z1.h[5]\n"
+ "fmla z22.h, z6.h, z1.h[6]\n"
+ "fmla z23.h, z6.h, z1.h[7]\n"
+ "fmla z24.h, z7.h, z1.h[0]\n"
+ "st1h z24.h, p0, [%[c_ptr], #2, MUL VL]\n"
+ "fmla z25.h, z7.h, z1.h[1]\n"
+ "st1h z9.h, p0, [%[c_ptr], #3, MUL VL]\n"
+ "fmla z26.h, z7.h, z1.h[2]\n"
+ "st1h z17.h, p0, [%[c_ptr], #4, MUL VL]\n"
+ "fmla z27.h, z7.h, z1.h[3]\n"
+ "st1h z25.h, p0, [%[c_ptr], #5, MUL VL]\n"
+ "fmla z28.h, z7.h, z1.h[4]\n"
+ "st1h z10.h, p0, [%[c_ptr], #6, MUL VL]\n"
+ "fmla z29.h, z7.h, z1.h[5]\n"
+ "st1h z18.h, p0, [%[c_ptr], #7, MUL VL]\n"
+ "fmla z30.h, z7.h, z1.h[6]\n"
+ "addvl %[c_ptr], %[c_ptr], #16\n"
+ "fmla z31.h, z7.h, z1.h[7]\n"
+ "4:\n"
+ "st1h z26.h, p0, [%[c_ptr], #-8, MUL VL]\n"
+ "st1h z11.h, p0, [%[c_ptr], #-7, MUL VL]\n"
+ "st1h z19.h, p0, [%[c_ptr], #-6, MUL VL]\n"
+ "st1h z27.h, p0, [%[c_ptr], #-5, MUL VL]\n"
+ "st1h z12.h, p0, [%[c_ptr], #-4, MUL VL]\n"
+ "st1h z20.h, p0, [%[c_ptr], #-3, MUL VL]\n"
+ "st1h z28.h, p0, [%[c_ptr], #-2, MUL VL]\n"
+ "st1h z13.h, p0, [%[c_ptr], #-1, MUL VL]\n"
+ "st1h z21.h, p0, [%[c_ptr]]\n"
+ "st1h z29.h, p0, [%[c_ptr], #1, MUL VL]\n"
+ "st1h z14.h, p0, [%[c_ptr], #2, MUL VL]\n"
+ "st1h z22.h, p0, [%[c_ptr], #3, MUL VL]\n"
+ "st1h z30.h, p0, [%[c_ptr], #4, MUL VL]\n"
+ "st1h z15.h, p0, [%[c_ptr], #5, MUL VL]\n"
+ "st1h z23.h, p0, [%[c_ptr], #6, MUL VL]\n"
+ "st1h z31.h, p0, [%[c_ptr], #7, MUL VL]\n"
+ "addvl %[c_ptr], %[c_ptr], #8\n"
+ : [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
+ [loops] "+r" (loops), [tails] "+r" (tails)
+ :
+ : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ }
+ }
+}
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
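
The assembly above consumes pre-interleaved panels: Apanel supplies 8 fp16 values per K step (one per output row), Bpanel supplies 3 vectors of fp16 per K step (one per output column), and Cpanel receives the 8 x (3 x VL) tile row by row. A plain scalar sketch of that contract, written as a reading of the code rather than anything shipped in the library:

    // W stands for the kernel's out_width(), i.e. 3 * svcnth().
    static void reference_tile(const __fp16 *Apanel, const __fp16 *Bpanel,
                               __fp16 *Cpanel, int K, int W) {
        for (int r = 0; r < 8; r++)
            for (int c = 0; c < W; c++)
                Cpanel[r * W + c] = (__fp16)0.0f;          // accumulators start at zero
        for (int k = 0; k < K; k++)
            for (int r = 0; r < 8; r++)
                for (int c = 0; c < W; c++)
                    Cpanel[r * W + c] += Apanel[k * 8 + r] * Bpanel[k * W + c];
    }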
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_3VLx8.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_3VLx8.hpp
new file mode 100644
index 0000000000..b2327f3070
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_3VLx8.hpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+
+#include "../std_transforms_sve.hpp"
+
+namespace arm_gemm {
+
+// Actual kernel implementations
+void sve_interleaved_fp32_mla_3VLx8(const float *, const float *, float *, int, int, int);
+
+class interleaved_fp32_mla_3VLx8 {
+public:
+ typedef float operand_type;
+ typedef float result_type;
+
+ typedef void (*kern_type)(const float *, const float *, float *, int, int, int);
+
+ /* Kernel blocking parameters */
+ static int out_width()
+ {
+ return svcntw() * 3;
+ }
+
+ static int out_height()
+ {
+ return 8;
+ }
+
+ static int k_unroll()
+ {
+ return 1;
+ }
+
+ // Use the standard fixed size transforms.
+ StdTransformsSVE<operand_type, result_type, 8, 3, 1, 1> transforms = {};
+
+ kern_type kernel=sve_interleaved_fp32_mla_3VLx8;
+
+ interleaved_fp32_mla_3VLx8(const CPUInfo *ci)
+ {
+
+ }
+};
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_3VLx8/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_3VLx8/generic.cpp
new file mode 100644
index 0000000000..bb08fc7cb0
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_3VLx8/generic.cpp
@@ -0,0 +1,333 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+
+
+#include "../../asmlib.hpp"
+
+namespace arm_gemm {
+
+void sve_interleaved_fp32_mla_3VLx8(const float *Apanel, const float *Bpanel, float *Cpanel, int ablocks, int bblocks, int K) {
+ const float *a_ptr = Apanel;
+ float *c_ptr = Cpanel;
+
+ const long loops_count = (K / 2) - 1;
+ const long tails_count = K % 2;
+
+ for (int yb=0; yb<ablocks; yb++) {
+ const float *a_ptr0 = a_ptr;
+ const float *b_ptr = Bpanel;
+
+ for (int xb=0; xb<bblocks; xb++) {
+ a_ptr = a_ptr0;
+ long loops = loops_count;
+ long tails = tails_count;
+
+ __asm __volatile (
+ "mov z8.s, #0\n"
+ "ptrue p0.s\n"
+ "mov z9.s, #0\n"
+ "ld1rqw z0.s, p0/z, [%[a_ptr]]\n"
+ "mov z10.s, #0\n"
+ "ld1w z4.s, p0/z, [%[b_ptr]]\n"
+ "mov z11.s, #0\n"
+ "ld1rqw z1.s, p0/z, [%[a_ptr], #0x10]\n"
+ "mov z12.s, #0\n"
+ "ld1w z5.s, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "mov z13.s, #0\n"
+ "ld1rqw z2.s, p0/z, [%[a_ptr], #0x20]\n"
+ "mov z14.s, #0\n"
+ "add %[a_ptr], %[a_ptr], #0x40\n"
+ "mov z15.s, #0\n"
+ "addvl %[b_ptr], %[b_ptr], #3\n"
+ "mov z16.s, #0\n"
+ "mov z17.s, #0\n"
+ "mov z18.s, #0\n"
+ "mov z19.s, #0\n"
+ "mov z20.s, #0\n"
+ "mov z21.s, #0\n"
+ "mov z22.s, #0\n"
+ "mov z23.s, #0\n"
+ "mov z24.s, #0\n"
+ "mov z25.s, #0\n"
+ "mov z26.s, #0\n"
+ "mov z27.s, #0\n"
+ "mov z28.s, #0\n"
+ "mov z29.s, #0\n"
+ "mov z30.s, #0\n"
+ "mov z31.s, #0\n"
+ "cbz %[loops], 1f\n"
+ "2:\n"
+ "fmla z8.s, z4.s, z0.s[0]\n"
+ "ld1w z6.s, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "fmla z9.s, z4.s, z0.s[1]\n"
+ "ld1rqw z3.s, p0/z, [%[a_ptr], #-0x10]\n"
+ "fmla z10.s, z4.s, z0.s[2]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "fmla z11.s, z4.s, z0.s[3]\n"
+ "fmla z20.s, z4.s, z1.s[0]\n"
+ "fmla z21.s, z4.s, z1.s[1]\n"
+ "fmla z22.s, z4.s, z1.s[2]\n"
+ "fmla z23.s, z4.s, z1.s[3]\n"
+ "ld1w z4.s, p0/z, [%[b_ptr]]\n"
+ "fmla z12.s, z5.s, z0.s[0]\n"
+ "fmla z13.s, z5.s, z0.s[1]\n"
+ "fmla z14.s, z5.s, z0.s[2]\n"
+ "fmla z15.s, z5.s, z0.s[3]\n"
+ "fmla z24.s, z5.s, z1.s[0]\n"
+ "fmla z25.s, z5.s, z1.s[1]\n"
+ "fmla z26.s, z5.s, z1.s[2]\n"
+ "fmla z27.s, z5.s, z1.s[3]\n"
+ "ld1w z5.s, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "fmla z16.s, z6.s, z0.s[0]\n"
+ "fmla z17.s, z6.s, z0.s[1]\n"
+ "fmla z18.s, z6.s, z0.s[2]\n"
+ "fmla z19.s, z6.s, z0.s[3]\n"
+ "ld1rqw z0.s, p0/z, [%[a_ptr]]\n"
+ "fmla z28.s, z6.s, z1.s[0]\n"
+ "fmla z29.s, z6.s, z1.s[1]\n"
+ "fmla z30.s, z6.s, z1.s[2]\n"
+ "fmla z31.s, z6.s, z1.s[3]\n"
+ "ld1w z6.s, p0/z, [%[b_ptr], #2, MUL VL]\n"
+ "fmla z8.s, z4.s, z2.s[0]\n"
+ "ld1rqw z1.s, p0/z, [%[a_ptr], #0x10]\n"
+ "fmla z9.s, z4.s, z2.s[1]\n"
+ "add %[a_ptr], %[a_ptr], #0x40\n"
+ "fmla z10.s, z4.s, z2.s[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #6\n"
+ "fmla z11.s, z4.s, z2.s[3]\n"
+ "fmla z20.s, z4.s, z3.s[0]\n"
+ "fmla z21.s, z4.s, z3.s[1]\n"
+ "fmla z22.s, z4.s, z3.s[2]\n"
+ "fmla z23.s, z4.s, z3.s[3]\n"
+ "ld1w z4.s, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "fmla z12.s, z5.s, z2.s[0]\n"
+ "fmla z13.s, z5.s, z2.s[1]\n"
+ "fmla z14.s, z5.s, z2.s[2]\n"
+ "fmla z15.s, z5.s, z2.s[3]\n"
+ "fmla z24.s, z5.s, z3.s[0]\n"
+ "fmla z25.s, z5.s, z3.s[1]\n"
+ "fmla z26.s, z5.s, z3.s[2]\n"
+ "fmla z27.s, z5.s, z3.s[3]\n"
+ "ld1w z5.s, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "fmla z16.s, z6.s, z2.s[0]\n"
+ "fmla z17.s, z6.s, z2.s[1]\n"
+ "fmla z18.s, z6.s, z2.s[2]\n"
+ "fmla z19.s, z6.s, z2.s[3]\n"
+ "ld1rqw z2.s, p0/z, [%[a_ptr], #-0x20]\n"
+ "fmla z28.s, z6.s, z3.s[0]\n"
+ "fmla z29.s, z6.s, z3.s[1]\n"
+ "fmla z30.s, z6.s, z3.s[2]\n"
+ "fmla z31.s, z6.s, z3.s[3]\n"
+ "b.ne 2b\n"
+ "1:\n"
+ "cbz %[tails], 3f\n"
+ "fmla z8.s, z4.s, z0.s[0]\n"
+ "ld1w z6.s, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "fmla z9.s, z4.s, z0.s[1]\n"
+ "ld1rqw z3.s, p0/z, [%[a_ptr], #-0x10]\n"
+ "fmla z10.s, z4.s, z0.s[2]\n"
+ "fmla z11.s, z4.s, z0.s[3]\n"
+ "fmla z20.s, z4.s, z1.s[0]\n"
+ "fmla z21.s, z4.s, z1.s[1]\n"
+ "fmla z22.s, z4.s, z1.s[2]\n"
+ "fmla z23.s, z4.s, z1.s[3]\n"
+ "ld1w z4.s, p0/z, [%[b_ptr]]\n"
+ "fmla z12.s, z5.s, z0.s[0]\n"
+ "fmla z13.s, z5.s, z0.s[1]\n"
+ "fmla z14.s, z5.s, z0.s[2]\n"
+ "fmla z15.s, z5.s, z0.s[3]\n"
+ "fmla z24.s, z5.s, z1.s[0]\n"
+ "fmla z25.s, z5.s, z1.s[1]\n"
+ "fmla z26.s, z5.s, z1.s[2]\n"
+ "fmla z27.s, z5.s, z1.s[3]\n"
+ "ld1w z5.s, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "fmla z16.s, z6.s, z0.s[0]\n"
+ "fmla z17.s, z6.s, z0.s[1]\n"
+ "fmla z18.s, z6.s, z0.s[2]\n"
+ "fmla z19.s, z6.s, z0.s[3]\n"
+ "ld1rqw z0.s, p0/z, [%[a_ptr]]\n"
+ "fmla z28.s, z6.s, z1.s[0]\n"
+ "fmla z29.s, z6.s, z1.s[1]\n"
+ "fmla z30.s, z6.s, z1.s[2]\n"
+ "fmla z31.s, z6.s, z1.s[3]\n"
+ "ld1w z6.s, p0/z, [%[b_ptr], #2, MUL VL]\n"
+ "fmla z8.s, z4.s, z2.s[0]\n"
+ "ld1rqw z1.s, p0/z, [%[a_ptr], #0x10]\n"
+ "fmla z9.s, z4.s, z2.s[1]\n"
+ "add %[a_ptr], %[a_ptr], #0x20\n"
+ "fmla z10.s, z4.s, z2.s[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #6\n"
+ "fmla z11.s, z4.s, z2.s[3]\n"
+ "fmla z20.s, z4.s, z3.s[0]\n"
+ "fmla z21.s, z4.s, z3.s[1]\n"
+ "fmla z22.s, z4.s, z3.s[2]\n"
+ "fmla z23.s, z4.s, z3.s[3]\n"
+ "ld1w z4.s, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "fmla z12.s, z5.s, z2.s[0]\n"
+ "fmla z13.s, z5.s, z2.s[1]\n"
+ "fmla z14.s, z5.s, z2.s[2]\n"
+ "fmla z15.s, z5.s, z2.s[3]\n"
+ "fmla z24.s, z5.s, z3.s[0]\n"
+ "fmla z25.s, z5.s, z3.s[1]\n"
+ "fmla z26.s, z5.s, z3.s[2]\n"
+ "fmla z27.s, z5.s, z3.s[3]\n"
+ "ld1w z5.s, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "fmla z16.s, z6.s, z2.s[0]\n"
+ "fmla z17.s, z6.s, z2.s[1]\n"
+ "fmla z18.s, z6.s, z2.s[2]\n"
+ "fmla z19.s, z6.s, z2.s[3]\n"
+ "fmla z28.s, z6.s, z3.s[0]\n"
+ "fmla z29.s, z6.s, z3.s[1]\n"
+ "fmla z30.s, z6.s, z3.s[2]\n"
+ "fmla z31.s, z6.s, z3.s[3]\n"
+ "ld1w z6.s, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "fmla z8.s, z4.s, z0.s[0]\n"
+ "st1w z8.s, p0, [%[c_ptr]]\n"
+ "fmla z9.s, z4.s, z0.s[1]\n"
+ "fmla z10.s, z4.s, z0.s[2]\n"
+ "fmla z11.s, z4.s, z0.s[3]\n"
+ "fmla z20.s, z4.s, z1.s[0]\n"
+ "fmla z21.s, z4.s, z1.s[1]\n"
+ "fmla z22.s, z4.s, z1.s[2]\n"
+ "fmla z23.s, z4.s, z1.s[3]\n"
+ "fmla z12.s, z5.s, z0.s[0]\n"
+ "st1w z12.s, p0, [%[c_ptr], #1, MUL VL]\n"
+ "fmla z13.s, z5.s, z0.s[1]\n"
+ "fmla z14.s, z5.s, z0.s[2]\n"
+ "fmla z15.s, z5.s, z0.s[3]\n"
+ "fmla z24.s, z5.s, z1.s[0]\n"
+ "fmla z25.s, z5.s, z1.s[1]\n"
+ "fmla z26.s, z5.s, z1.s[2]\n"
+ "fmla z27.s, z5.s, z1.s[3]\n"
+ "fmla z16.s, z6.s, z0.s[0]\n"
+ "st1w z16.s, p0, [%[c_ptr], #2, MUL VL]\n"
+ "fmla z17.s, z6.s, z0.s[1]\n"
+ "st1w z9.s, p0, [%[c_ptr], #3, MUL VL]\n"
+ "fmla z18.s, z6.s, z0.s[2]\n"
+ "st1w z13.s, p0, [%[c_ptr], #4, MUL VL]\n"
+ "fmla z19.s, z6.s, z0.s[3]\n"
+ "st1w z17.s, p0, [%[c_ptr], #5, MUL VL]\n"
+ "fmla z28.s, z6.s, z1.s[0]\n"
+ "st1w z10.s, p0, [%[c_ptr], #6, MUL VL]\n"
+ "fmla z29.s, z6.s, z1.s[1]\n"
+ "st1w z14.s, p0, [%[c_ptr], #7, MUL VL]\n"
+ "fmla z30.s, z6.s, z1.s[2]\n"
+ "addvl %[c_ptr], %[c_ptr], #16\n"
+ "fmla z31.s, z6.s, z1.s[3]\n"
+ "b 4f\n"
+ "3:\n"
+ "fmla z8.s, z4.s, z0.s[0]\n"
+ "ld1w z6.s, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "fmla z9.s, z4.s, z0.s[1]\n"
+ "ld1rqw z3.s, p0/z, [%[a_ptr], #-0x10]\n"
+ "fmla z10.s, z4.s, z0.s[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #3\n"
+ "fmla z11.s, z4.s, z0.s[3]\n"
+ "fmla z20.s, z4.s, z1.s[0]\n"
+ "fmla z21.s, z4.s, z1.s[1]\n"
+ "fmla z22.s, z4.s, z1.s[2]\n"
+ "fmla z23.s, z4.s, z1.s[3]\n"
+ "ld1w z4.s, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "fmla z12.s, z5.s, z0.s[0]\n"
+ "fmla z13.s, z5.s, z0.s[1]\n"
+ "fmla z14.s, z5.s, z0.s[2]\n"
+ "fmla z15.s, z5.s, z0.s[3]\n"
+ "fmla z24.s, z5.s, z1.s[0]\n"
+ "fmla z25.s, z5.s, z1.s[1]\n"
+ "fmla z26.s, z5.s, z1.s[2]\n"
+ "fmla z27.s, z5.s, z1.s[3]\n"
+ "ld1w z5.s, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "fmla z16.s, z6.s, z0.s[0]\n"
+ "fmla z17.s, z6.s, z0.s[1]\n"
+ "fmla z18.s, z6.s, z0.s[2]\n"
+ "fmla z19.s, z6.s, z0.s[3]\n"
+ "fmla z28.s, z6.s, z1.s[0]\n"
+ "fmla z29.s, z6.s, z1.s[1]\n"
+ "fmla z30.s, z6.s, z1.s[2]\n"
+ "fmla z31.s, z6.s, z1.s[3]\n"
+ "ld1w z6.s, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "fmla z8.s, z4.s, z2.s[0]\n"
+ "st1w z8.s, p0, [%[c_ptr]]\n"
+ "fmla z9.s, z4.s, z2.s[1]\n"
+ "fmla z10.s, z4.s, z2.s[2]\n"
+ "fmla z11.s, z4.s, z2.s[3]\n"
+ "fmla z20.s, z4.s, z3.s[0]\n"
+ "fmla z21.s, z4.s, z3.s[1]\n"
+ "fmla z22.s, z4.s, z3.s[2]\n"
+ "fmla z23.s, z4.s, z3.s[3]\n"
+ "fmla z12.s, z5.s, z2.s[0]\n"
+ "st1w z12.s, p0, [%[c_ptr], #1, MUL VL]\n"
+ "fmla z13.s, z5.s, z2.s[1]\n"
+ "fmla z14.s, z5.s, z2.s[2]\n"
+ "fmla z15.s, z5.s, z2.s[3]\n"
+ "fmla z24.s, z5.s, z3.s[0]\n"
+ "fmla z25.s, z5.s, z3.s[1]\n"
+ "fmla z26.s, z5.s, z3.s[2]\n"
+ "fmla z27.s, z5.s, z3.s[3]\n"
+ "fmla z16.s, z6.s, z2.s[0]\n"
+ "st1w z16.s, p0, [%[c_ptr], #2, MUL VL]\n"
+ "fmla z17.s, z6.s, z2.s[1]\n"
+ "st1w z9.s, p0, [%[c_ptr], #3, MUL VL]\n"
+ "fmla z18.s, z6.s, z2.s[2]\n"
+ "st1w z13.s, p0, [%[c_ptr], #4, MUL VL]\n"
+ "fmla z19.s, z6.s, z2.s[3]\n"
+ "st1w z17.s, p0, [%[c_ptr], #5, MUL VL]\n"
+ "fmla z28.s, z6.s, z3.s[0]\n"
+ "st1w z10.s, p0, [%[c_ptr], #6, MUL VL]\n"
+ "fmla z29.s, z6.s, z3.s[1]\n"
+ "st1w z14.s, p0, [%[c_ptr], #7, MUL VL]\n"
+ "fmla z30.s, z6.s, z3.s[2]\n"
+ "addvl %[c_ptr], %[c_ptr], #16\n"
+ "fmla z31.s, z6.s, z3.s[3]\n"
+ "4:\n"
+ "st1w z18.s, p0, [%[c_ptr], #-8, MUL VL]\n"
+ "st1w z11.s, p0, [%[c_ptr], #-7, MUL VL]\n"
+ "st1w z15.s, p0, [%[c_ptr], #-6, MUL VL]\n"
+ "st1w z19.s, p0, [%[c_ptr], #-5, MUL VL]\n"
+ "st1w z20.s, p0, [%[c_ptr], #-4, MUL VL]\n"
+ "st1w z24.s, p0, [%[c_ptr], #-3, MUL VL]\n"
+ "st1w z28.s, p0, [%[c_ptr], #-2, MUL VL]\n"
+ "st1w z21.s, p0, [%[c_ptr], #-1, MUL VL]\n"
+ "st1w z25.s, p0, [%[c_ptr]]\n"
+ "st1w z29.s, p0, [%[c_ptr], #1, MUL VL]\n"
+ "st1w z22.s, p0, [%[c_ptr], #2, MUL VL]\n"
+ "st1w z26.s, p0, [%[c_ptr], #3, MUL VL]\n"
+ "st1w z30.s, p0, [%[c_ptr], #4, MUL VL]\n"
+ "st1w z23.s, p0, [%[c_ptr], #5, MUL VL]\n"
+ "st1w z27.s, p0, [%[c_ptr], #6, MUL VL]\n"
+ "st1w z31.s, p0, [%[c_ptr], #7, MUL VL]\n"
+ "addvl %[c_ptr], %[c_ptr], #8\n"
+ : [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
+ [loops] "+r" (loops), [tails] "+r" (tails)
+ :
+ : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ }
+ }
+}
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_3VLx8.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_3VLx8.hpp
new file mode 100644
index 0000000000..91aa567d4a
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_3VLx8.hpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+#include <cstdint>
+#include "../std_transforms_sve.hpp"
+
+namespace arm_gemm {
+
+// Actual kernel implementations
+void sve_interleaved_s8s32_dot_3VLx8(const int8_t *, const int8_t *, int32_t *, int, int, int);
+
+class interleaved_s8s32_dot_3VLx8 {
+public:
+ typedef int8_t operand_type;
+ typedef int32_t result_type;
+
+ typedef void (*kern_type)(const int8_t *, const int8_t *, int32_t *, int, int, int);
+
+ /* Kernel blocking parameters */
+ static int out_width()
+ {
+ return svcntw() * 3;
+ }
+
+ static int out_height()
+ {
+ return 8;
+ }
+
+ static int k_unroll()
+ {
+ return 4;
+ }
+
+ // Use the standard fixed size transforms.
+ StdTransformsSVE<operand_type, result_type, 8, 3, 4, 1> transforms = {};
+
+ kern_type kernel=sve_interleaved_s8s32_dot_3VLx8;
+
+ interleaved_s8s32_dot_3VLx8(const CPUInfo *ci)
+ {
+
+ }
+};
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_3VLx8/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_3VLx8/generic.cpp
new file mode 100644
index 0000000000..2e994a13f3
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_3VLx8/generic.cpp
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+
+#include <cstdint>
+#include "../../asmlib.hpp"
+
+namespace arm_gemm {
+
+void sve_interleaved_s8s32_dot_3VLx8(const int8_t *Apanel, const int8_t *Bpanel, int32_t *Cpanel, int ablocks, int bblocks, int K) {
+ const int8_t *a_ptr = Apanel;
+ int32_t *c_ptr = Cpanel;
+
+ K /= 4;
+ const long loops_count = (K / 2) - 1;
+ const long tails_count = K % 2;
+
+ for (int yb=0; yb<ablocks; yb++) {
+ const int8_t *a_ptr0 = a_ptr;
+ const int8_t *b_ptr = Bpanel;
+
+ for (int xb=0; xb<bblocks; xb++) {
+ a_ptr = a_ptr0;
+ long loops = loops_count;
+ long tails = tails_count;
+
+ __asm __volatile (
+ "mov z8.s, #0\n"
+ "ptrue p0.b\n"
+ "mov z9.s, #0\n"
+ "ld1rqb z0.b, p0/z, [%[a_ptr]]\n"
+ "mov z10.s, #0\n"
+ "ld1b z4.b, p0/z, [%[b_ptr]]\n"
+ "mov z11.s, #0\n"
+ "ld1rqb z1.b, p0/z, [%[a_ptr], #0x10]\n"
+ "mov z12.s, #0\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "mov z13.s, #0\n"
+ "ld1rqb z2.b, p0/z, [%[a_ptr], #0x20]\n"
+ "mov z14.s, #0\n"
+ "add %[a_ptr], %[a_ptr], #0x40\n"
+ "mov z15.s, #0\n"
+ "addvl %[b_ptr], %[b_ptr], #3\n"
+ "mov z16.s, #0\n"
+ "mov z17.s, #0\n"
+ "mov z18.s, #0\n"
+ "mov z19.s, #0\n"
+ "mov z20.s, #0\n"
+ "mov z21.s, #0\n"
+ "mov z22.s, #0\n"
+ "mov z23.s, #0\n"
+ "mov z24.s, #0\n"
+ "mov z25.s, #0\n"
+ "mov z26.s, #0\n"
+ "mov z27.s, #0\n"
+ "mov z28.s, #0\n"
+ "mov z29.s, #0\n"
+ "mov z30.s, #0\n"
+ "mov z31.s, #0\n"
+ "cbz %[loops], 1f\n"
+ "2:\n"
+ "sdot z8.s, z4.b, z0.b[0]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "sdot z9.s, z4.b, z0.b[1]\n"
+ "ld1rqb z3.b, p0/z, [%[a_ptr], #-0x10]\n"
+ "sdot z10.s, z4.b, z0.b[2]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "sdot z11.s, z4.b, z0.b[3]\n"
+ "sdot z20.s, z4.b, z1.b[0]\n"
+ "sdot z21.s, z4.b, z1.b[1]\n"
+ "sdot z22.s, z4.b, z1.b[2]\n"
+ "sdot z23.s, z4.b, z1.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr]]\n"
+ "sdot z12.s, z5.b, z0.b[0]\n"
+ "sdot z13.s, z5.b, z0.b[1]\n"
+ "sdot z14.s, z5.b, z0.b[2]\n"
+ "sdot z15.s, z5.b, z0.b[3]\n"
+ "sdot z24.s, z5.b, z1.b[0]\n"
+ "sdot z25.s, z5.b, z1.b[1]\n"
+ "sdot z26.s, z5.b, z1.b[2]\n"
+ "sdot z27.s, z5.b, z1.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "sdot z16.s, z6.b, z0.b[0]\n"
+ "sdot z17.s, z6.b, z0.b[1]\n"
+ "sdot z18.s, z6.b, z0.b[2]\n"
+ "sdot z19.s, z6.b, z0.b[3]\n"
+ "ld1rqb z0.b, p0/z, [%[a_ptr]]\n"
+ "sdot z28.s, z6.b, z1.b[0]\n"
+ "sdot z29.s, z6.b, z1.b[1]\n"
+ "sdot z30.s, z6.b, z1.b[2]\n"
+ "sdot z31.s, z6.b, z1.b[3]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #2, MUL VL]\n"
+ "sdot z8.s, z4.b, z2.b[0]\n"
+ "ld1rqb z1.b, p0/z, [%[a_ptr], #0x10]\n"
+ "sdot z9.s, z4.b, z2.b[1]\n"
+ "add %[a_ptr], %[a_ptr], #0x40\n"
+ "sdot z10.s, z4.b, z2.b[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #6\n"
+ "sdot z11.s, z4.b, z2.b[3]\n"
+ "sdot z20.s, z4.b, z3.b[0]\n"
+ "sdot z21.s, z4.b, z3.b[1]\n"
+ "sdot z22.s, z4.b, z3.b[2]\n"
+ "sdot z23.s, z4.b, z3.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "sdot z12.s, z5.b, z2.b[0]\n"
+ "sdot z13.s, z5.b, z2.b[1]\n"
+ "sdot z14.s, z5.b, z2.b[2]\n"
+ "sdot z15.s, z5.b, z2.b[3]\n"
+ "sdot z24.s, z5.b, z3.b[0]\n"
+ "sdot z25.s, z5.b, z3.b[1]\n"
+ "sdot z26.s, z5.b, z3.b[2]\n"
+ "sdot z27.s, z5.b, z3.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "sdot z16.s, z6.b, z2.b[0]\n"
+ "sdot z17.s, z6.b, z2.b[1]\n"
+ "sdot z18.s, z6.b, z2.b[2]\n"
+ "sdot z19.s, z6.b, z2.b[3]\n"
+ "ld1rqb z2.b, p0/z, [%[a_ptr], #-0x20]\n"
+ "sdot z28.s, z6.b, z3.b[0]\n"
+ "sdot z29.s, z6.b, z3.b[1]\n"
+ "sdot z30.s, z6.b, z3.b[2]\n"
+ "sdot z31.s, z6.b, z3.b[3]\n"
+ "b.ne 2b\n"
+ "1:\n"
+ "cbz %[tails], 3f\n"
+ "sdot z8.s, z4.b, z0.b[0]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "sdot z9.s, z4.b, z0.b[1]\n"
+ "ld1rqb z3.b, p0/z, [%[a_ptr], #-0x10]\n"
+ "sdot z10.s, z4.b, z0.b[2]\n"
+ "sdot z11.s, z4.b, z0.b[3]\n"
+ "sdot z20.s, z4.b, z1.b[0]\n"
+ "sdot z21.s, z4.b, z1.b[1]\n"
+ "sdot z22.s, z4.b, z1.b[2]\n"
+ "sdot z23.s, z4.b, z1.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr]]\n"
+ "sdot z12.s, z5.b, z0.b[0]\n"
+ "sdot z13.s, z5.b, z0.b[1]\n"
+ "sdot z14.s, z5.b, z0.b[2]\n"
+ "sdot z15.s, z5.b, z0.b[3]\n"
+ "sdot z24.s, z5.b, z1.b[0]\n"
+ "sdot z25.s, z5.b, z1.b[1]\n"
+ "sdot z26.s, z5.b, z1.b[2]\n"
+ "sdot z27.s, z5.b, z1.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "sdot z16.s, z6.b, z0.b[0]\n"
+ "sdot z17.s, z6.b, z0.b[1]\n"
+ "sdot z18.s, z6.b, z0.b[2]\n"
+ "sdot z19.s, z6.b, z0.b[3]\n"
+ "ld1rqb z0.b, p0/z, [%[a_ptr]]\n"
+ "sdot z28.s, z6.b, z1.b[0]\n"
+ "sdot z29.s, z6.b, z1.b[1]\n"
+ "sdot z30.s, z6.b, z1.b[2]\n"
+ "sdot z31.s, z6.b, z1.b[3]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #2, MUL VL]\n"
+ "sdot z8.s, z4.b, z2.b[0]\n"
+ "ld1rqb z1.b, p0/z, [%[a_ptr], #0x10]\n"
+ "sdot z9.s, z4.b, z2.b[1]\n"
+ "add %[a_ptr], %[a_ptr], #0x20\n"
+ "sdot z10.s, z4.b, z2.b[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #6\n"
+ "sdot z11.s, z4.b, z2.b[3]\n"
+ "sdot z20.s, z4.b, z3.b[0]\n"
+ "sdot z21.s, z4.b, z3.b[1]\n"
+ "sdot z22.s, z4.b, z3.b[2]\n"
+ "sdot z23.s, z4.b, z3.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "sdot z12.s, z5.b, z2.b[0]\n"
+ "sdot z13.s, z5.b, z2.b[1]\n"
+ "sdot z14.s, z5.b, z2.b[2]\n"
+ "sdot z15.s, z5.b, z2.b[3]\n"
+ "sdot z24.s, z5.b, z3.b[0]\n"
+ "sdot z25.s, z5.b, z3.b[1]\n"
+ "sdot z26.s, z5.b, z3.b[2]\n"
+ "sdot z27.s, z5.b, z3.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "sdot z16.s, z6.b, z2.b[0]\n"
+ "sdot z17.s, z6.b, z2.b[1]\n"
+ "sdot z18.s, z6.b, z2.b[2]\n"
+ "sdot z19.s, z6.b, z2.b[3]\n"
+ "sdot z28.s, z6.b, z3.b[0]\n"
+ "sdot z29.s, z6.b, z3.b[1]\n"
+ "sdot z30.s, z6.b, z3.b[2]\n"
+ "sdot z31.s, z6.b, z3.b[3]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "sdot z8.s, z4.b, z0.b[0]\n"
+ "st1w z8.s, p0, [%[c_ptr]]\n"
+ "sdot z9.s, z4.b, z0.b[1]\n"
+ "sdot z10.s, z4.b, z0.b[2]\n"
+ "sdot z11.s, z4.b, z0.b[3]\n"
+ "sdot z20.s, z4.b, z1.b[0]\n"
+ "sdot z21.s, z4.b, z1.b[1]\n"
+ "sdot z22.s, z4.b, z1.b[2]\n"
+ "sdot z23.s, z4.b, z1.b[3]\n"
+ "sdot z12.s, z5.b, z0.b[0]\n"
+ "st1w z12.s, p0, [%[c_ptr], #1, MUL VL]\n"
+ "sdot z13.s, z5.b, z0.b[1]\n"
+ "sdot z14.s, z5.b, z0.b[2]\n"
+ "sdot z15.s, z5.b, z0.b[3]\n"
+ "sdot z24.s, z5.b, z1.b[0]\n"
+ "sdot z25.s, z5.b, z1.b[1]\n"
+ "sdot z26.s, z5.b, z1.b[2]\n"
+ "sdot z27.s, z5.b, z1.b[3]\n"
+ "sdot z16.s, z6.b, z0.b[0]\n"
+ "st1w z16.s, p0, [%[c_ptr], #2, MUL VL]\n"
+ "sdot z17.s, z6.b, z0.b[1]\n"
+ "st1w z9.s, p0, [%[c_ptr], #3, MUL VL]\n"
+ "sdot z18.s, z6.b, z0.b[2]\n"
+ "st1w z13.s, p0, [%[c_ptr], #4, MUL VL]\n"
+ "sdot z19.s, z6.b, z0.b[3]\n"
+ "st1w z17.s, p0, [%[c_ptr], #5, MUL VL]\n"
+ "sdot z28.s, z6.b, z1.b[0]\n"
+ "st1w z10.s, p0, [%[c_ptr], #6, MUL VL]\n"
+ "sdot z29.s, z6.b, z1.b[1]\n"
+ "st1w z14.s, p0, [%[c_ptr], #7, MUL VL]\n"
+ "sdot z30.s, z6.b, z1.b[2]\n"
+ "addvl %[c_ptr], %[c_ptr], #16\n"
+ "sdot z31.s, z6.b, z1.b[3]\n"
+ "b 4f\n"
+ "3:\n"
+ "sdot z8.s, z4.b, z0.b[0]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "sdot z9.s, z4.b, z0.b[1]\n"
+ "ld1rqb z3.b, p0/z, [%[a_ptr], #-0x10]\n"
+ "sdot z10.s, z4.b, z0.b[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #3\n"
+ "sdot z11.s, z4.b, z0.b[3]\n"
+ "sdot z20.s, z4.b, z1.b[0]\n"
+ "sdot z21.s, z4.b, z1.b[1]\n"
+ "sdot z22.s, z4.b, z1.b[2]\n"
+ "sdot z23.s, z4.b, z1.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "sdot z12.s, z5.b, z0.b[0]\n"
+ "sdot z13.s, z5.b, z0.b[1]\n"
+ "sdot z14.s, z5.b, z0.b[2]\n"
+ "sdot z15.s, z5.b, z0.b[3]\n"
+ "sdot z24.s, z5.b, z1.b[0]\n"
+ "sdot z25.s, z5.b, z1.b[1]\n"
+ "sdot z26.s, z5.b, z1.b[2]\n"
+ "sdot z27.s, z5.b, z1.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "sdot z16.s, z6.b, z0.b[0]\n"
+ "sdot z17.s, z6.b, z0.b[1]\n"
+ "sdot z18.s, z6.b, z0.b[2]\n"
+ "sdot z19.s, z6.b, z0.b[3]\n"
+ "sdot z28.s, z6.b, z1.b[0]\n"
+ "sdot z29.s, z6.b, z1.b[1]\n"
+ "sdot z30.s, z6.b, z1.b[2]\n"
+ "sdot z31.s, z6.b, z1.b[3]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "sdot z8.s, z4.b, z2.b[0]\n"
+ "st1w z8.s, p0, [%[c_ptr]]\n"
+ "sdot z9.s, z4.b, z2.b[1]\n"
+ "sdot z10.s, z4.b, z2.b[2]\n"
+ "sdot z11.s, z4.b, z2.b[3]\n"
+ "sdot z20.s, z4.b, z3.b[0]\n"
+ "sdot z21.s, z4.b, z3.b[1]\n"
+ "sdot z22.s, z4.b, z3.b[2]\n"
+ "sdot z23.s, z4.b, z3.b[3]\n"
+ "sdot z12.s, z5.b, z2.b[0]\n"
+ "st1w z12.s, p0, [%[c_ptr], #1, MUL VL]\n"
+ "sdot z13.s, z5.b, z2.b[1]\n"
+ "sdot z14.s, z5.b, z2.b[2]\n"
+ "sdot z15.s, z5.b, z2.b[3]\n"
+ "sdot z24.s, z5.b, z3.b[0]\n"
+ "sdot z25.s, z5.b, z3.b[1]\n"
+ "sdot z26.s, z5.b, z3.b[2]\n"
+ "sdot z27.s, z5.b, z3.b[3]\n"
+ "sdot z16.s, z6.b, z2.b[0]\n"
+ "st1w z16.s, p0, [%[c_ptr], #2, MUL VL]\n"
+ "sdot z17.s, z6.b, z2.b[1]\n"
+ "st1w z9.s, p0, [%[c_ptr], #3, MUL VL]\n"
+ "sdot z18.s, z6.b, z2.b[2]\n"
+ "st1w z13.s, p0, [%[c_ptr], #4, MUL VL]\n"
+ "sdot z19.s, z6.b, z2.b[3]\n"
+ "st1w z17.s, p0, [%[c_ptr], #5, MUL VL]\n"
+ "sdot z28.s, z6.b, z3.b[0]\n"
+ "st1w z10.s, p0, [%[c_ptr], #6, MUL VL]\n"
+ "sdot z29.s, z6.b, z3.b[1]\n"
+ "st1w z14.s, p0, [%[c_ptr], #7, MUL VL]\n"
+ "sdot z30.s, z6.b, z3.b[2]\n"
+ "addvl %[c_ptr], %[c_ptr], #16\n"
+ "sdot z31.s, z6.b, z3.b[3]\n"
+ "4:\n"
+ "st1w z18.s, p0, [%[c_ptr], #-8, MUL VL]\n"
+ "st1w z11.s, p0, [%[c_ptr], #-7, MUL VL]\n"
+ "st1w z15.s, p0, [%[c_ptr], #-6, MUL VL]\n"
+ "st1w z19.s, p0, [%[c_ptr], #-5, MUL VL]\n"
+ "st1w z20.s, p0, [%[c_ptr], #-4, MUL VL]\n"
+ "st1w z24.s, p0, [%[c_ptr], #-3, MUL VL]\n"
+ "st1w z28.s, p0, [%[c_ptr], #-2, MUL VL]\n"
+ "st1w z21.s, p0, [%[c_ptr], #-1, MUL VL]\n"
+ "st1w z25.s, p0, [%[c_ptr]]\n"
+ "st1w z29.s, p0, [%[c_ptr], #1, MUL VL]\n"
+ "st1w z22.s, p0, [%[c_ptr], #2, MUL VL]\n"
+ "st1w z26.s, p0, [%[c_ptr], #3, MUL VL]\n"
+ "st1w z30.s, p0, [%[c_ptr], #4, MUL VL]\n"
+ "st1w z23.s, p0, [%[c_ptr], #5, MUL VL]\n"
+ "st1w z27.s, p0, [%[c_ptr], #6, MUL VL]\n"
+ "st1w z31.s, p0, [%[c_ptr], #7, MUL VL]\n"
+ "addvl %[c_ptr], %[c_ptr], #8\n"
+ : [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
+ [loops] "+r" (loops), [tails] "+r" (tails)
+ :
+ : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ }
+ }
+}
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
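
Because this kernel is built on the SDOT instruction, K is divided by 4 before the loop counts are derived: each 32-bit accumulator lane absorbs four signed 8-bit products per instruction, which is what k_unroll() == 4 in the header reflects. A one-lane scalar model of that step (an illustration of the SDOT semantics, not library code):

    #include <cstdint>

    // One lane of an sdot: accumulate four int8 x int8 products into an int32.
    static int32_t sdot_lane(int32_t acc, const int8_t a[4], const int8_t b[4]) {
        for (int i = 0; i < 4; i++)
            acc += (int32_t)a[i] * (int32_t)b[i];
        return acc;
    }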
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_3VLx8.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_3VLx8.hpp
new file mode 100644
index 0000000000..ef457e454f
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_3VLx8.hpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+#include <cstdint>
+#include "../std_transforms_sve.hpp"
+
+namespace arm_gemm {
+
+// Actual kernel implementations
+void sve_interleaved_u8u32_dot_3VLx8(const uint8_t *, const uint8_t *, uint32_t *, int, int, int);
+
+class interleaved_u8u32_dot_3VLx8 {
+public:
+ typedef uint8_t operand_type;
+ typedef uint32_t result_type;
+
+ typedef void (*kern_type)(const uint8_t *, const uint8_t *, uint32_t *, int, int, int);
+
+ /* Kernel blocking parameters */
+ static int out_width()
+ {
+ return svcntw() * 3;
+ }
+
+ static int out_height()
+ {
+ return 8;
+ }
+
+ static int k_unroll()
+ {
+ return 4;
+ }
+
+ // Use the standard fixed size transforms.
+ StdTransformsSVE<operand_type, result_type, 8, 3, 4, 1> transforms = {};
+
+ kern_type kernel=sve_interleaved_u8u32_dot_3VLx8;
+
+ interleaved_u8u32_dot_3VLx8(const CPUInfo *ci)
+ {
+
+ }
+};
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_3VLx8/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_3VLx8/generic.cpp
new file mode 100644
index 0000000000..f4d33a9efa
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_3VLx8/generic.cpp
@@ -0,0 +1,328 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+
+#include <cstdint>
+#include "../../asmlib.hpp"
+
+namespace arm_gemm {
+
+void sve_interleaved_u8u32_dot_3VLx8(const uint8_t *Apanel, const uint8_t *Bpanel, uint32_t *Cpanel, int ablocks, int bblocks, int K) {
+ const uint8_t *a_ptr = Apanel;
+ uint32_t *c_ptr = Cpanel;
+
+ K /= 4;
+ const long loops_count = (K / 2) - 1;
+ const long tails_count = K % 2;
+
+ for (int yb=0; yb<ablocks; yb++) {
+ const uint8_t *a_ptr0 = a_ptr;
+ const uint8_t *b_ptr = Bpanel;
+
+ for (int xb=0; xb<bblocks; xb++) {
+ a_ptr = a_ptr0;
+ long loops = loops_count;
+ long tails = tails_count;
+
+ __asm __volatile (
+ "mov z8.s, #0\n"
+ "ptrue p0.b\n"
+ "mov z9.s, #0\n"
+ "mov z10.s, #0\n"
+ "mov z11.s, #0\n"
+ "mov z12.s, #0\n"
+ "ld1rqb z0.b, p0/z, [%[a_ptr]]\n"
+ "mov z13.s, #0\n"
+ "ld1b z4.b, p0/z, [%[b_ptr]]\n"
+ "mov z14.s, #0\n"
+ "ld1rqb z1.b, p0/z, [%[a_ptr], #0x10]\n"
+ "mov z15.s, #0\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "mov z16.s, #0\n"
+ "ld1rqb z2.b, p0/z, [%[a_ptr], #0x20]\n"
+ "mov z17.s, #0\n"
+ "add %[a_ptr], %[a_ptr], #0x40\n"
+ "mov z18.s, #0\n"
+ "addvl %[b_ptr], %[b_ptr], #3\n"
+ "mov z19.s, #0\n"
+ "mov z20.s, #0\n"
+ "mov z21.s, #0\n"
+ "mov z22.s, #0\n"
+ "mov z23.s, #0\n"
+ "mov z24.s, #0\n"
+ "mov z25.s, #0\n"
+ "mov z26.s, #0\n"
+ "mov z27.s, #0\n"
+ "mov z28.s, #0\n"
+ "mov z29.s, #0\n"
+ "mov z30.s, #0\n"
+ "mov z31.s, #0\n"
+ "cbz %[loops], 1f\n"
+ "2:\n"
+ "udot z8.s, z4.b, z0.b[0]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "udot z9.s, z4.b, z0.b[1]\n"
+ "ld1rqb z3.b, p0/z, [%[a_ptr], #-0x10]\n"
+ "udot z10.s, z4.b, z0.b[2]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "udot z11.s, z4.b, z0.b[3]\n"
+ "udot z20.s, z4.b, z1.b[0]\n"
+ "udot z21.s, z4.b, z1.b[1]\n"
+ "udot z22.s, z4.b, z1.b[2]\n"
+ "udot z23.s, z4.b, z1.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr]]\n"
+ "udot z12.s, z5.b, z0.b[0]\n"
+ "udot z13.s, z5.b, z0.b[1]\n"
+ "udot z14.s, z5.b, z0.b[2]\n"
+ "udot z15.s, z5.b, z0.b[3]\n"
+ "udot z24.s, z5.b, z1.b[0]\n"
+ "udot z25.s, z5.b, z1.b[1]\n"
+ "udot z26.s, z5.b, z1.b[2]\n"
+ "udot z27.s, z5.b, z1.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "udot z16.s, z6.b, z0.b[0]\n"
+ "udot z17.s, z6.b, z0.b[1]\n"
+ "udot z18.s, z6.b, z0.b[2]\n"
+ "udot z19.s, z6.b, z0.b[3]\n"
+ "ld1rqb z0.b, p0/z, [%[a_ptr]]\n"
+ "udot z28.s, z6.b, z1.b[0]\n"
+ "udot z29.s, z6.b, z1.b[1]\n"
+ "udot z30.s, z6.b, z1.b[2]\n"
+ "udot z31.s, z6.b, z1.b[3]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #2, MUL VL]\n"
+ "udot z8.s, z4.b, z2.b[0]\n"
+ "ld1rqb z1.b, p0/z, [%[a_ptr], #0x10]\n"
+ "udot z9.s, z4.b, z2.b[1]\n"
+ "add %[a_ptr], %[a_ptr], #0x40\n"
+ "udot z10.s, z4.b, z2.b[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #6\n"
+ "udot z11.s, z4.b, z2.b[3]\n"
+ "udot z20.s, z4.b, z3.b[0]\n"
+ "udot z21.s, z4.b, z3.b[1]\n"
+ "udot z22.s, z4.b, z3.b[2]\n"
+ "udot z23.s, z4.b, z3.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "udot z12.s, z5.b, z2.b[0]\n"
+ "udot z13.s, z5.b, z2.b[1]\n"
+ "udot z14.s, z5.b, z2.b[2]\n"
+ "udot z15.s, z5.b, z2.b[3]\n"
+ "udot z24.s, z5.b, z3.b[0]\n"
+ "udot z25.s, z5.b, z3.b[1]\n"
+ "udot z26.s, z5.b, z3.b[2]\n"
+ "udot z27.s, z5.b, z3.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "udot z16.s, z6.b, z2.b[0]\n"
+ "udot z17.s, z6.b, z2.b[1]\n"
+ "udot z18.s, z6.b, z2.b[2]\n"
+ "udot z19.s, z6.b, z2.b[3]\n"
+ "ld1rqb z2.b, p0/z, [%[a_ptr], #-0x20]\n"
+ "udot z28.s, z6.b, z3.b[0]\n"
+ "udot z29.s, z6.b, z3.b[1]\n"
+ "udot z30.s, z6.b, z3.b[2]\n"
+ "udot z31.s, z6.b, z3.b[3]\n"
+ "b.ne 2b\n"
+ "1:\n"
+ "cbz %[tails], 3f\n"
+ "udot z8.s, z4.b, z0.b[0]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "udot z9.s, z4.b, z0.b[1]\n"
+ "ld1rqb z3.b, p0/z, [%[a_ptr], #-0x10]\n"
+ "udot z10.s, z4.b, z0.b[2]\n"
+ "udot z11.s, z4.b, z0.b[3]\n"
+ "udot z20.s, z4.b, z1.b[0]\n"
+ "udot z21.s, z4.b, z1.b[1]\n"
+ "udot z22.s, z4.b, z1.b[2]\n"
+ "udot z23.s, z4.b, z1.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr]]\n"
+ "udot z12.s, z5.b, z0.b[0]\n"
+ "udot z13.s, z5.b, z0.b[1]\n"
+ "udot z14.s, z5.b, z0.b[2]\n"
+ "udot z15.s, z5.b, z0.b[3]\n"
+ "udot z24.s, z5.b, z1.b[0]\n"
+ "udot z25.s, z5.b, z1.b[1]\n"
+ "udot z26.s, z5.b, z1.b[2]\n"
+ "udot z27.s, z5.b, z1.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "udot z16.s, z6.b, z0.b[0]\n"
+ "udot z17.s, z6.b, z0.b[1]\n"
+ "udot z18.s, z6.b, z0.b[2]\n"
+ "udot z19.s, z6.b, z0.b[3]\n"
+ "ld1rqb z0.b, p0/z, [%[a_ptr]]\n"
+ "udot z28.s, z6.b, z1.b[0]\n"
+ "udot z29.s, z6.b, z1.b[1]\n"
+ "udot z30.s, z6.b, z1.b[2]\n"
+ "udot z31.s, z6.b, z1.b[3]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #2, MUL VL]\n"
+ "udot z8.s, z4.b, z2.b[0]\n"
+ "ld1rqb z1.b, p0/z, [%[a_ptr], #0x10]\n"
+ "udot z9.s, z4.b, z2.b[1]\n"
+ "add %[a_ptr], %[a_ptr], #0x20\n"
+ "udot z10.s, z4.b, z2.b[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #6\n"
+ "udot z11.s, z4.b, z2.b[3]\n"
+ "udot z20.s, z4.b, z3.b[0]\n"
+ "udot z21.s, z4.b, z3.b[1]\n"
+ "udot z22.s, z4.b, z3.b[2]\n"
+ "udot z23.s, z4.b, z3.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "udot z12.s, z5.b, z2.b[0]\n"
+ "udot z13.s, z5.b, z2.b[1]\n"
+ "udot z14.s, z5.b, z2.b[2]\n"
+ "udot z15.s, z5.b, z2.b[3]\n"
+ "udot z24.s, z5.b, z3.b[0]\n"
+ "udot z25.s, z5.b, z3.b[1]\n"
+ "udot z26.s, z5.b, z3.b[2]\n"
+ "udot z27.s, z5.b, z3.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "udot z16.s, z6.b, z2.b[0]\n"
+ "udot z17.s, z6.b, z2.b[1]\n"
+ "udot z18.s, z6.b, z2.b[2]\n"
+ "udot z19.s, z6.b, z2.b[3]\n"
+ "udot z28.s, z6.b, z3.b[0]\n"
+ "udot z29.s, z6.b, z3.b[1]\n"
+ "udot z30.s, z6.b, z3.b[2]\n"
+ "udot z31.s, z6.b, z3.b[3]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "udot z8.s, z4.b, z0.b[0]\n"
+ "udot z9.s, z4.b, z0.b[1]\n"
+ "udot z10.s, z4.b, z0.b[2]\n"
+ "udot z11.s, z4.b, z0.b[3]\n"
+ "udot z20.s, z4.b, z1.b[0]\n"
+ "st1w z8.s, p0, [%[c_ptr]]\n"
+ "udot z21.s, z4.b, z1.b[1]\n"
+ "udot z22.s, z4.b, z1.b[2]\n"
+ "udot z23.s, z4.b, z1.b[3]\n"
+ "udot z12.s, z5.b, z0.b[0]\n"
+ "udot z13.s, z5.b, z0.b[1]\n"
+ "udot z14.s, z5.b, z0.b[2]\n"
+ "udot z15.s, z5.b, z0.b[3]\n"
+ "udot z24.s, z5.b, z1.b[0]\n"
+ "st1w z12.s, p0, [%[c_ptr], #1, MUL VL]\n"
+ "udot z25.s, z5.b, z1.b[1]\n"
+ "udot z26.s, z5.b, z1.b[2]\n"
+ "udot z27.s, z5.b, z1.b[3]\n"
+ "udot z16.s, z6.b, z0.b[0]\n"
+ "udot z17.s, z6.b, z0.b[1]\n"
+ "udot z18.s, z6.b, z0.b[2]\n"
+ "udot z19.s, z6.b, z0.b[3]\n"
+ "udot z28.s, z6.b, z1.b[0]\n"
+ "st1w z16.s, p0, [%[c_ptr], #2, MUL VL]\n"
+ "udot z29.s, z6.b, z1.b[1]\n"
+ "udot z30.s, z6.b, z1.b[2]\n"
+ "udot z31.s, z6.b, z1.b[3]\n"
+ "b 4f\n"
+ "3:\n"
+ "udot z8.s, z4.b, z0.b[0]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "udot z9.s, z4.b, z0.b[1]\n"
+ "ld1rqb z3.b, p0/z, [%[a_ptr], #-0x10]\n"
+ "udot z10.s, z4.b, z0.b[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #3\n"
+ "udot z11.s, z4.b, z0.b[3]\n"
+ "udot z20.s, z4.b, z1.b[0]\n"
+ "udot z21.s, z4.b, z1.b[1]\n"
+ "udot z22.s, z4.b, z1.b[2]\n"
+ "udot z23.s, z4.b, z1.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "udot z12.s, z5.b, z0.b[0]\n"
+ "udot z13.s, z5.b, z0.b[1]\n"
+ "udot z14.s, z5.b, z0.b[2]\n"
+ "udot z15.s, z5.b, z0.b[3]\n"
+ "udot z24.s, z5.b, z1.b[0]\n"
+ "udot z25.s, z5.b, z1.b[1]\n"
+ "udot z26.s, z5.b, z1.b[2]\n"
+ "udot z27.s, z5.b, z1.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "udot z16.s, z6.b, z0.b[0]\n"
+ "udot z17.s, z6.b, z0.b[1]\n"
+ "udot z18.s, z6.b, z0.b[2]\n"
+ "udot z19.s, z6.b, z0.b[3]\n"
+ "udot z28.s, z6.b, z1.b[0]\n"
+ "udot z29.s, z6.b, z1.b[1]\n"
+ "udot z30.s, z6.b, z1.b[2]\n"
+ "udot z31.s, z6.b, z1.b[3]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "udot z8.s, z4.b, z2.b[0]\n"
+ "udot z9.s, z4.b, z2.b[1]\n"
+ "udot z10.s, z4.b, z2.b[2]\n"
+ "udot z11.s, z4.b, z2.b[3]\n"
+ "udot z20.s, z4.b, z3.b[0]\n"
+ "st1w z8.s, p0, [%[c_ptr]]\n"
+ "udot z21.s, z4.b, z3.b[1]\n"
+ "udot z22.s, z4.b, z3.b[2]\n"
+ "udot z23.s, z4.b, z3.b[3]\n"
+ "udot z12.s, z5.b, z2.b[0]\n"
+ "udot z13.s, z5.b, z2.b[1]\n"
+ "udot z14.s, z5.b, z2.b[2]\n"
+ "udot z15.s, z5.b, z2.b[3]\n"
+ "udot z24.s, z5.b, z3.b[0]\n"
+ "st1w z12.s, p0, [%[c_ptr], #1, MUL VL]\n"
+ "udot z25.s, z5.b, z3.b[1]\n"
+ "udot z26.s, z5.b, z3.b[2]\n"
+ "udot z27.s, z5.b, z3.b[3]\n"
+ "udot z16.s, z6.b, z2.b[0]\n"
+ "udot z17.s, z6.b, z2.b[1]\n"
+ "udot z18.s, z6.b, z2.b[2]\n"
+ "udot z19.s, z6.b, z2.b[3]\n"
+ "udot z28.s, z6.b, z3.b[0]\n"
+ "st1w z16.s, p0, [%[c_ptr], #2, MUL VL]\n"
+ "udot z29.s, z6.b, z3.b[1]\n"
+ "udot z30.s, z6.b, z3.b[2]\n"
+ "udot z31.s, z6.b, z3.b[3]\n"
+ "4:\n"
+ "st1w z9.s, p0, [%[c_ptr], #3, MUL VL]\n"
+ "st1w z13.s, p0, [%[c_ptr], #4, MUL VL]\n"
+ "st1w z17.s, p0, [%[c_ptr], #5, MUL VL]\n"
+ "st1w z10.s, p0, [%[c_ptr], #6, MUL VL]\n"
+ "st1w z14.s, p0, [%[c_ptr], #7, MUL VL]\n"
+ "addvl %[c_ptr], %[c_ptr], #16\n"
+ "st1w z18.s, p0, [%[c_ptr], #-8, MUL VL]\n"
+ "st1w z11.s, p0, [%[c_ptr], #-7, MUL VL]\n"
+ "st1w z15.s, p0, [%[c_ptr], #-6, MUL VL]\n"
+ "st1w z19.s, p0, [%[c_ptr], #-5, MUL VL]\n"
+ "st1w z20.s, p0, [%[c_ptr], #-4, MUL VL]\n"
+ "st1w z24.s, p0, [%[c_ptr], #-3, MUL VL]\n"
+ "st1w z28.s, p0, [%[c_ptr], #-2, MUL VL]\n"
+ "st1w z21.s, p0, [%[c_ptr], #-1, MUL VL]\n"
+ "st1w z25.s, p0, [%[c_ptr]]\n"
+ "st1w z29.s, p0, [%[c_ptr], #1, MUL VL]\n"
+ "st1w z22.s, p0, [%[c_ptr], #2, MUL VL]\n"
+ "st1w z26.s, p0, [%[c_ptr], #3, MUL VL]\n"
+ "st1w z30.s, p0, [%[c_ptr], #4, MUL VL]\n"
+ "st1w z23.s, p0, [%[c_ptr], #5, MUL VL]\n"
+ "st1w z27.s, p0, [%[c_ptr], #6, MUL VL]\n"
+ "st1w z31.s, p0, [%[c_ptr], #7, MUL VL]\n"
+ "addvl %[c_ptr], %[c_ptr], #8\n"
+ : [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
+ [loops] "+r" (loops), [tails] "+r" (tails)
+ :
+ : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ }
+ }
+}
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_sgemm_3VLx8.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_sgemm_3VLx8.hpp
new file mode 100644
index 0000000000..9d7f593fd0
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_sgemm_3VLx8.hpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+#include "../std_transforms_sve.hpp"
+
+namespace arm_gemm {
+
+// Actual kernel implementations
+void sve_sgemm_3VLx8(const float *, const float *, float *, int, int, int);
+
+// 3VLx8 SGEMM "strategy" class.
+//
+// This describes the characteristics of a family of kernels, in terms of
+// the required interleave properties and the output block size.
+//
+// All kernels in the family must share these characteristics. The actual
+// kernel to be used can be chosen at runtime, based on the CPUInfo
+// structure.
+class sgemm_3VLx8 {
+public:
+ typedef float operand_type;
+ typedef float result_type;
+
+ typedef void (*kern_type)(const float *, const float *, float *, int, int, int);
+
+ /* Kernel blocking parameters */
+
+ /* Width depends on vector length - use CNTW to compute the right value */
+ static int out_width() {
+ return svcntw() * 3;
+ }
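+ // Example (illustrative only): a hypothetical 512-bit SVE implementation
+ // has svcntw() == 16, giving 48 columns per output block; a 256-bit one
+ // has svcntw() == 8, giving 24.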
+
+ static int out_height() {
+ return 8;
+ }
+
+ static int k_unroll() {
+ return 1;
+ }
+
+ // Use the standard SVE transforms.
+ StdTransformsSVE<operand_type, result_type, 8, 3> transforms;
+
+ kern_type kernel=sve_sgemm_3VLx8;
+
+ sgemm_3VLx8(const CPUInfo *ci) { }
+};
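+
+ // A minimal usage sketch (illustrative only, not part of the strategy
+ // itself): a driver loop could consume these blocking parameters roughly
+ // as below, tiling C into out_height() x out_width() blocks and invoking
+ // the kernel on the interleaved A/B panels for each block. Names and loop
+ // structure are assumptions; the real framework also handles interleaving,
+ // edge cases and threading.
+ //
+ //   sgemm_3VLx8 strat(ci);
+ //   const int bw = sgemm_3VLx8::out_width();   // 3 * svcntw() columns
+ //   const int bh = sgemm_3VLx8::out_height();  // 8 rows
+ //   for (int m = 0; m < M; m += bh) {
+ //       for (int n = 0; n < N; n += bw) {
+ //           // strat.kernel(a_panel, b_panel, c_block, ablocks, bblocks, K);
+ //       }
+ //   }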
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_sgemm_3VLx8/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_sgemm_3VLx8/generic.cpp
new file mode 100644
index 0000000000..fd6f0b7f98
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_sgemm_3VLx8/generic.cpp
@@ -0,0 +1,366 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+
+#include <arm_neon.h>
+#include <arm_sve.h>
+
+#include "../../asmlib.hpp"
+
+// Kernel implementation.
+//
+// Assume that "Apanel" points to a chunk of A blocks (each size 8xK) in read-order.
+// Assume that "Bpanel" points to a chunk of B blocks (each size 3VLxK) in read-order.
+// Assume that "Cpanel" points to a chunk of C output blocks (each size
+// 3VLx8), the chunks being arranged in a row major fashion.
+//
+// Note that the intent is that either ablocks or bblocks will be 1; this
+// construction allows the output loop to proceed in either order.
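+//
+// As a worked example (assuming a 256-bit SVE implementation, so VL is 8
+// floats): each B block is 24xK and each C block is 24x8, while the A
+// blocks stay 8xK regardless of vector length.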
+
+namespace arm_gemm {
+
+void sve_sgemm_3VLx8(const float *Apanel, const float *Bpanel, float *Cpanel, int ablocks, int bblocks, int K) {
+ const float *a_ptr = Apanel;
+ float *c_ptr = Cpanel;
+
+ // There's no predication inside the kernel, so get a true predicate to use everywhere.
+ svbool_t ptrue = svptrue_b32();
+
+ for (int yb=0; yb<ablocks; yb++) {
+ const float *a_ptr0 = a_ptr;
+ const float *b_ptr = Bpanel;
+
+ for (int xb=0; xb<bblocks; xb++) {
+ a_ptr = a_ptr0;
+ // Fix up for odd lengths - set a flag if K is odd, but make
+ // sure we round up the iteration count.
+ int oddk = (K & 1);
+ int k = ((K+1)/2) - 1;
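+ // Worked example: K=5 gives oddk=1, k=2 (two unrolled iterations plus
+ // the odd-K tail); K=6 gives oddk=0, k=2 (two unrolled iterations plus
+ // the detached even-K final iteration).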
+
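+ // Pinning the operands to specific Z registers matters here: the indexed
+ // form of FMLA used below requires its indexed (A) operand to live in
+ // z0-z7, and keeping all named operands in z0-z6 also keeps them clear of
+ // the z8-z31 accumulators that the assembly names directly.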
+ register svfloat32_t a0 asm("z0");
+ register svfloat32_t a1 asm("z1");
+ register svfloat32_t b0 asm("z2");
+ register svfloat32_t b1 asm("z3");
+ register svfloat32_t b2 asm("z4");
+ register svfloat32_t a0a asm("z5");
+ register svfloat32_t a1a asm("z6");
+
+ // Note: all prefetches are commented out for now, but are left in place
+ // to document how prefetching was done in the NEON kernels. Actual
+ // prefetches will be added once test hardware is available.
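+ // (ASM_PREFETCH is the helper macro from ../../asmlib.hpp; in the NEON
+ // kernels it expands to a PRFM PLDL1KEEP hint for the given address.)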
+ __asm __volatile (
+ // Initialize result registers, load initial operands, prime prefetches.
+ "mov z8.s, #0\n"
+ "ld1rqw %[a0].S, %[ptrue]/Z, [%[a_ptr]]\n"
+ "mov z9.s, #0\n"
+ "ld1w %[b0].S, %[ptrue]/Z, [%[b_ptr]]\n"
+ "mov z10.s, #0\n"
+ "ld1rqw %[a1].S, %[ptrue]/Z, [%[a_ptr], #0x10]\n"
+ "mov z11.s, #0\n"
+ "ld1w %[b1].S, %[ptrue]/Z, [%[b_ptr], #1, MUL VL]\n"
+ "mov z12.s, #0\n"
+ //ASM_PREFETCH("[%[b_ptr], #64]")
+ "mov z13.s, #0\n"
+ //ASM_PREFETCH("[%[a_ptr], #64]")
+ "mov z14.s, #0\n"
+ //ASM_PREFETCH("[%[b_ptr], #128]")
+ "mov z15.s, #0\n"
+ //ASM_PREFETCH("[%[a_ptr], #128]")
+ "mov z16.s, #0\n"
+ //ASM_PREFETCH("[%[b_ptr], #192]")
+ "mov z17.s, #0\n"
+ //ASM_PREFETCH("[%[b_ptr], #256]")
+ "mov z18.s, #0\n"
+ //ASM_PREFETCH("[%[a_ptr], #192]")
+ "mov z19.s, #0\n"
+ //ASM_PREFETCH("[%[b_ptr], #320]")
+ "mov z20.s, #0\n"
+ //ASM_PREFETCH("[%[a_ptr], #256]")
+ "mov z21.s, #0\n"
+ //ASM_PREFETCH("[%[b_ptr], #384]")
+ "mov z22.s, #0\n"
+ "mov z23.s, #0\n"
+ "mov z24.s, #0\n"
+ "mov z25.s, #0\n"
+ "mov z26.s, #0\n"
+ "mov z27.s, #0\n"
+ "mov z28.s, #0\n"
+ "mov z29.s, #0\n"
+ "mov z30.s, #0\n"
+ "mov z31.s, #0\n"
+
+ // Skip loop if we are doing zero iterations of it.
+ "cbz %w[k], 4f\n"
+
+ // Loop proper
+ "1:\n"
+ "fmla z8.s , %[b0].s, %[a0].s[0]\n"
+ "fmla z9.s , %[b0].s, %[a0].s[1]\n"
+ "ld1w %[b2].s, %[ptrue]/Z, [%[b_ptr], #2, MUL VL]\n"
+ "fmla z10.s, %[b0].s, %[a0].s[2]\n"
+ "fmla z11.s, %[b0].s, %[a0].s[3]\n"
+ "ld1rqw %[a0a].s, %[ptrue]/Z, [%[a_ptr], #0x20]\n"
+ "fmla z12.s, %[b0].s, %[a1].s[0]\n"
+ "fmla z13.s, %[b0].s, %[a1].s[1]\n"
+ "ld1rqw %[a1a].s, %[ptrue]/Z, [%[a_ptr], #0x30]\n"
+ "fmla z14.s, %[b0].s, %[a1].s[2]\n"
+ "fmla z15.s, %[b0].s, %[a1].s[3]\n"
+ "ld1w %[b0].s, %[ptrue]/Z, [%[b_ptr], #3, MUL VL]\n"
+
+ "fmla z16.s, %[b1].s, %[a0].s[0]\n"
+ "fmla z17.s, %[b1].s, %[a0].s[1]\n"
+ //ASM_PREFETCH("[%[a_ptr], #320]")
+ "fmla z18.s, %[b1].s, %[a0].s[2]\n"
+ "fmla z19.s, %[b1].s, %[a0].s[3]\n"
+ "fmla z20.s, %[b1].s, %[a1].s[0]\n"
+ "fmla z21.s, %[b1].s, %[a1].s[1]\n"
+ "fmla z22.s, %[b1].s, %[a1].s[2]\n"
+ "fmla z23.s, %[b1].s, %[a1].s[3]\n"
+ "ld1w %[b1].s, %[ptrue]/Z, [%[b_ptr], #4, MUL VL]\n"
+
+ "fmla z24.s, %[b2].s, %[a0].s[0]\n"
+ "fmla z25.s, %[b2].s, %[a0].s[1]\n"
+ //ASM_PREFETCH("[%[b_ptr], #448]")
+ "fmla z26.s, %[b2].s, %[a0].s[2]\n"
+ "fmla z27.s, %[b2].s, %[a0].s[3]\n"
+ "fmla z28.s, %[b2].s, %[a1].s[0]\n"
+ "fmla z29.s, %[b2].s, %[a1].s[1]\n"
+ "fmla z30.s, %[b2].s, %[a1].s[2]\n"
+ "fmla z31.s, %[b2].s, %[a1].s[3]\n"
+ "ld1w %[b2].s, %[ptrue]/Z, [%[b_ptr], #5, MUL VL]\n"
+
+ "fmla z8.s , %[b0].s, %[a0a].s[0]\n"
+ "fmla z9.s , %[b0].s, %[a0a].s[1]\n"
+ "ld1rqw %[a0].s, %[ptrue]/Z, [%[a_ptr], #0x40]\n"
+ "fmla z10.s, %[b0].s, %[a0a].s[2]\n"
+ "fmla z11.s, %[b0].s, %[a0a].s[3]\n"
+ "fmla z12.s, %[b0].s, %[a1a].s[0]\n"
+ "ld1rqw %[a1].s, %[ptrue]/Z, [%[a_ptr], #0x50]\n"
+ "fmla z13.s, %[b0].s, %[a1a].s[1]\n"
+ "fmla z14.s, %[b0].s, %[a1a].s[2]\n"
+ "fmla z15.s, %[b0].s, %[a1a].s[3]\n"
+ "ld1w %[b0].s, %[ptrue]/Z, [%[b_ptr], #6, MUL VL]\n"
+
+ "fmla z16.s, %[b1].s, %[a0a].s[0]\n"
+ "fmla z17.s, %[b1].s, %[a0a].s[1]\n"
+ //ASM_PREFETCH("[%[b_ptr], #512]")
+ "fmla z18.s, %[b1].s, %[a0a].s[2]\n"
+ "fmla z19.s, %[b1].s, %[a0a].s[3]\n"
+ "fmla z20.s, %[b1].s, %[a1a].s[0]\n"
+ "fmla z21.s, %[b1].s, %[a1a].s[1]\n"
+ "fmla z22.s, %[b1].s, %[a1a].s[2]\n"
+ "fmla z23.s, %[b1].s, %[a1a].s[3]\n"
+ "ld1w %[b1].s, %[ptrue]/Z, [%[b_ptr], #7, MUL VL]\n"
+
+ "fmla z24.s, %[b2].s, %[a0a].s[0]\n"
+ "fmla z25.s, %[b2].s, %[a0a].s[1]\n"
+ "add %[a_ptr], %[a_ptr], #0x40\n"
+ "fmla z26.s, %[b2].s, %[a0a].s[2]\n"
+ "fmla z27.s, %[b2].s, %[a0a].s[3]\n"
+ "incb %[b_ptr], ALL, MUL #6\n"
+ "fmla z28.s, %[b2].s, %[a1a].s[0]\n"
+ "fmla z29.s, %[b2].s, %[a1a].s[1]\n"
+ "subs %w[k], %w[k], #1\n"
+ "fmla z30.s, %[b2].s, %[a1a].s[2]\n"
+ "fmla z31.s, %[b2].s, %[a1a].s[3]\n"
+ "bne 1b\n"
+
+ // Target to use when K is 1 or 2 (i.e. zero iterations of main loop)
+ "4:\n"
+
+ // Branch to alternative tail for odd K
+ "cbnz %w[oddk], 2f\n"
+
+ // Detached final iteration (even K)
+ "fmla z8.s , %[b0].s, %[a0].s[0]\n"
+ "fmla z9.s , %[b0].s, %[a0].s[1]\n"
+ "ld1w %[b2].s, %[ptrue]/Z, [%[b_ptr], #2, MUL VL]\n"
+ "fmla z10.s, %[b0].s, %[a0].s[2]\n"
+ "fmla z11.s, %[b0].s, %[a0].s[3]\n"
+ "ld1rqw %[a0a].s, %[ptrue]/Z, [%[a_ptr], #0x20]\n"
+ "fmla z12.s, %[b0].s, %[a1].s[0]\n"
+ "fmla z13.s, %[b0].s, %[a1].s[1]\n"
+ "ld1rqw %[a1a].s, %[ptrue]/Z, [%[a_ptr], #0x30]\n"
+ "fmla z14.s, %[b0].s, %[a1].s[2]\n"
+ "fmla z15.s, %[b0].s, %[a1].s[3]\n"
+ "ld1w %[b0].s, %[ptrue]/Z, [%[b_ptr], #3, MUL VL]\n"
+
+ "fmla z16.s, %[b1].s, %[a0].s[0]\n"
+ "fmla z17.s, %[b1].s, %[a0].s[1]\n"
+ "fmla z18.s, %[b1].s, %[a0].s[2]\n"
+ "fmla z19.s, %[b1].s, %[a0].s[3]\n"
+ "fmla z20.s, %[b1].s, %[a1].s[0]\n"
+ "fmla z21.s, %[b1].s, %[a1].s[1]\n"
+ "fmla z22.s, %[b1].s, %[a1].s[2]\n"
+ "fmla z23.s, %[b1].s, %[a1].s[3]\n"
+ "ld1w %[b1].s, %[ptrue]/Z, [%[b_ptr], #4, MUL VL]\n"
+
+ "fmla z24.s, %[b2].s, %[a0].s[0]\n"
+ "fmla z25.s, %[b2].s, %[a0].s[1]\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
+ "fmla z26.s, %[b2].s, %[a0].s[2]\n"
+ "fmla z27.s, %[b2].s, %[a0].s[3]\n"
+ "fmla z28.s, %[b2].s, %[a1].s[0]\n"
+ "fmla z29.s, %[b2].s, %[a1].s[1]\n"
+ "fmla z30.s, %[b2].s, %[a1].s[2]\n"
+ "fmla z31.s, %[b2].s, %[a1].s[3]\n"
+ "ld1w %[b2].s, %[ptrue]/Z, [%[b_ptr], #5, MUL VL]\n"
+
+ "fmla z8.s , %[b0].s, %[a0a].s[0]\n"
+ "fmla z16.s, %[b1].s, %[a0a].s[0]\n"
+ "incb %[b_ptr], ALL, MUL #6\n"
+ "fmla z9.s , %[b0].s, %[a0a].s[1]\n"
+ "st1w z8.s, %[ptrue], [%[c_ptr]]\n"
+ "fmla z17.s, %[b1].s, %[a0a].s[1]\n"
+ "st1w z16.s, %[ptrue], [%[c_ptr], #1, MUL VL]\n"
+ "fmla z24.s, %[b2].s, %[a0a].s[0]\n"
+ "st1w z24.s, %[ptrue], [%[c_ptr], #2, MUL VL]\n"
+
+ "fmla z25.s, %[b2].s, %[a0a].s[1]\n"
+ "st1w z9.s, %[ptrue], [%[c_ptr], #3, MUL VL]\n"
+ "fmla z10.s, %[b0].s, %[a0a].s[2]\n"
+ "st1w z17.s, %[ptrue], [%[c_ptr], #4, MUL VL]\n"
+ "fmla z18.s, %[b1].s, %[a0a].s[2]\n"
+ "st1w z25.s, %[ptrue], [%[c_ptr], #5, MUL VL]\n"
+ "fmla z26.s, %[b2].s, %[a0a].s[2]\n"
+ "st1w z10.s, %[ptrue], [%[c_ptr], #6, MUL VL]\n"
+
+ "fmla z11.s, %[b0].s, %[a0a].s[3]\n"
+ "st1w z18.s, %[ptrue], [%[c_ptr], #7, MUL VL]\n"
+ "incb %[c_ptr], all, mul #12\n"
+ "fmla z19.s, %[b1].s, %[a0a].s[3]\n"
+ "st1w z26.s, %[ptrue], [%[c_ptr], #-4, MUL VL]\n"
+ "fmla z27.s, %[b2].s, %[a0a].s[3]\n"
+ "st1w z11.s, %[ptrue], [%[c_ptr], #-3, MUL VL]\n"
+
+ "fmla z12.s, %[b0].s, %[a1a].s[0]\n"
+ "st1w z19.s, %[ptrue], [%[c_ptr], #-2, MUL VL]\n"
+ "fmla z20.s, %[b1].s, %[a1a].s[0]\n"
+ "st1w z27.s, %[ptrue], [%[c_ptr], #-1, MUL VL]\n"
+ "fmla z28.s, %[b2].s, %[a1a].s[0]\n"
+ "st1w z12.s, %[ptrue], [%[c_ptr]]\n"
+
+ "fmla z13.s, %[b0].s, %[a1a].s[1]\n"
+ "st1w z20.s, %[ptrue], [%[c_ptr], #1, MUL VL]\n"
+ "fmla z21.s, %[b1].s, %[a1a].s[1]\n"
+ "st1w z28.s, %[ptrue], [%[c_ptr], #2, MUL VL]\n"
+ "fmla z29.s, %[b2].s, %[a1a].s[1]\n"
+ "st1w z13.s, %[ptrue], [%[c_ptr], #3, MUL VL]\n"
+
+ "fmla z14.s, %[b0].s, %[a1a].s[2]\n"
+ "st1w z21.s, %[ptrue], [%[c_ptr], #4, MUL VL]\n"
+ "fmla z22.s, %[b1].s, %[a1a].s[2]\n"
+ "st1w z29.s, %[ptrue], [%[c_ptr], #5, MUL VL]\n"
+ "fmla z30.s, %[b2].s, %[a1a].s[2]\n"
+ "st1w z14.s, %[ptrue], [%[c_ptr], #6, MUL VL]\n"
+
+ "fmla z15.s, %[b0].s, %[a1a].s[3]\n"
+ "st1w z22.s, %[ptrue], [%[c_ptr], #7, MUL VL]\n"
+ "incb %[c_ptr], all, mul #12\n"
+ "fmla z23.s, %[b1].s, %[a1a].s[3]\n"
+ "st1w z30.s, %[ptrue], [%[c_ptr], #-4, MUL VL]\n"
+ "fmla z31.s, %[b2].s, %[a1a].s[3]\n"
+ "st1w z15.s, %[ptrue], [%[c_ptr], #-3, MUL VL]\n"
+
+ "b 3f\n"
+
+ // Detached final iteration (odd K)
+ "2:\n"
+ "fmla z8.s , %[b0].s, %[a0].s[0]\n"
+ "ld1w %[b2].s, %[ptrue]/Z, [%[b_ptr], #2, MUL VL]\n"
+ "fmla z16.s, %[b1].s, %[a0].s[0]\n"
+ "fmla z9.s , %[b0].s, %[a0].s[1]\n"
+ "st1w z8.s, %[ptrue], [%[c_ptr]]\n"
+ "fmla z17.s, %[b1].s, %[a0].s[1]\n"
+ "st1w z16.s, %[ptrue], [%[c_ptr], #1, MUL VL]\n"
+ "fmla z24.s, %[b2].s, %[a0].s[0]\n"
+ "incb %[b_ptr], all, mul #3\n"
+ "add %[a_ptr], %[a_ptr], #32\n"
+ "st1w z24.s, %[ptrue], [%[c_ptr], #2, MUL VL]\n"
+ "fmla z25.s, %[b2].s, %[a0].s[1]\n"
+ "st1w z9.s, %[ptrue], [%[c_ptr], #3, MUL VL]\n"
+
+ "fmla z10.s, %[b0].s, %[a0].s[2]\n"
+ "st1w z17.s, %[ptrue], [%[c_ptr], #4, MUL VL]\n"
+ "fmla z18.s, %[b1].s, %[a0].s[2]\n"
+ "st1w z25.s, %[ptrue], [%[c_ptr], #5, MUL VL]\n"
+ "fmla z26.s, %[b2].s, %[a0].s[2]\n"
+ "st1w z10.s, %[ptrue], [%[c_ptr], #6, MUL VL]\n"
+
+ "fmla z11.s, %[b0].s, %[a0].s[3]\n"
+ "st1w z18.s, %[ptrue], [%[c_ptr], #7, MUL VL]\n"
+ "incb %[c_ptr], all, mul #12\n"
+ "fmla z19.s, %[b1].s, %[a0].s[3]\n"
+ "st1w z26.s, %[ptrue], [%[c_ptr], #-4, MUL VL]\n"
+ "fmla z27.s, %[b2].s, %[a0].s[3]\n"
+ "st1w z11.s, %[ptrue], [%[c_ptr], #-3, MUL VL]\n"
+
+ "fmla z12.s, %[b0].s, %[a1].s[0]\n"
+ "st1w z19.s, %[ptrue], [%[c_ptr], #-2, MUL VL]\n"
+ "fmla z20.s, %[b1].s, %[a1].s[0]\n"
+ "st1w z27.s, %[ptrue], [%[c_ptr], #-1, MUL VL]\n"
+ "fmla z28.s, %[b2].s, %[a1].s[0]\n"
+ "st1w z12.s, %[ptrue], [%[c_ptr]]\n"
+
+ "fmla z13.s, %[b0].s, %[a1].s[1]\n"
+ "st1w z20.s, %[ptrue], [%[c_ptr], #1, MUL VL]\n"
+ "fmla z21.s, %[b1].s, %[a1].s[1]\n"
+ "st1w z28.s, %[ptrue], [%[c_ptr], #2, MUL VL]\n"
+ "fmla z29.s, %[b2].s, %[a1].s[1]\n"
+ "st1w z13.s, %[ptrue], [%[c_ptr], #3, MUL VL]\n"
+
+ "fmla z14.s, %[b0].s, %[a1].s[2]\n"
+ "st1w z21.s, %[ptrue], [%[c_ptr], #4, MUL VL]\n"
+ "fmla z22.s, %[b1].s, %[a1].s[2]\n"
+ "st1w z29.s, %[ptrue], [%[c_ptr], #5, MUL VL]\n"
+ "fmla z30.s, %[b2].s, %[a1].s[2]\n"
+ "st1w z14.s, %[ptrue], [%[c_ptr], #6, MUL VL]\n"
+
+ "fmla z15.s, %[b0].s, %[a1].s[3]\n"
+ "st1w z22.s, %[ptrue], [%[c_ptr], #7, MUL VL]\n"
+ "incb %[c_ptr], all, mul #12\n"
+ "fmla z23.s, %[b1].s, %[a1].s[3]\n"
+ "st1w z30.s, %[ptrue], [%[c_ptr], #-4, MUL VL]\n"
+ "fmla z31.s, %[b2].s, %[a1].s[3]\n"
+ "st1w z15.s, %[ptrue], [%[c_ptr], #-3, MUL VL]\n"
+
+ // Common tail
+ "3:\n"
+ "st1w z23.s, %[ptrue], [%[c_ptr], #-2, MUL VL]\n"
+ "st1w z31.s, %[ptrue], [%[c_ptr], #-1, MUL VL]\n"
+ :
+ [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
+ [a0] "+w" (a0), [a1] "+w" (a1), [a0a] "+w" (a0a), [a1a] "+w" (a1a),
+ [b0] "+w" (b0), [b1] "+w" (b1), [b2] "+w" (b2), [k] "+r" (k)
+ : [oddk] "r" (oddk), [ptrue] "Upl" (ptrue)
+ : "x20", "x21", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18",
+ "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",
+ "cc", "memory"
+ );
+ }
+ }
+}
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE