author    Georgios Pinitas <georgios.pinitas@arm.com>  2018-10-26 19:05:32 +0100
committer Georgios Pinitas <georgios.pinitas@arm.com>  2018-11-08 12:00:31 +0000
commit    421405b6a21b124288a750e2da26dc01eb7391cb (patch)
tree      35f5655ce9d8b5921cb03630534f532e4eb47bf5 /src/core/NEON/kernels/arm_gemm
parent    f1adf11c776aebaa8da1b8644a4ba2453afd2b81 (diff)
download  ComputeLibrary-421405b6a21b124288a750e2da26dc01eb7391cb.tar.gz
COMPMID-1675: Add SVE support
Change-Id: I86679adff556b6ffc9929b35cbf1b59b3958bdb1
Diffstat (limited to 'src/core/NEON/kernels/arm_gemm')
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp                                        |   18
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp                                        |   14
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_int16.cpp                                       |    2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_int8.cpp                                        |   14
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp                                       |   12
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_3VLx8.hpp           |   72
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_3VLx8/generic.cpp   |  324
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_3VLx8.hpp           |   72
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_3VLx8/generic.cpp   |  333
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_3VLx8.hpp          |   72
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_3VLx8/generic.cpp  |  334
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_3VLx8.hpp          |   72
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_3VLx8/generic.cpp  |  328
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_sgemm_3VLx8.hpp                          |   75
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_sgemm_3VLx8/generic.cpp                  |  366
-rw-r--r--  src/core/NEON/kernels/arm_gemm/merges/list.hpp                                      |    2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp32_2VLx8.hpp                      | 1208
-rw-r--r--  src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp32_3VLx8.hpp                      | 1564
-rw-r--r--  src/core/NEON/kernels/arm_gemm/std_transforms_sve.hpp                               |   71
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transform.hpp                                        |    2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/list.hpp                                  |   10
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_32bit.hpp             |  596
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block2_16bit.hpp      |  632
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block2_32bit.hpp      |  632
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block4_16bit.hpp      |  632
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block4_8bit.hpp       |  596
-rw-r--r--  src/core/NEON/kernels/arm_gemm/utils.hpp                                            |   24
27 files changed, 8058 insertions, 19 deletions
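
All of the new kernels are gated on the __ARM_FEATURE_SVE compiler feature macro and are vector-length (VL) agnostic: tile sizes are expressed in multiples of the hardware vector length and queried at run time rather than hard-coded. A minimal sketch (not part of this patch) of the ACLE counting intrinsics the kernels rely on for sizing:

// Sketch only: the sizing intrinsics used by the new kernels.
// Requires a compiler targeting SVE (e.g. -march=armv8.2-a+sve).
#include <arm_sve.h>
#include <cstdio>

int main() {
    // Number of 16-bit (h) and 32-bit (w) elements per SVE vector.
    // On a 256-bit implementation these return 16 and 8 respectively.
    printf("fp16 elements/vector: %llu\n", (unsigned long long)svcnth());
    printf("fp32 elements/vector: %llu\n", (unsigned long long)svcntw());
    // A "3VL" kernel therefore produces 3 * svcntw() fp32 results per row.
    return 0;
}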
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp b/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp
index 4579ebd307..9194bdd4d4 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp
@@ -34,10 +34,22 @@
#include "kernels/a64_hgemm_24x8.hpp"
#include "kernels/a64_sgemm_12x8.hpp"
#include "kernels/a32_sgemm_8x6.hpp"
+#include "kernels/sve_interleaved_fp16_mla_3VLx8.hpp"
namespace arm_gemm {
-#ifdef __aarch64__
+#ifdef __ARM_FEATURE_SVE
+class GemmImpl_gemm_fp16_interleaved_fp16 : public GemmImplementation<__fp16, __fp16> {
+public:
+
+ UniqueGemmCommon<__fp16, __fp16> instantiate(const GemmArgs<__fp16> &args) override {
+ return UniqueGemmCommon<__fp16, __fp16>(new GemmInterleaved<interleaved_fp16_mla_3VLx8, __fp16, __fp16>(args));
+ }
+
+ GemmImpl_gemm_fp16_interleaved_fp16() : GemmImplementation<__fp16, __fp16>(GemmMethod::GEMM_INTERLEAVED_FP16) { }
+};
+
+#elif defined(__aarch64__)
#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) || defined(FP16_KERNELS)
class GemmImpl_gemm_fp16_interleaved_fp16 : public GemmImplementation<__fp16, __fp16> {
@@ -73,13 +85,13 @@ public:
GemmImpl_gemm_fp16_interleaved() : GemmImplementation<__fp16, __fp16>(GemmMethod::GEMM_INTERLEAVED) { }
};
-#if defined(__aarch64__) && (defined(__ARM_FEATURE_VECTOR_ARITHMETIC) || defined(FP16_KERNELS))
+#if defined(__aarch64__) && (defined(__ARM_FEATURE_VECTOR_ARITHMETIC) || defined(FP16_KERNELS) || defined(__ARM_FEATURE_SVE))
static GemmImpl_gemm_fp16_interleaved_fp16 gemm_fp16_interleaved_fp16_impl{};
#endif
static GemmImpl_gemm_fp16_interleaved gemm_fp16_interleaved_impl{};
static std::vector<GemmImplementation<__fp16, __fp16> *> gemm_fp16_methods = {
-#if defined(__aarch64__) && (defined(__ARM_FEATURE_VECTOR_ARITHMETIC) || defined(FP16_KERNELS))
+#if defined(__aarch64__) && (defined(__ARM_FEATURE_VECTOR_ARITHMETIC) || defined(FP16_KERNELS) || defined(__ARM_FEATURE_SVE))
&gemm_fp16_interleaved_fp16_impl,
#endif
&gemm_fp16_interleaved_impl
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
index e840e90eec..7d14971b70 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
@@ -36,10 +36,12 @@
#include "kernels/a64_sgemv_pretransposed.hpp"
#include "kernels/a64_sgemm_native_16x4.hpp"
+#include "kernels/sve_interleaved_fp32_mla_3VLx8.hpp"
+
namespace arm_gemm {
-#ifdef __aarch64__
-// SGEMM implementations for AArch64
+#if defined(__aarch64__) && !defined(__ARM_FEATURE_SVE)
+// SGEMM implementations for AArch64 without SVE
// Pretransposed GEMV
class GemmImpl_sgemm_gemv_pretransposed : public GemmImplementation<float, float> {
@@ -92,7 +94,9 @@ public:
class GemmImpl_sgemm_gemm_interleaved : public GemmImplementation<float, float> {
public:
UniqueGemmCommon<float, float> instantiate(const GemmArgs<float> &args) override {
-#ifdef __aarch64__
+#ifdef __ARM_FEATURE_SVE
+ return UniqueGemmCommon<float, float> (new GemmInterleaved<interleaved_fp32_mla_3VLx8, float, float>(args));
+#elif defined(__aarch64__)
return UniqueGemmCommon<float, float> (new GemmInterleaved<sgemm_12x8, float, float>(args));
#elif defined(__arm__)
return UniqueGemmCommon<float, float> (new GemmInterleaved<sgemm_8x6, float, float>(args));
@@ -105,7 +109,7 @@ public:
};
static GemmImpl_gemv_batched<float, float> gemv_batched_impl{};
-#ifdef __aarch64__
+#if defined(__aarch64__) && !defined(__ARM_FEATURE_SVE)
static GemmImpl_sgemm_gemv_pretransposed sgemm_gemv_pretransposed_impl{};
static GemmImpl_sgemm_gemv_native_transposed sgemm_gemv_native_transposed_impl{};
static GemmImpl_sgemm_gemm_native sgemm_gemm_native_impl{};
@@ -115,7 +119,7 @@ static GemmImpl_sgemm_gemm_interleaved sgemm_gemm_interleaved_impl{};
/* List of implementations (order matters) */
static std::vector<GemmImplementation<float, float> *> SGemmMethods = {
&gemv_batched_impl,
-#ifdef __aarch64__
+#if defined(__aarch64__) && !defined(__ARM_FEATURE_SVE)
&sgemm_gemv_pretransposed_impl,
&sgemm_gemv_native_transposed_impl,
&sgemm_gemm_native_impl,
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_int16.cpp b/src/core/NEON/kernels/arm_gemm/gemm_int16.cpp
index b7e8fa21af..ad171a7f9a 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_int16.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_int16.cpp
@@ -59,4 +59,4 @@ template bool method_is_compatible<int16_t, int32_t>(GemmMethod method, GemmArgs
} // namespace arm_gemm
-#endif // __aarch64__
+#endif // __aarch64__
\ No newline at end of file
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
index dffa056adc..627d8abdb9 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
@@ -31,9 +31,21 @@
#include "kernels/a64_gemm_s16_12x8.hpp"
#include "kernels/a64_gemm_s8_12x8.hpp"
#include "kernels/a64_gemm_s8_4x4.hpp"
+#include "kernels/sve_interleaved_s8s32_dot_3VLx8.hpp"
namespace arm_gemm {
+#ifdef __ARM_FEATURE_SVE
+class GemmImpl_gemm_s8_interleaved_dot : public GemmImplementation<int8_t, int32_t> {
+public:
+ UniqueGemmCommon<int8_t, int32_t> instantiate(const GemmArgs<int32_t> &args) override {
+ return UniqueGemmCommon<int8_t, int32_t>(new GemmInterleaved<interleaved_s8s32_dot_3VLx8, int8_t, int32_t>(args));
+ }
+
+ GemmImpl_gemm_s8_interleaved_dot() : GemmImplementation<int8_t, int32_t>(GemmMethod::GEMM_INTERLEAVED_DOT) { }
+};
+#else
+
class GemmImpl_gemm_s8_interleaved_dot : public GemmImplementation<int8_t, int32_t> {
public:
bool is_supported(const GemmArgs<int32_t> &args) override {
@@ -47,6 +59,8 @@ public:
GemmImpl_gemm_s8_interleaved_dot() : GemmImplementation<int8_t, int32_t>(GemmMethod::GEMM_INTERLEAVED_DOT) { }
};
+#endif
+
class GemmImpl_gemm_s8_interleaved : public GemmImplementation<int8_t, int32_t> {
public:
UniqueGemmCommon<int8_t, int32_t> instantiate(const GemmArgs<int32_t> &args) override {
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp
index 60b7954db3..b7c1bab6bd 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp
@@ -31,9 +31,20 @@
#include "kernels/a64_gemm_u16_12x8.hpp"
#include "kernels/a64_gemm_u8_12x8.hpp"
#include "kernels/a64_gemm_u8_4x4.hpp"
+#include "kernels/sve_interleaved_u8u32_dot_3VLx8.hpp"
namespace arm_gemm {
+#ifdef __ARM_FEATURE_SVE
+class GemmImpl_gemm_u8_interleaved_dot : public GemmImplementation<uint8_t, uint32_t> {
+public:
+ UniqueGemmCommon<uint8_t, uint32_t> instantiate(const GemmArgs<uint32_t> &args) override {
+ return UniqueGemmCommon<uint8_t, uint32_t>(new GemmInterleaved<interleaved_u8u32_dot_3VLx8, uint8_t, uint32_t>(args));
+ }
+
+ GemmImpl_gemm_u8_interleaved_dot() : GemmImplementation<uint8_t, uint32_t>(GemmMethod::GEMM_INTERLEAVED_DOT) { }
+};
+#else
class GemmImpl_gemm_u8_interleaved_dot : public GemmImplementation<uint8_t, uint32_t> {
public:
bool is_supported(const GemmArgs<uint32_t> &args) override {
@@ -46,6 +57,7 @@ public:
GemmImpl_gemm_u8_interleaved_dot() : GemmImplementation<uint8_t, uint32_t>(GemmMethod::GEMM_INTERLEAVED_DOT) { }
};
+#endif
class GemmImpl_gemm_u8_interleaved : public GemmImplementation<uint8_t, uint32_t> {
public:
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_3VLx8.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_3VLx8.hpp
new file mode 100644
index 0000000000..3fd738e673
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_3VLx8.hpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+
+#include "../std_transforms_sve.hpp"
+
+namespace arm_gemm {
+
+// Actual kernel implementations
+void sve_interleaved_fp16_mla_3VLx8(const __fp16 *, const __fp16 *, __fp16 *, int, int, int);
+
+class interleaved_fp16_mla_3VLx8 {
+public:
+ typedef __fp16 operand_type;
+ typedef __fp16 result_type;
+
+ typedef void (*kern_type)(const __fp16 *, const __fp16 *, __fp16 *, int, int, int);
+
+ /* Kernel blocking parameters */
+ static int out_width()
+ {
+ return svcnth() * 3;
+ }
+
+ static int out_height()
+ {
+ return 8;
+ }
+
+ static int k_unroll()
+ {
+ return 1;
+ }
+
+ // Use the standard fixed size transforms.
+ StdTransformsSVE<operand_type, result_type, 8, 3, 1, 1> transforms = {};
+
+ kern_type kernel=sve_interleaved_fp16_mla_3VLx8;
+
+ interleaved_fp16_mla_3VLx8(const CPUInfo *ci)
+ {
+
+ }
+};
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
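
The blocking parameters above are what make the kernel VL-agnostic: out_width() is three vectors of fp16 elements, so the output tile is (3 * svcnth()) columns by 8 rows. For example, on a hypothetical 512-bit SVE implementation svcnth() = 32, giving a 96x8 fp16 tile per kernel call, held in the 24 accumulator registers (z8-z31, three per output row) visible in generic.cpp below.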
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_3VLx8/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_3VLx8/generic.cpp
new file mode 100644
index 0000000000..92ec888244
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_3VLx8/generic.cpp
@@ -0,0 +1,324 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+
+
+#include "../../asmlib.hpp"
+
+namespace arm_gemm {
+
+void sve_interleaved_fp16_mla_3VLx8(const __fp16 *Apanel, const __fp16 *Bpanel, __fp16 *Cpanel, int ablocks, int bblocks, int K) {
+ const __fp16 *a_ptr = Apanel;
+ __fp16 *c_ptr = Cpanel;
+
+ const long loops_count = (K / 2) - 1;
+ const long tails_count = K % 2;
+
+ for (int yb=0; yb<ablocks; yb++) {
+ const __fp16 *a_ptr0 = a_ptr;
+ const __fp16 *b_ptr = Bpanel;
+
+ for (int xb=0; xb<bblocks; xb++) {
+ a_ptr = a_ptr0;
+ long loops = loops_count;
+ long tails = tails_count;
+
+ __asm __volatile (
+ "mov z8.h, #0\n"
+ "ptrue p0.h\n"
+ "mov z9.h, #0\n"
+ "ld1rqh z0.h, p0/z, [%[a_ptr]]\n"
+ "mov z10.h, #0\n"
+ "ld1h z2.h, p0/z, [%[b_ptr]]\n"
+ "mov z11.h, #0\n"
+ "ld1h z3.h, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "mov z12.h, #0\n"
+ "ld1h z4.h, p0/z, [%[b_ptr], #2, MUL VL]\n"
+ "mov z13.h, #0\n"
+ "ld1h z5.h, p0/z, [%[b_ptr], #3, MUL VL]\n"
+ "mov z14.h, #0\n"
+ "ld1h z6.h, p0/z, [%[b_ptr], #4, MUL VL]\n"
+ "mov z15.h, #0\n"
+ "add %[a_ptr], %[a_ptr], #0x20\n"
+ "mov z16.h, #0\n"
+ "addvl %[b_ptr], %[b_ptr], #6\n"
+ "mov z17.h, #0\n"
+ "mov z18.h, #0\n"
+ "mov z19.h, #0\n"
+ "mov z20.h, #0\n"
+ "mov z21.h, #0\n"
+ "mov z22.h, #0\n"
+ "mov z23.h, #0\n"
+ "mov z24.h, #0\n"
+ "mov z25.h, #0\n"
+ "mov z26.h, #0\n"
+ "mov z27.h, #0\n"
+ "mov z28.h, #0\n"
+ "mov z29.h, #0\n"
+ "mov z30.h, #0\n"
+ "mov z31.h, #0\n"
+ "cbz %[loops], 1f\n"
+ "2:\n"
+ "fmla z8.h, z2.h, z0.h[0]\n"
+ "ld1h z7.h, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "fmla z9.h, z2.h, z0.h[1]\n"
+ "ld1rqh z1.h, p0/z, [%[a_ptr], #-0x10]\n"
+ "fmla z10.h, z2.h, z0.h[2]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "fmla z11.h, z2.h, z0.h[3]\n"
+ "fmla z12.h, z2.h, z0.h[4]\n"
+ "fmla z13.h, z2.h, z0.h[5]\n"
+ "fmla z14.h, z2.h, z0.h[6]\n"
+ "fmla z15.h, z2.h, z0.h[7]\n"
+ "ld1h z2.h, p0/z, [%[b_ptr]]\n"
+ "fmla z16.h, z3.h, z0.h[0]\n"
+ "fmla z17.h, z3.h, z0.h[1]\n"
+ "fmla z18.h, z3.h, z0.h[2]\n"
+ "fmla z19.h, z3.h, z0.h[3]\n"
+ "fmla z20.h, z3.h, z0.h[4]\n"
+ "fmla z21.h, z3.h, z0.h[5]\n"
+ "fmla z22.h, z3.h, z0.h[6]\n"
+ "fmla z23.h, z3.h, z0.h[7]\n"
+ "ld1h z3.h, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "fmla z24.h, z4.h, z0.h[0]\n"
+ "fmla z25.h, z4.h, z0.h[1]\n"
+ "fmla z26.h, z4.h, z0.h[2]\n"
+ "fmla z27.h, z4.h, z0.h[3]\n"
+ "fmla z28.h, z4.h, z0.h[4]\n"
+ "fmla z29.h, z4.h, z0.h[5]\n"
+ "fmla z30.h, z4.h, z0.h[6]\n"
+ "fmla z31.h, z4.h, z0.h[7]\n"
+ "ld1h z4.h, p0/z, [%[b_ptr], #2, MUL VL]\n"
+ "fmla z8.h, z5.h, z1.h[0]\n"
+ "ld1rqh z0.h, p0/z, [%[a_ptr]]\n"
+ "fmla z9.h, z5.h, z1.h[1]\n"
+ "add %[a_ptr], %[a_ptr], #0x20\n"
+ "fmla z10.h, z5.h, z1.h[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #6\n"
+ "fmla z11.h, z5.h, z1.h[3]\n"
+ "fmla z12.h, z5.h, z1.h[4]\n"
+ "fmla z13.h, z5.h, z1.h[5]\n"
+ "fmla z14.h, z5.h, z1.h[6]\n"
+ "fmla z15.h, z5.h, z1.h[7]\n"
+ "ld1h z5.h, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "fmla z16.h, z6.h, z1.h[0]\n"
+ "fmla z17.h, z6.h, z1.h[1]\n"
+ "fmla z18.h, z6.h, z1.h[2]\n"
+ "fmla z19.h, z6.h, z1.h[3]\n"
+ "fmla z20.h, z6.h, z1.h[4]\n"
+ "fmla z21.h, z6.h, z1.h[5]\n"
+ "fmla z22.h, z6.h, z1.h[6]\n"
+ "fmla z23.h, z6.h, z1.h[7]\n"
+ "ld1h z6.h, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "fmla z24.h, z7.h, z1.h[0]\n"
+ "fmla z25.h, z7.h, z1.h[1]\n"
+ "fmla z26.h, z7.h, z1.h[2]\n"
+ "fmla z27.h, z7.h, z1.h[3]\n"
+ "fmla z28.h, z7.h, z1.h[4]\n"
+ "fmla z29.h, z7.h, z1.h[5]\n"
+ "fmla z30.h, z7.h, z1.h[6]\n"
+ "fmla z31.h, z7.h, z1.h[7]\n"
+ "b.ne 2b\n"
+ "1:\n"
+ "cbz %[tails], 3f\n"
+ "fmla z8.h, z2.h, z0.h[0]\n"
+ "ld1h z7.h, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "fmla z9.h, z2.h, z0.h[1]\n"
+ "ld1rqh z1.h, p0/z, [%[a_ptr], #-0x10]\n"
+ "fmla z10.h, z2.h, z0.h[2]\n"
+ "fmla z11.h, z2.h, z0.h[3]\n"
+ "fmla z12.h, z2.h, z0.h[4]\n"
+ "fmla z13.h, z2.h, z0.h[5]\n"
+ "fmla z14.h, z2.h, z0.h[6]\n"
+ "fmla z15.h, z2.h, z0.h[7]\n"
+ "ld1h z2.h, p0/z, [%[b_ptr]]\n"
+ "fmla z16.h, z3.h, z0.h[0]\n"
+ "fmla z17.h, z3.h, z0.h[1]\n"
+ "fmla z18.h, z3.h, z0.h[2]\n"
+ "fmla z19.h, z3.h, z0.h[3]\n"
+ "fmla z20.h, z3.h, z0.h[4]\n"
+ "fmla z21.h, z3.h, z0.h[5]\n"
+ "fmla z22.h, z3.h, z0.h[6]\n"
+ "fmla z23.h, z3.h, z0.h[7]\n"
+ "ld1h z3.h, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "fmla z24.h, z4.h, z0.h[0]\n"
+ "fmla z25.h, z4.h, z0.h[1]\n"
+ "fmla z26.h, z4.h, z0.h[2]\n"
+ "fmla z27.h, z4.h, z0.h[3]\n"
+ "fmla z28.h, z4.h, z0.h[4]\n"
+ "fmla z29.h, z4.h, z0.h[5]\n"
+ "fmla z30.h, z4.h, z0.h[6]\n"
+ "fmla z31.h, z4.h, z0.h[7]\n"
+ "ld1h z4.h, p0/z, [%[b_ptr], #2, MUL VL]\n"
+ "fmla z8.h, z5.h, z1.h[0]\n"
+ "ld1rqh z0.h, p0/z, [%[a_ptr]]\n"
+ "fmla z9.h, z5.h, z1.h[1]\n"
+ "add %[a_ptr], %[a_ptr], #0x10\n"
+ "fmla z10.h, z5.h, z1.h[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #3\n"
+ "fmla z11.h, z5.h, z1.h[3]\n"
+ "fmla z12.h, z5.h, z1.h[4]\n"
+ "fmla z13.h, z5.h, z1.h[5]\n"
+ "fmla z14.h, z5.h, z1.h[6]\n"
+ "fmla z15.h, z5.h, z1.h[7]\n"
+ "fmla z16.h, z6.h, z1.h[0]\n"
+ "fmla z17.h, z6.h, z1.h[1]\n"
+ "fmla z18.h, z6.h, z1.h[2]\n"
+ "fmla z19.h, z6.h, z1.h[3]\n"
+ "fmla z20.h, z6.h, z1.h[4]\n"
+ "fmla z21.h, z6.h, z1.h[5]\n"
+ "fmla z22.h, z6.h, z1.h[6]\n"
+ "fmla z23.h, z6.h, z1.h[7]\n"
+ "fmla z24.h, z7.h, z1.h[0]\n"
+ "fmla z25.h, z7.h, z1.h[1]\n"
+ "fmla z26.h, z7.h, z1.h[2]\n"
+ "fmla z27.h, z7.h, z1.h[3]\n"
+ "fmla z28.h, z7.h, z1.h[4]\n"
+ "fmla z29.h, z7.h, z1.h[5]\n"
+ "fmla z30.h, z7.h, z1.h[6]\n"
+ "fmla z31.h, z7.h, z1.h[7]\n"
+ "fmla z8.h, z2.h, z0.h[0]\n"
+ "st1h z8.h, p0, [%[c_ptr]]\n"
+ "fmla z9.h, z2.h, z0.h[1]\n"
+ "fmla z10.h, z2.h, z0.h[2]\n"
+ "fmla z11.h, z2.h, z0.h[3]\n"
+ "fmla z12.h, z2.h, z0.h[4]\n"
+ "fmla z13.h, z2.h, z0.h[5]\n"
+ "fmla z14.h, z2.h, z0.h[6]\n"
+ "fmla z15.h, z2.h, z0.h[7]\n"
+ "fmla z16.h, z3.h, z0.h[0]\n"
+ "st1h z16.h, p0, [%[c_ptr], #1, MUL VL]\n"
+ "fmla z17.h, z3.h, z0.h[1]\n"
+ "fmla z18.h, z3.h, z0.h[2]\n"
+ "fmla z19.h, z3.h, z0.h[3]\n"
+ "fmla z20.h, z3.h, z0.h[4]\n"
+ "fmla z21.h, z3.h, z0.h[5]\n"
+ "fmla z22.h, z3.h, z0.h[6]\n"
+ "fmla z23.h, z3.h, z0.h[7]\n"
+ "fmla z24.h, z4.h, z0.h[0]\n"
+ "st1h z24.h, p0, [%[c_ptr], #2, MUL VL]\n"
+ "fmla z25.h, z4.h, z0.h[1]\n"
+ "st1h z9.h, p0, [%[c_ptr], #3, MUL VL]\n"
+ "fmla z26.h, z4.h, z0.h[2]\n"
+ "st1h z17.h, p0, [%[c_ptr], #4, MUL VL]\n"
+ "fmla z27.h, z4.h, z0.h[3]\n"
+ "st1h z25.h, p0, [%[c_ptr], #5, MUL VL]\n"
+ "fmla z28.h, z4.h, z0.h[4]\n"
+ "st1h z10.h, p0, [%[c_ptr], #6, MUL VL]\n"
+ "fmla z29.h, z4.h, z0.h[5]\n"
+ "st1h z18.h, p0, [%[c_ptr], #7, MUL VL]\n"
+ "fmla z30.h, z4.h, z0.h[6]\n"
+ "addvl %[c_ptr], %[c_ptr], #16\n"
+ "fmla z31.h, z4.h, z0.h[7]\n"
+ "b 4f\n"
+ "3:\n"
+ "fmla z8.h, z2.h, z0.h[0]\n"
+ "ld1h z7.h, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "fmla z9.h, z2.h, z0.h[1]\n"
+ "ld1rqh z1.h, p0/z, [%[a_ptr], #-0x10]\n"
+ "fmla z10.h, z2.h, z0.h[2]\n"
+ "fmla z11.h, z2.h, z0.h[3]\n"
+ "fmla z12.h, z2.h, z0.h[4]\n"
+ "fmla z13.h, z2.h, z0.h[5]\n"
+ "fmla z14.h, z2.h, z0.h[6]\n"
+ "fmla z15.h, z2.h, z0.h[7]\n"
+ "fmla z16.h, z3.h, z0.h[0]\n"
+ "fmla z17.h, z3.h, z0.h[1]\n"
+ "fmla z18.h, z3.h, z0.h[2]\n"
+ "fmla z19.h, z3.h, z0.h[3]\n"
+ "fmla z20.h, z3.h, z0.h[4]\n"
+ "fmla z21.h, z3.h, z0.h[5]\n"
+ "fmla z22.h, z3.h, z0.h[6]\n"
+ "fmla z23.h, z3.h, z0.h[7]\n"
+ "fmla z24.h, z4.h, z0.h[0]\n"
+ "fmla z25.h, z4.h, z0.h[1]\n"
+ "fmla z26.h, z4.h, z0.h[2]\n"
+ "fmla z27.h, z4.h, z0.h[3]\n"
+ "fmla z28.h, z4.h, z0.h[4]\n"
+ "fmla z29.h, z4.h, z0.h[5]\n"
+ "fmla z30.h, z4.h, z0.h[6]\n"
+ "fmla z31.h, z4.h, z0.h[7]\n"
+ "fmla z8.h, z5.h, z1.h[0]\n"
+ "st1h z8.h, p0, [%[c_ptr]]\n"
+ "fmla z9.h, z5.h, z1.h[1]\n"
+ "fmla z10.h, z5.h, z1.h[2]\n"
+ "fmla z11.h, z5.h, z1.h[3]\n"
+ "fmla z12.h, z5.h, z1.h[4]\n"
+ "fmla z13.h, z5.h, z1.h[5]\n"
+ "fmla z14.h, z5.h, z1.h[6]\n"
+ "fmla z15.h, z5.h, z1.h[7]\n"
+ "fmla z16.h, z6.h, z1.h[0]\n"
+ "st1h z16.h, p0, [%[c_ptr], #1, MUL VL]\n"
+ "fmla z17.h, z6.h, z1.h[1]\n"
+ "fmla z18.h, z6.h, z1.h[2]\n"
+ "fmla z19.h, z6.h, z1.h[3]\n"
+ "fmla z20.h, z6.h, z1.h[4]\n"
+ "fmla z21.h, z6.h, z1.h[5]\n"
+ "fmla z22.h, z6.h, z1.h[6]\n"
+ "fmla z23.h, z6.h, z1.h[7]\n"
+ "fmla z24.h, z7.h, z1.h[0]\n"
+ "st1h z24.h, p0, [%[c_ptr], #2, MUL VL]\n"
+ "fmla z25.h, z7.h, z1.h[1]\n"
+ "st1h z9.h, p0, [%[c_ptr], #3, MUL VL]\n"
+ "fmla z26.h, z7.h, z1.h[2]\n"
+ "st1h z17.h, p0, [%[c_ptr], #4, MUL VL]\n"
+ "fmla z27.h, z7.h, z1.h[3]\n"
+ "st1h z25.h, p0, [%[c_ptr], #5, MUL VL]\n"
+ "fmla z28.h, z7.h, z1.h[4]\n"
+ "st1h z10.h, p0, [%[c_ptr], #6, MUL VL]\n"
+ "fmla z29.h, z7.h, z1.h[5]\n"
+ "st1h z18.h, p0, [%[c_ptr], #7, MUL VL]\n"
+ "fmla z30.h, z7.h, z1.h[6]\n"
+ "addvl %[c_ptr], %[c_ptr], #16\n"
+ "fmla z31.h, z7.h, z1.h[7]\n"
+ "4:\n"
+ "st1h z26.h, p0, [%[c_ptr], #-8, MUL VL]\n"
+ "st1h z11.h, p0, [%[c_ptr], #-7, MUL VL]\n"
+ "st1h z19.h, p0, [%[c_ptr], #-6, MUL VL]\n"
+ "st1h z27.h, p0, [%[c_ptr], #-5, MUL VL]\n"
+ "st1h z12.h, p0, [%[c_ptr], #-4, MUL VL]\n"
+ "st1h z20.h, p0, [%[c_ptr], #-3, MUL VL]\n"
+ "st1h z28.h, p0, [%[c_ptr], #-2, MUL VL]\n"
+ "st1h z13.h, p0, [%[c_ptr], #-1, MUL VL]\n"
+ "st1h z21.h, p0, [%[c_ptr]]\n"
+ "st1h z29.h, p0, [%[c_ptr], #1, MUL VL]\n"
+ "st1h z14.h, p0, [%[c_ptr], #2, MUL VL]\n"
+ "st1h z22.h, p0, [%[c_ptr], #3, MUL VL]\n"
+ "st1h z30.h, p0, [%[c_ptr], #4, MUL VL]\n"
+ "st1h z15.h, p0, [%[c_ptr], #5, MUL VL]\n"
+ "st1h z23.h, p0, [%[c_ptr], #6, MUL VL]\n"
+ "st1h z31.h, p0, [%[c_ptr], #7, MUL VL]\n"
+ "addvl %[c_ptr], %[c_ptr], #8\n"
+ : [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
+ [loops] "+r" (loops), [tails] "+r" (tails)
+ :
+ : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ }
+ }
+}
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
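
The assembly above is software-pipelined: the prologue zeroes the 24 accumulators and pre-loads the first A quadword and five B vectors, the main loop runs (K/2)-1 iterations covering two k-steps each, and the K%2 tail is handled separately. The core pattern is ld1rqh, which loads a 128-bit quadword of A (one fp16 per output row) replicated across the vector, followed by indexed fmla, which broadcasts one A lane against a full B vector. A hedged intrinsics rendering of a fragment of one k-step (illustrative names, not from the patch):

// Sketch only: two fmla steps of the 3VLx8 fp16 kernel, written with
// ACLE intrinsics instead of inline assembly.
#include <arm_sve.h>

void fp16_kstep_demo(const float16_t *a, const float16_t *b,
                     float16_t *c_row0) {
    svbool_t pg = svptrue_b16();
    // ld1rqh: 8 fp16 A values (one per output row), replicated as a
    // 128-bit quadword across the whole vector.
    svfloat16_t a_quad = svld1rq_f16(pg, a);
    // One of the three B vectors ("1 VL" of the 3VL output width).
    svfloat16_t b0 = svld1_f16(pg, b);
    // Accumulators for output rows 0 and 1 against B vector 0
    // (z8 and z9 in the assembly; the real kernel keeps 24 of these live).
    svfloat16_t acc_r0 = svdup_n_f16(0);
    svfloat16_t acc_r1 = svdup_n_f16(0);
    // Indexed fmla: broadcast A lane <row> against the full B vector,
    // i.e. "fmla z8.h, z2.h, z0.h[0]" / "fmla z9.h, z2.h, z0.h[1]".
    acc_r0 = svmla_lane_f16(acc_r0, b0, a_quad, 0);
    acc_r1 = svmla_lane_f16(acc_r1, b0, a_quad, 1);
    // st1h: write one row of the output tile.
    svst1_f16(pg, c_row0, acc_r0);
    (void)acc_r1;
}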
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_3VLx8.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_3VLx8.hpp
new file mode 100644
index 0000000000..b2327f3070
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_3VLx8.hpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+
+#include "../std_transforms_sve.hpp"
+
+namespace arm_gemm {
+
+// Actual kernel implementations
+void sve_interleaved_fp32_mla_3VLx8(const float *, const float *, float *, int, int, int);
+
+class interleaved_fp32_mla_3VLx8 {
+public:
+ typedef float operand_type;
+ typedef float result_type;
+
+ typedef void (*kern_type)(const float *, const float *, float *, int, int, int);
+
+ /* Kernel blocking parameters */
+ static int out_width()
+ {
+ return svcntw() * 3;
+ }
+
+ static int out_height()
+ {
+ return 8;
+ }
+
+ static int k_unroll()
+ {
+ return 1;
+ }
+
+ // Use the standard fixed size transforms.
+ StdTransformsSVE<operand_type, result_type, 8, 3, 1, 1> transforms = {};
+
+ kern_type kernel=sve_interleaved_fp32_mla_3VLx8;
+
+ interleaved_fp32_mla_3VLx8(const CPUInfo *ci)
+ {
+
+ }
+};
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
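
As with the fp16 variant, this class is a compile-time "strategy" consumed by GemmInterleaved (see the instantiate() overrides in gemm_fp32.cpp above): the template reads the operand/result typedefs, sizes its packed panels from out_width(), out_height() and k_unroll(), interleaves inputs via the transforms member, and finally invokes the kernel function pointer. A rough sketch of that contract, with illustrative names (run_block is not part of the library):

// Sketch only: the contract GemmInterleaved expects from a strategy class
// such as interleaved_fp32_mla_3VLx8.
class CPUInfo;  // declared elsewhere in the library

template <typename strategy>
void run_block(const typename strategy::operand_type *a_panel,
               const typename strategy::operand_type *b_panel,
               typename strategy::result_type *c_panel,
               int ablocks, int bblocks, int K, const CPUInfo *ci) {
    strategy strat(ci);  // constructor may specialize on the detected CPU
    // Panels are packed by strat.transforms in multiples of out_height()
    // rows (A), out_width() columns (B), with K padded to k_unroll().
    strat.kernel(a_panel, b_panel, c_panel, ablocks, bblocks, K);
}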
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_3VLx8/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_3VLx8/generic.cpp
new file mode 100644
index 0000000000..bb08fc7cb0
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_3VLx8/generic.cpp
@@ -0,0 +1,333 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+
+
+#include "../../asmlib.hpp"
+
+namespace arm_gemm {
+
+void sve_interleaved_fp32_mla_3VLx8(const float *Apanel, const float *Bpanel, float *Cpanel, int ablocks, int bblocks, int K) {
+ const float *a_ptr = Apanel;
+ float *c_ptr = Cpanel;
+
+ const long loops_count = (K / 2) - 1;
+ const long tails_count = K % 2;
+
+ for (int yb=0; yb<ablocks; yb++) {
+ const float *a_ptr0 = a_ptr;
+ const float *b_ptr = Bpanel;
+
+ for (int xb=0; xb<bblocks; xb++) {
+ a_ptr = a_ptr0;
+ long loops = loops_count;
+ long tails = tails_count;
+
+ __asm __volatile (
+ "mov z8.s, #0\n"
+ "ptrue p0.s\n"
+ "mov z9.s, #0\n"
+ "ld1rqw z0.s, p0/z, [%[a_ptr]]\n"
+ "mov z10.s, #0\n"
+ "ld1w z4.s, p0/z, [%[b_ptr]]\n"
+ "mov z11.s, #0\n"
+ "ld1rqw z1.s, p0/z, [%[a_ptr], #0x10]\n"
+ "mov z12.s, #0\n"
+ "ld1w z5.s, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "mov z13.s, #0\n"
+ "ld1rqw z2.s, p0/z, [%[a_ptr], #0x20]\n"
+ "mov z14.s, #0\n"
+ "add %[a_ptr], %[a_ptr], #0x40\n"
+ "mov z15.s, #0\n"
+ "addvl %[b_ptr], %[b_ptr], #3\n"
+ "mov z16.s, #0\n"
+ "mov z17.s, #0\n"
+ "mov z18.s, #0\n"
+ "mov z19.s, #0\n"
+ "mov z20.s, #0\n"
+ "mov z21.s, #0\n"
+ "mov z22.s, #0\n"
+ "mov z23.s, #0\n"
+ "mov z24.s, #0\n"
+ "mov z25.s, #0\n"
+ "mov z26.s, #0\n"
+ "mov z27.s, #0\n"
+ "mov z28.s, #0\n"
+ "mov z29.s, #0\n"
+ "mov z30.s, #0\n"
+ "mov z31.s, #0\n"
+ "cbz %[loops], 1f\n"
+ "2:\n"
+ "fmla z8.s, z4.s, z0.s[0]\n"
+ "ld1w z6.s, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "fmla z9.s, z4.s, z0.s[1]\n"
+ "ld1rqw z3.s, p0/z, [%[a_ptr], #-0x10]\n"
+ "fmla z10.s, z4.s, z0.s[2]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "fmla z11.s, z4.s, z0.s[3]\n"
+ "fmla z20.s, z4.s, z1.s[0]\n"
+ "fmla z21.s, z4.s, z1.s[1]\n"
+ "fmla z22.s, z4.s, z1.s[2]\n"
+ "fmla z23.s, z4.s, z1.s[3]\n"
+ "ld1w z4.s, p0/z, [%[b_ptr]]\n"
+ "fmla z12.s, z5.s, z0.s[0]\n"
+ "fmla z13.s, z5.s, z0.s[1]\n"
+ "fmla z14.s, z5.s, z0.s[2]\n"
+ "fmla z15.s, z5.s, z0.s[3]\n"
+ "fmla z24.s, z5.s, z1.s[0]\n"
+ "fmla z25.s, z5.s, z1.s[1]\n"
+ "fmla z26.s, z5.s, z1.s[2]\n"
+ "fmla z27.s, z5.s, z1.s[3]\n"
+ "ld1w z5.s, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "fmla z16.s, z6.s, z0.s[0]\n"
+ "fmla z17.s, z6.s, z0.s[1]\n"
+ "fmla z18.s, z6.s, z0.s[2]\n"
+ "fmla z19.s, z6.s, z0.s[3]\n"
+ "ld1rqw z0.s, p0/z, [%[a_ptr]]\n"
+ "fmla z28.s, z6.s, z1.s[0]\n"
+ "fmla z29.s, z6.s, z1.s[1]\n"
+ "fmla z30.s, z6.s, z1.s[2]\n"
+ "fmla z31.s, z6.s, z1.s[3]\n"
+ "ld1w z6.s, p0/z, [%[b_ptr], #2, MUL VL]\n"
+ "fmla z8.s, z4.s, z2.s[0]\n"
+ "ld1rqw z1.s, p0/z, [%[a_ptr], #0x10]\n"
+ "fmla z9.s, z4.s, z2.s[1]\n"
+ "add %[a_ptr], %[a_ptr], #0x40\n"
+ "fmla z10.s, z4.s, z2.s[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #6\n"
+ "fmla z11.s, z4.s, z2.s[3]\n"
+ "fmla z20.s, z4.s, z3.s[0]\n"
+ "fmla z21.s, z4.s, z3.s[1]\n"
+ "fmla z22.s, z4.s, z3.s[2]\n"
+ "fmla z23.s, z4.s, z3.s[3]\n"
+ "ld1w z4.s, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "fmla z12.s, z5.s, z2.s[0]\n"
+ "fmla z13.s, z5.s, z2.s[1]\n"
+ "fmla z14.s, z5.s, z2.s[2]\n"
+ "fmla z15.s, z5.s, z2.s[3]\n"
+ "fmla z24.s, z5.s, z3.s[0]\n"
+ "fmla z25.s, z5.s, z3.s[1]\n"
+ "fmla z26.s, z5.s, z3.s[2]\n"
+ "fmla z27.s, z5.s, z3.s[3]\n"
+ "ld1w z5.s, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "fmla z16.s, z6.s, z2.s[0]\n"
+ "fmla z17.s, z6.s, z2.s[1]\n"
+ "fmla z18.s, z6.s, z2.s[2]\n"
+ "fmla z19.s, z6.s, z2.s[3]\n"
+ "ld1rqw z2.s, p0/z, [%[a_ptr], #-0x20]\n"
+ "fmla z28.s, z6.s, z3.s[0]\n"
+ "fmla z29.s, z6.s, z3.s[1]\n"
+ "fmla z30.s, z6.s, z3.s[2]\n"
+ "fmla z31.s, z6.s, z3.s[3]\n"
+ "b.ne 2b\n"
+ "1:\n"
+ "cbz %[tails], 3f\n"
+ "fmla z8.s, z4.s, z0.s[0]\n"
+ "ld1w z6.s, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "fmla z9.s, z4.s, z0.s[1]\n"
+ "ld1rqw z3.s, p0/z, [%[a_ptr], #-0x10]\n"
+ "fmla z10.s, z4.s, z0.s[2]\n"
+ "fmla z11.s, z4.s, z0.s[3]\n"
+ "fmla z20.s, z4.s, z1.s[0]\n"
+ "fmla z21.s, z4.s, z1.s[1]\n"
+ "fmla z22.s, z4.s, z1.s[2]\n"
+ "fmla z23.s, z4.s, z1.s[3]\n"
+ "ld1w z4.s, p0/z, [%[b_ptr]]\n"
+ "fmla z12.s, z5.s, z0.s[0]\n"
+ "fmla z13.s, z5.s, z0.s[1]\n"
+ "fmla z14.s, z5.s, z0.s[2]\n"
+ "fmla z15.s, z5.s, z0.s[3]\n"
+ "fmla z24.s, z5.s, z1.s[0]\n"
+ "fmla z25.s, z5.s, z1.s[1]\n"
+ "fmla z26.s, z5.s, z1.s[2]\n"
+ "fmla z27.s, z5.s, z1.s[3]\n"
+ "ld1w z5.s, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "fmla z16.s, z6.s, z0.s[0]\n"
+ "fmla z17.s, z6.s, z0.s[1]\n"
+ "fmla z18.s, z6.s, z0.s[2]\n"
+ "fmla z19.s, z6.s, z0.s[3]\n"
+ "ld1rqw z0.s, p0/z, [%[a_ptr]]\n"
+ "fmla z28.s, z6.s, z1.s[0]\n"
+ "fmla z29.s, z6.s, z1.s[1]\n"
+ "fmla z30.s, z6.s, z1.s[2]\n"
+ "fmla z31.s, z6.s, z1.s[3]\n"
+ "ld1w z6.s, p0/z, [%[b_ptr], #2, MUL VL]\n"
+ "fmla z8.s, z4.s, z2.s[0]\n"
+ "ld1rqw z1.s, p0/z, [%[a_ptr], #0x10]\n"
+ "fmla z9.s, z4.s, z2.s[1]\n"
+ "add %[a_ptr], %[a_ptr], #0x20\n"
+ "fmla z10.s, z4.s, z2.s[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #6\n"
+ "fmla z11.s, z4.s, z2.s[3]\n"
+ "fmla z20.s, z4.s, z3.s[0]\n"
+ "fmla z21.s, z4.s, z3.s[1]\n"
+ "fmla z22.s, z4.s, z3.s[2]\n"
+ "fmla z23.s, z4.s, z3.s[3]\n"
+ "ld1w z4.s, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "fmla z12.s, z5.s, z2.s[0]\n"
+ "fmla z13.s, z5.s, z2.s[1]\n"
+ "fmla z14.s, z5.s, z2.s[2]\n"
+ "fmla z15.s, z5.s, z2.s[3]\n"
+ "fmla z24.s, z5.s, z3.s[0]\n"
+ "fmla z25.s, z5.s, z3.s[1]\n"
+ "fmla z26.s, z5.s, z3.s[2]\n"
+ "fmla z27.s, z5.s, z3.s[3]\n"
+ "ld1w z5.s, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "fmla z16.s, z6.s, z2.s[0]\n"
+ "fmla z17.s, z6.s, z2.s[1]\n"
+ "fmla z18.s, z6.s, z2.s[2]\n"
+ "fmla z19.s, z6.s, z2.s[3]\n"
+ "fmla z28.s, z6.s, z3.s[0]\n"
+ "fmla z29.s, z6.s, z3.s[1]\n"
+ "fmla z30.s, z6.s, z3.s[2]\n"
+ "fmla z31.s, z6.s, z3.s[3]\n"
+ "ld1w z6.s, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "fmla z8.s, z4.s, z0.s[0]\n"
+ "st1w z8.s, p0, [%[c_ptr]]\n"
+ "fmla z9.s, z4.s, z0.s[1]\n"
+ "fmla z10.s, z4.s, z0.s[2]\n"
+ "fmla z11.s, z4.s, z0.s[3]\n"
+ "fmla z20.s, z4.s, z1.s[0]\n"
+ "fmla z21.s, z4.s, z1.s[1]\n"
+ "fmla z22.s, z4.s, z1.s[2]\n"
+ "fmla z23.s, z4.s, z1.s[3]\n"
+ "fmla z12.s, z5.s, z0.s[0]\n"
+ "st1w z12.s, p0, [%[c_ptr], #1, MUL VL]\n"
+ "fmla z13.s, z5.s, z0.s[1]\n"
+ "fmla z14.s, z5.s, z0.s[2]\n"
+ "fmla z15.s, z5.s, z0.s[3]\n"
+ "fmla z24.s, z5.s, z1.s[0]\n"
+ "fmla z25.s, z5.s, z1.s[1]\n"
+ "fmla z26.s, z5.s, z1.s[2]\n"
+ "fmla z27.s, z5.s, z1.s[3]\n"
+ "fmla z16.s, z6.s, z0.s[0]\n"
+ "st1w z16.s, p0, [%[c_ptr], #2, MUL VL]\n"
+ "fmla z17.s, z6.s, z0.s[1]\n"
+ "st1w z9.s, p0, [%[c_ptr], #3, MUL VL]\n"
+ "fmla z18.s, z6.s, z0.s[2]\n"
+ "st1w z13.s, p0, [%[c_ptr], #4, MUL VL]\n"
+ "fmla z19.s, z6.s, z0.s[3]\n"
+ "st1w z17.s, p0, [%[c_ptr], #5, MUL VL]\n"
+ "fmla z28.s, z6.s, z1.s[0]\n"
+ "st1w z10.s, p0, [%[c_ptr], #6, MUL VL]\n"
+ "fmla z29.s, z6.s, z1.s[1]\n"
+ "st1w z14.s, p0, [%[c_ptr], #7, MUL VL]\n"
+ "fmla z30.s, z6.s, z1.s[2]\n"
+ "addvl %[c_ptr], %[c_ptr], #16\n"
+ "fmla z31.s, z6.s, z1.s[3]\n"
+ "b 4f\n"
+ "3:\n"
+ "fmla z8.s, z4.s, z0.s[0]\n"
+ "ld1w z6.s, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "fmla z9.s, z4.s, z0.s[1]\n"
+ "ld1rqw z3.s, p0/z, [%[a_ptr], #-0x10]\n"
+ "fmla z10.s, z4.s, z0.s[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #3\n"
+ "fmla z11.s, z4.s, z0.s[3]\n"
+ "fmla z20.s, z4.s, z1.s[0]\n"
+ "fmla z21.s, z4.s, z1.s[1]\n"
+ "fmla z22.s, z4.s, z1.s[2]\n"
+ "fmla z23.s, z4.s, z1.s[3]\n"
+ "ld1w z4.s, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "fmla z12.s, z5.s, z0.s[0]\n"
+ "fmla z13.s, z5.s, z0.s[1]\n"
+ "fmla z14.s, z5.s, z0.s[2]\n"
+ "fmla z15.s, z5.s, z0.s[3]\n"
+ "fmla z24.s, z5.s, z1.s[0]\n"
+ "fmla z25.s, z5.s, z1.s[1]\n"
+ "fmla z26.s, z5.s, z1.s[2]\n"
+ "fmla z27.s, z5.s, z1.s[3]\n"
+ "ld1w z5.s, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "fmla z16.s, z6.s, z0.s[0]\n"
+ "fmla z17.s, z6.s, z0.s[1]\n"
+ "fmla z18.s, z6.s, z0.s[2]\n"
+ "fmla z19.s, z6.s, z0.s[3]\n"
+ "fmla z28.s, z6.s, z1.s[0]\n"
+ "fmla z29.s, z6.s, z1.s[1]\n"
+ "fmla z30.s, z6.s, z1.s[2]\n"
+ "fmla z31.s, z6.s, z1.s[3]\n"
+ "ld1w z6.s, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "fmla z8.s, z4.s, z2.s[0]\n"
+ "st1w z8.s, p0, [%[c_ptr]]\n"
+ "fmla z9.s, z4.s, z2.s[1]\n"
+ "fmla z10.s, z4.s, z2.s[2]\n"
+ "fmla z11.s, z4.s, z2.s[3]\n"
+ "fmla z20.s, z4.s, z3.s[0]\n"
+ "fmla z21.s, z4.s, z3.s[1]\n"
+ "fmla z22.s, z4.s, z3.s[2]\n"
+ "fmla z23.s, z4.s, z3.s[3]\n"
+ "fmla z12.s, z5.s, z2.s[0]\n"
+ "st1w z12.s, p0, [%[c_ptr], #1, MUL VL]\n"
+ "fmla z13.s, z5.s, z2.s[1]\n"
+ "fmla z14.s, z5.s, z2.s[2]\n"
+ "fmla z15.s, z5.s, z2.s[3]\n"
+ "fmla z24.s, z5.s, z3.s[0]\n"
+ "fmla z25.s, z5.s, z3.s[1]\n"
+ "fmla z26.s, z5.s, z3.s[2]\n"
+ "fmla z27.s, z5.s, z3.s[3]\n"
+ "fmla z16.s, z6.s, z2.s[0]\n"
+ "st1w z16.s, p0, [%[c_ptr], #2, MUL VL]\n"
+ "fmla z17.s, z6.s, z2.s[1]\n"
+ "st1w z9.s, p0, [%[c_ptr], #3, MUL VL]\n"
+ "fmla z18.s, z6.s, z2.s[2]\n"
+ "st1w z13.s, p0, [%[c_ptr], #4, MUL VL]\n"
+ "fmla z19.s, z6.s, z2.s[3]\n"
+ "st1w z17.s, p0, [%[c_ptr], #5, MUL VL]\n"
+ "fmla z28.s, z6.s, z3.s[0]\n"
+ "st1w z10.s, p0, [%[c_ptr], #6, MUL VL]\n"
+ "fmla z29.s, z6.s, z3.s[1]\n"
+ "st1w z14.s, p0, [%[c_ptr], #7, MUL VL]\n"
+ "fmla z30.s, z6.s, z3.s[2]\n"
+ "addvl %[c_ptr], %[c_ptr], #16\n"
+ "fmla z31.s, z6.s, z3.s[3]\n"
+ "4:\n"
+ "st1w z18.s, p0, [%[c_ptr], #-8, MUL VL]\n"
+ "st1w z11.s, p0, [%[c_ptr], #-7, MUL VL]\n"
+ "st1w z15.s, p0, [%[c_ptr], #-6, MUL VL]\n"
+ "st1w z19.s, p0, [%[c_ptr], #-5, MUL VL]\n"
+ "st1w z20.s, p0, [%[c_ptr], #-4, MUL VL]\n"
+ "st1w z24.s, p0, [%[c_ptr], #-3, MUL VL]\n"
+ "st1w z28.s, p0, [%[c_ptr], #-2, MUL VL]\n"
+ "st1w z21.s, p0, [%[c_ptr], #-1, MUL VL]\n"
+ "st1w z25.s, p0, [%[c_ptr]]\n"
+ "st1w z29.s, p0, [%[c_ptr], #1, MUL VL]\n"
+ "st1w z22.s, p0, [%[c_ptr], #2, MUL VL]\n"
+ "st1w z26.s, p0, [%[c_ptr], #3, MUL VL]\n"
+ "st1w z30.s, p0, [%[c_ptr], #4, MUL VL]\n"
+ "st1w z23.s, p0, [%[c_ptr], #5, MUL VL]\n"
+ "st1w z27.s, p0, [%[c_ptr], #6, MUL VL]\n"
+ "st1w z31.s, p0, [%[c_ptr], #7, MUL VL]\n"
+ "addvl %[c_ptr], %[c_ptr], #8\n"
+ : [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
+ [loops] "+r" (loops), [tails] "+r" (tails)
+ :
+ : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ }
+ }
+}
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_3VLx8.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_3VLx8.hpp
new file mode 100644
index 0000000000..91aa567d4a
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_3VLx8.hpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+#include <cstdint>
+#include "../std_transforms_sve.hpp"
+
+namespace arm_gemm {
+
+// Actual kernel implementations
+void sve_interleaved_s8s32_dot_3VLx8(const int8_t *, const int8_t *, int32_t *, int, int, int);
+
+class interleaved_s8s32_dot_3VLx8 {
+public:
+ typedef int8_t operand_type;
+ typedef int32_t result_type;
+
+ typedef void (*kern_type)(const int8_t *, const int8_t *, int32_t *, int, int, int);
+
+ /* Kernel blocking parameters */
+ static int out_width()
+ {
+ return svcntw() * 3;
+ }
+
+ static int out_height()
+ {
+ return 8;
+ }
+
+ static int k_unroll()
+ {
+ return 4;
+ }
+
+ // Use the standard fixed size transforms.
+ StdTransformsSVE<operand_type, result_type, 8, 3, 4, 1> transforms = {};
+
+ kern_type kernel=sve_interleaved_s8s32_dot_3VLx8;
+
+ interleaved_s8s32_dot_3VLx8(const CPUInfo *ci)
+ {
+
+ }
+};
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
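
Unlike the fp16/fp32 kernels, k_unroll() here is 4: each sdot instruction consumes four consecutive int8 values per 32-bit accumulator lane, so the transforms pack the K dimension in blocks of four and the kernel body in generic.cpp divides K by 4 on entry. A minimal sketch of what one accumulation step computes (illustrative, not from the patch):

// Sketch only: what a single "sdot z8.s, z4.b, z0.b[0]" computes, via the
// ACLE intrinsic. Four int8 products are summed into each 32-bit
// accumulator lane -- a 4-way K unroll in one instruction.
#include <arm_sve.h>

svint32_t s8_dot_step(svint32_t acc, svint8_t b_vec, svint8_t a_quad) {
    // Lane 0 selects int8 elements 0..3 within each 128-bit segment of
    // a_quad; the kernel uses lanes 0..3 to address four output rows.
    return svdot_lane_s32(acc, b_vec, a_quad, 0);
}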
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_3VLx8/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_3VLx8/generic.cpp
new file mode 100644
index 0000000000..2e994a13f3
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_3VLx8/generic.cpp
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+
+#include <cstdint>
+#include "../../asmlib.hpp"
+
+namespace arm_gemm {
+
+void sve_interleaved_s8s32_dot_3VLx8(const int8_t *Apanel, const int8_t *Bpanel, int32_t *Cpanel, int ablocks, int bblocks, int K) {
+ const int8_t *a_ptr = Apanel;
+ int32_t *c_ptr = Cpanel;
+
+ K /= 4;
+ const long loops_count = (K / 2) - 1;
+ const long tails_count = K % 2;
+
+ for (int yb=0; yb<ablocks; yb++) {
+ const int8_t *a_ptr0 = a_ptr;
+ const int8_t *b_ptr = Bpanel;
+
+ for (int xb=0; xb<bblocks; xb++) {
+ a_ptr = a_ptr0;
+ long loops = loops_count;
+ long tails = tails_count;
+
+ __asm __volatile (
+ "mov z8.s, #0\n"
+ "ptrue p0.b\n"
+ "mov z9.s, #0\n"
+ "ld1rqb z0.b, p0/z, [%[a_ptr]]\n"
+ "mov z10.s, #0\n"
+ "ld1b z4.b, p0/z, [%[b_ptr]]\n"
+ "mov z11.s, #0\n"
+ "ld1rqb z1.b, p0/z, [%[a_ptr], #0x10]\n"
+ "mov z12.s, #0\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "mov z13.s, #0\n"
+ "ld1rqb z2.b, p0/z, [%[a_ptr], #0x20]\n"
+ "mov z14.s, #0\n"
+ "add %[a_ptr], %[a_ptr], #0x40\n"
+ "mov z15.s, #0\n"
+ "addvl %[b_ptr], %[b_ptr], #3\n"
+ "mov z16.s, #0\n"
+ "mov z17.s, #0\n"
+ "mov z18.s, #0\n"
+ "mov z19.s, #0\n"
+ "mov z20.s, #0\n"
+ "mov z21.s, #0\n"
+ "mov z22.s, #0\n"
+ "mov z23.s, #0\n"
+ "mov z24.s, #0\n"
+ "mov z25.s, #0\n"
+ "mov z26.s, #0\n"
+ "mov z27.s, #0\n"
+ "mov z28.s, #0\n"
+ "mov z29.s, #0\n"
+ "mov z30.s, #0\n"
+ "mov z31.s, #0\n"
+ "cbz %[loops], 1f\n"
+ "2:\n"
+ "sdot z8.s, z4.b, z0.b[0]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "sdot z9.s, z4.b, z0.b[1]\n"
+ "ld1rqb z3.b, p0/z, [%[a_ptr], #-0x10]\n"
+ "sdot z10.s, z4.b, z0.b[2]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "sdot z11.s, z4.b, z0.b[3]\n"
+ "sdot z20.s, z4.b, z1.b[0]\n"
+ "sdot z21.s, z4.b, z1.b[1]\n"
+ "sdot z22.s, z4.b, z1.b[2]\n"
+ "sdot z23.s, z4.b, z1.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr]]\n"
+ "sdot z12.s, z5.b, z0.b[0]\n"
+ "sdot z13.s, z5.b, z0.b[1]\n"
+ "sdot z14.s, z5.b, z0.b[2]\n"
+ "sdot z15.s, z5.b, z0.b[3]\n"
+ "sdot z24.s, z5.b, z1.b[0]\n"
+ "sdot z25.s, z5.b, z1.b[1]\n"
+ "sdot z26.s, z5.b, z1.b[2]\n"
+ "sdot z27.s, z5.b, z1.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "sdot z16.s, z6.b, z0.b[0]\n"
+ "sdot z17.s, z6.b, z0.b[1]\n"
+ "sdot z18.s, z6.b, z0.b[2]\n"
+ "sdot z19.s, z6.b, z0.b[3]\n"
+ "ld1rqb z0.b, p0/z, [%[a_ptr]]\n"
+ "sdot z28.s, z6.b, z1.b[0]\n"
+ "sdot z29.s, z6.b, z1.b[1]\n"
+ "sdot z30.s, z6.b, z1.b[2]\n"
+ "sdot z31.s, z6.b, z1.b[3]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #2, MUL VL]\n"
+ "sdot z8.s, z4.b, z2.b[0]\n"
+ "ld1rqb z1.b, p0/z, [%[a_ptr], #0x10]\n"
+ "sdot z9.s, z4.b, z2.b[1]\n"
+ "add %[a_ptr], %[a_ptr], #0x40\n"
+ "sdot z10.s, z4.b, z2.b[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #6\n"
+ "sdot z11.s, z4.b, z2.b[3]\n"
+ "sdot z20.s, z4.b, z3.b[0]\n"
+ "sdot z21.s, z4.b, z3.b[1]\n"
+ "sdot z22.s, z4.b, z3.b[2]\n"
+ "sdot z23.s, z4.b, z3.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "sdot z12.s, z5.b, z2.b[0]\n"
+ "sdot z13.s, z5.b, z2.b[1]\n"
+ "sdot z14.s, z5.b, z2.b[2]\n"
+ "sdot z15.s, z5.b, z2.b[3]\n"
+ "sdot z24.s, z5.b, z3.b[0]\n"
+ "sdot z25.s, z5.b, z3.b[1]\n"
+ "sdot z26.s, z5.b, z3.b[2]\n"
+ "sdot z27.s, z5.b, z3.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "sdot z16.s, z6.b, z2.b[0]\n"
+ "sdot z17.s, z6.b, z2.b[1]\n"
+ "sdot z18.s, z6.b, z2.b[2]\n"
+ "sdot z19.s, z6.b, z2.b[3]\n"
+ "ld1rqb z2.b, p0/z, [%[a_ptr], #-0x20]\n"
+ "sdot z28.s, z6.b, z3.b[0]\n"
+ "sdot z29.s, z6.b, z3.b[1]\n"
+ "sdot z30.s, z6.b, z3.b[2]\n"
+ "sdot z31.s, z6.b, z3.b[3]\n"
+ "b.ne 2b\n"
+ "1:\n"
+ "cbz %[tails], 3f\n"
+ "sdot z8.s, z4.b, z0.b[0]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "sdot z9.s, z4.b, z0.b[1]\n"
+ "ld1rqb z3.b, p0/z, [%[a_ptr], #-0x10]\n"
+ "sdot z10.s, z4.b, z0.b[2]\n"
+ "sdot z11.s, z4.b, z0.b[3]\n"
+ "sdot z20.s, z4.b, z1.b[0]\n"
+ "sdot z21.s, z4.b, z1.b[1]\n"
+ "sdot z22.s, z4.b, z1.b[2]\n"
+ "sdot z23.s, z4.b, z1.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr]]\n"
+ "sdot z12.s, z5.b, z0.b[0]\n"
+ "sdot z13.s, z5.b, z0.b[1]\n"
+ "sdot z14.s, z5.b, z0.b[2]\n"
+ "sdot z15.s, z5.b, z0.b[3]\n"
+ "sdot z24.s, z5.b, z1.b[0]\n"
+ "sdot z25.s, z5.b, z1.b[1]\n"
+ "sdot z26.s, z5.b, z1.b[2]\n"
+ "sdot z27.s, z5.b, z1.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "sdot z16.s, z6.b, z0.b[0]\n"
+ "sdot z17.s, z6.b, z0.b[1]\n"
+ "sdot z18.s, z6.b, z0.b[2]\n"
+ "sdot z19.s, z6.b, z0.b[3]\n"
+ "ld1rqb z0.b, p0/z, [%[a_ptr]]\n"
+ "sdot z28.s, z6.b, z1.b[0]\n"
+ "sdot z29.s, z6.b, z1.b[1]\n"
+ "sdot z30.s, z6.b, z1.b[2]\n"
+ "sdot z31.s, z6.b, z1.b[3]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #2, MUL VL]\n"
+ "sdot z8.s, z4.b, z2.b[0]\n"
+ "ld1rqb z1.b, p0/z, [%[a_ptr], #0x10]\n"
+ "sdot z9.s, z4.b, z2.b[1]\n"
+ "add %[a_ptr], %[a_ptr], #0x20\n"
+ "sdot z10.s, z4.b, z2.b[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #6\n"
+ "sdot z11.s, z4.b, z2.b[3]\n"
+ "sdot z20.s, z4.b, z3.b[0]\n"
+ "sdot z21.s, z4.b, z3.b[1]\n"
+ "sdot z22.s, z4.b, z3.b[2]\n"
+ "sdot z23.s, z4.b, z3.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "sdot z12.s, z5.b, z2.b[0]\n"
+ "sdot z13.s, z5.b, z2.b[1]\n"
+ "sdot z14.s, z5.b, z2.b[2]\n"
+ "sdot z15.s, z5.b, z2.b[3]\n"
+ "sdot z24.s, z5.b, z3.b[0]\n"
+ "sdot z25.s, z5.b, z3.b[1]\n"
+ "sdot z26.s, z5.b, z3.b[2]\n"
+ "sdot z27.s, z5.b, z3.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "sdot z16.s, z6.b, z2.b[0]\n"
+ "sdot z17.s, z6.b, z2.b[1]\n"
+ "sdot z18.s, z6.b, z2.b[2]\n"
+ "sdot z19.s, z6.b, z2.b[3]\n"
+ "sdot z28.s, z6.b, z3.b[0]\n"
+ "sdot z29.s, z6.b, z3.b[1]\n"
+ "sdot z30.s, z6.b, z3.b[2]\n"
+ "sdot z31.s, z6.b, z3.b[3]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "sdot z8.s, z4.b, z0.b[0]\n"
+ "st1w z8.s, p0, [%[c_ptr]]\n"
+ "sdot z9.s, z4.b, z0.b[1]\n"
+ "sdot z10.s, z4.b, z0.b[2]\n"
+ "sdot z11.s, z4.b, z0.b[3]\n"
+ "sdot z20.s, z4.b, z1.b[0]\n"
+ "sdot z21.s, z4.b, z1.b[1]\n"
+ "sdot z22.s, z4.b, z1.b[2]\n"
+ "sdot z23.s, z4.b, z1.b[3]\n"
+ "sdot z12.s, z5.b, z0.b[0]\n"
+ "st1w z12.s, p0, [%[c_ptr], #1, MUL VL]\n"
+ "sdot z13.s, z5.b, z0.b[1]\n"
+ "sdot z14.s, z5.b, z0.b[2]\n"
+ "sdot z15.s, z5.b, z0.b[3]\n"
+ "sdot z24.s, z5.b, z1.b[0]\n"
+ "sdot z25.s, z5.b, z1.b[1]\n"
+ "sdot z26.s, z5.b, z1.b[2]\n"
+ "sdot z27.s, z5.b, z1.b[3]\n"
+ "sdot z16.s, z6.b, z0.b[0]\n"
+ "st1w z16.s, p0, [%[c_ptr], #2, MUL VL]\n"
+ "sdot z17.s, z6.b, z0.b[1]\n"
+ "st1w z9.s, p0, [%[c_ptr], #3, MUL VL]\n"
+ "sdot z18.s, z6.b, z0.b[2]\n"
+ "st1w z13.s, p0, [%[c_ptr], #4, MUL VL]\n"
+ "sdot z19.s, z6.b, z0.b[3]\n"
+ "st1w z17.s, p0, [%[c_ptr], #5, MUL VL]\n"
+ "sdot z28.s, z6.b, z1.b[0]\n"
+ "st1w z10.s, p0, [%[c_ptr], #6, MUL VL]\n"
+ "sdot z29.s, z6.b, z1.b[1]\n"
+ "st1w z14.s, p0, [%[c_ptr], #7, MUL VL]\n"
+ "sdot z30.s, z6.b, z1.b[2]\n"
+ "addvl %[c_ptr], %[c_ptr], #16\n"
+ "sdot z31.s, z6.b, z1.b[3]\n"
+ "b 4f\n"
+ "3:\n"
+ "sdot z8.s, z4.b, z0.b[0]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "sdot z9.s, z4.b, z0.b[1]\n"
+ "ld1rqb z3.b, p0/z, [%[a_ptr], #-0x10]\n"
+ "sdot z10.s, z4.b, z0.b[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #3\n"
+ "sdot z11.s, z4.b, z0.b[3]\n"
+ "sdot z20.s, z4.b, z1.b[0]\n"
+ "sdot z21.s, z4.b, z1.b[1]\n"
+ "sdot z22.s, z4.b, z1.b[2]\n"
+ "sdot z23.s, z4.b, z1.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "sdot z12.s, z5.b, z0.b[0]\n"
+ "sdot z13.s, z5.b, z0.b[1]\n"
+ "sdot z14.s, z5.b, z0.b[2]\n"
+ "sdot z15.s, z5.b, z0.b[3]\n"
+ "sdot z24.s, z5.b, z1.b[0]\n"
+ "sdot z25.s, z5.b, z1.b[1]\n"
+ "sdot z26.s, z5.b, z1.b[2]\n"
+ "sdot z27.s, z5.b, z1.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "sdot z16.s, z6.b, z0.b[0]\n"
+ "sdot z17.s, z6.b, z0.b[1]\n"
+ "sdot z18.s, z6.b, z0.b[2]\n"
+ "sdot z19.s, z6.b, z0.b[3]\n"
+ "sdot z28.s, z6.b, z1.b[0]\n"
+ "sdot z29.s, z6.b, z1.b[1]\n"
+ "sdot z30.s, z6.b, z1.b[2]\n"
+ "sdot z31.s, z6.b, z1.b[3]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "sdot z8.s, z4.b, z2.b[0]\n"
+ "st1w z8.s, p0, [%[c_ptr]]\n"
+ "sdot z9.s, z4.b, z2.b[1]\n"
+ "sdot z10.s, z4.b, z2.b[2]\n"
+ "sdot z11.s, z4.b, z2.b[3]\n"
+ "sdot z20.s, z4.b, z3.b[0]\n"
+ "sdot z21.s, z4.b, z3.b[1]\n"
+ "sdot z22.s, z4.b, z3.b[2]\n"
+ "sdot z23.s, z4.b, z3.b[3]\n"
+ "sdot z12.s, z5.b, z2.b[0]\n"
+ "st1w z12.s, p0, [%[c_ptr], #1, MUL VL]\n"
+ "sdot z13.s, z5.b, z2.b[1]\n"
+ "sdot z14.s, z5.b, z2.b[2]\n"
+ "sdot z15.s, z5.b, z2.b[3]\n"
+ "sdot z24.s, z5.b, z3.b[0]\n"
+ "sdot z25.s, z5.b, z3.b[1]\n"
+ "sdot z26.s, z5.b, z3.b[2]\n"
+ "sdot z27.s, z5.b, z3.b[3]\n"
+ "sdot z16.s, z6.b, z2.b[0]\n"
+ "st1w z16.s, p0, [%[c_ptr], #2, MUL VL]\n"
+ "sdot z17.s, z6.b, z2.b[1]\n"
+ "st1w z9.s, p0, [%[c_ptr], #3, MUL VL]\n"
+ "sdot z18.s, z6.b, z2.b[2]\n"
+ "st1w z13.s, p0, [%[c_ptr], #4, MUL VL]\n"
+ "sdot z19.s, z6.b, z2.b[3]\n"
+ "st1w z17.s, p0, [%[c_ptr], #5, MUL VL]\n"
+ "sdot z28.s, z6.b, z3.b[0]\n"
+ "st1w z10.s, p0, [%[c_ptr], #6, MUL VL]\n"
+ "sdot z29.s, z6.b, z3.b[1]\n"
+ "st1w z14.s, p0, [%[c_ptr], #7, MUL VL]\n"
+ "sdot z30.s, z6.b, z3.b[2]\n"
+ "addvl %[c_ptr], %[c_ptr], #16\n"
+ "sdot z31.s, z6.b, z3.b[3]\n"
+ "4:\n"
+ "st1w z18.s, p0, [%[c_ptr], #-8, MUL VL]\n"
+ "st1w z11.s, p0, [%[c_ptr], #-7, MUL VL]\n"
+ "st1w z15.s, p0, [%[c_ptr], #-6, MUL VL]\n"
+ "st1w z19.s, p0, [%[c_ptr], #-5, MUL VL]\n"
+ "st1w z20.s, p0, [%[c_ptr], #-4, MUL VL]\n"
+ "st1w z24.s, p0, [%[c_ptr], #-3, MUL VL]\n"
+ "st1w z28.s, p0, [%[c_ptr], #-2, MUL VL]\n"
+ "st1w z21.s, p0, [%[c_ptr], #-1, MUL VL]\n"
+ "st1w z25.s, p0, [%[c_ptr]]\n"
+ "st1w z29.s, p0, [%[c_ptr], #1, MUL VL]\n"
+ "st1w z22.s, p0, [%[c_ptr], #2, MUL VL]\n"
+ "st1w z26.s, p0, [%[c_ptr], #3, MUL VL]\n"
+ "st1w z30.s, p0, [%[c_ptr], #4, MUL VL]\n"
+ "st1w z23.s, p0, [%[c_ptr], #5, MUL VL]\n"
+ "st1w z27.s, p0, [%[c_ptr], #6, MUL VL]\n"
+ "st1w z31.s, p0, [%[c_ptr], #7, MUL VL]\n"
+ "addvl %[c_ptr], %[c_ptr], #8\n"
+ : [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
+ [loops] "+r" (loops), [tails] "+r" (tails)
+ :
+ : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ }
+ }
+}
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_3VLx8.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_3VLx8.hpp
new file mode 100644
index 0000000000..ef457e454f
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_3VLx8.hpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+#include <cstdint>
+#include "../std_transforms_sve.hpp"
+
+namespace arm_gemm {
+
+// Actual kernel implementations
+void sve_interleaved_u8u32_dot_3VLx8(const uint8_t *, const uint8_t *, uint32_t *, int, int, int);
+
+class interleaved_u8u32_dot_3VLx8 {
+public:
+ typedef uint8_t operand_type;
+ typedef uint32_t result_type;
+
+ typedef void (*kern_type)(const uint8_t *, const uint8_t *, uint32_t *, int, int, int);
+
+ /* Kernel blocking parameters */
+ static int out_width()
+ {
+ return svcntw() * 3;
+ }
+
+ static int out_height()
+ {
+ return 8;
+ }
+
+ static int k_unroll()
+ {
+ return 4;
+ }
+
+ // Use the standard fixed size transforms.
+ StdTransformsSVE<operand_type, result_type, 8, 3, 4, 1> transforms = {};
+
+ kern_type kernel=sve_interleaved_u8u32_dot_3VLx8;
+
+ interleaved_u8u32_dot_3VLx8(const CPUInfo *ci)
+ {
+
+ }
+};
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_3VLx8/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_3VLx8/generic.cpp
new file mode 100644
index 0000000000..f4d33a9efa
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_3VLx8/generic.cpp
@@ -0,0 +1,328 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+
+#include <cstdint>
+#include "../../asmlib.hpp"
+
+namespace arm_gemm {
+
+void sve_interleaved_u8u32_dot_3VLx8(const uint8_t *Apanel, const uint8_t *Bpanel, uint32_t *Cpanel, int ablocks, int bblocks, int K) {
+ const uint8_t *a_ptr = Apanel;
+ uint32_t *c_ptr = Cpanel;
+
+ K /= 4;
+ const long loops_count = (K / 2) - 1;
+ const long tails_count = K % 2;
+
+ for (int yb=0; yb<ablocks; yb++) {
+ const uint8_t *a_ptr0 = a_ptr;
+ const uint8_t *b_ptr = Bpanel;
+
+ for (int xb=0; xb<bblocks; xb++) {
+ a_ptr = a_ptr0;
+ long loops = loops_count;
+ long tails = tails_count;
+
+ __asm __volatile (
+ "mov z8.s, #0\n"
+ "ptrue p0.b\n"
+ "mov z9.s, #0\n"
+ "mov z10.s, #0\n"
+ "mov z11.s, #0\n"
+ "mov z12.s, #0\n"
+ "ld1rqb z0.b, p0/z, [%[a_ptr]]\n"
+ "mov z13.s, #0\n"
+ "ld1b z4.b, p0/z, [%[b_ptr]]\n"
+ "mov z14.s, #0\n"
+ "ld1rqb z1.b, p0/z, [%[a_ptr], #0x10]\n"
+ "mov z15.s, #0\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "mov z16.s, #0\n"
+ "ld1rqb z2.b, p0/z, [%[a_ptr], #0x20]\n"
+ "mov z17.s, #0\n"
+ "add %[a_ptr], %[a_ptr], #0x40\n"
+ "mov z18.s, #0\n"
+ "addvl %[b_ptr], %[b_ptr], #3\n"
+ "mov z19.s, #0\n"
+ "mov z20.s, #0\n"
+ "mov z21.s, #0\n"
+ "mov z22.s, #0\n"
+ "mov z23.s, #0\n"
+ "mov z24.s, #0\n"
+ "mov z25.s, #0\n"
+ "mov z26.s, #0\n"
+ "mov z27.s, #0\n"
+ "mov z28.s, #0\n"
+ "mov z29.s, #0\n"
+ "mov z30.s, #0\n"
+ "mov z31.s, #0\n"
+ "cbz %[loops], 1f\n"
+ "2:\n"
+ "udot z8.s, z4.b, z0.b[0]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "udot z9.s, z4.b, z0.b[1]\n"
+ "ld1rqb z3.b, p0/z, [%[a_ptr], #-0x10]\n"
+ "udot z10.s, z4.b, z0.b[2]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "udot z11.s, z4.b, z0.b[3]\n"
+ "udot z20.s, z4.b, z1.b[0]\n"
+ "udot z21.s, z4.b, z1.b[1]\n"
+ "udot z22.s, z4.b, z1.b[2]\n"
+ "udot z23.s, z4.b, z1.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr]]\n"
+ "udot z12.s, z5.b, z0.b[0]\n"
+ "udot z13.s, z5.b, z0.b[1]\n"
+ "udot z14.s, z5.b, z0.b[2]\n"
+ "udot z15.s, z5.b, z0.b[3]\n"
+ "udot z24.s, z5.b, z1.b[0]\n"
+ "udot z25.s, z5.b, z1.b[1]\n"
+ "udot z26.s, z5.b, z1.b[2]\n"
+ "udot z27.s, z5.b, z1.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "udot z16.s, z6.b, z0.b[0]\n"
+ "udot z17.s, z6.b, z0.b[1]\n"
+ "udot z18.s, z6.b, z0.b[2]\n"
+ "udot z19.s, z6.b, z0.b[3]\n"
+ "ld1rqb z0.b, p0/z, [%[a_ptr]]\n"
+ "udot z28.s, z6.b, z1.b[0]\n"
+ "udot z29.s, z6.b, z1.b[1]\n"
+ "udot z30.s, z6.b, z1.b[2]\n"
+ "udot z31.s, z6.b, z1.b[3]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #2, MUL VL]\n"
+ "udot z8.s, z4.b, z2.b[0]\n"
+ "ld1rqb z1.b, p0/z, [%[a_ptr], #0x10]\n"
+ "udot z9.s, z4.b, z2.b[1]\n"
+ "add %[a_ptr], %[a_ptr], #0x40\n"
+ "udot z10.s, z4.b, z2.b[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #6\n"
+ "udot z11.s, z4.b, z2.b[3]\n"
+ "udot z20.s, z4.b, z3.b[0]\n"
+ "udot z21.s, z4.b, z3.b[1]\n"
+ "udot z22.s, z4.b, z3.b[2]\n"
+ "udot z23.s, z4.b, z3.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "udot z12.s, z5.b, z2.b[0]\n"
+ "udot z13.s, z5.b, z2.b[1]\n"
+ "udot z14.s, z5.b, z2.b[2]\n"
+ "udot z15.s, z5.b, z2.b[3]\n"
+ "udot z24.s, z5.b, z3.b[0]\n"
+ "udot z25.s, z5.b, z3.b[1]\n"
+ "udot z26.s, z5.b, z3.b[2]\n"
+ "udot z27.s, z5.b, z3.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "udot z16.s, z6.b, z2.b[0]\n"
+ "udot z17.s, z6.b, z2.b[1]\n"
+ "udot z18.s, z6.b, z2.b[2]\n"
+ "udot z19.s, z6.b, z2.b[3]\n"
+ "ld1rqb z2.b, p0/z, [%[a_ptr], #-0x20]\n"
+ "udot z28.s, z6.b, z3.b[0]\n"
+ "udot z29.s, z6.b, z3.b[1]\n"
+ "udot z30.s, z6.b, z3.b[2]\n"
+ "udot z31.s, z6.b, z3.b[3]\n"
+ "b.ne 2b\n"
+ "1:\n"
+ "cbz %[tails], 3f\n"
+ "udot z8.s, z4.b, z0.b[0]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "udot z9.s, z4.b, z0.b[1]\n"
+ "ld1rqb z3.b, p0/z, [%[a_ptr], #-0x10]\n"
+ "udot z10.s, z4.b, z0.b[2]\n"
+ "udot z11.s, z4.b, z0.b[3]\n"
+ "udot z20.s, z4.b, z1.b[0]\n"
+ "udot z21.s, z4.b, z1.b[1]\n"
+ "udot z22.s, z4.b, z1.b[2]\n"
+ "udot z23.s, z4.b, z1.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr]]\n"
+ "udot z12.s, z5.b, z0.b[0]\n"
+ "udot z13.s, z5.b, z0.b[1]\n"
+ "udot z14.s, z5.b, z0.b[2]\n"
+ "udot z15.s, z5.b, z0.b[3]\n"
+ "udot z24.s, z5.b, z1.b[0]\n"
+ "udot z25.s, z5.b, z1.b[1]\n"
+ "udot z26.s, z5.b, z1.b[2]\n"
+ "udot z27.s, z5.b, z1.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "udot z16.s, z6.b, z0.b[0]\n"
+ "udot z17.s, z6.b, z0.b[1]\n"
+ "udot z18.s, z6.b, z0.b[2]\n"
+ "udot z19.s, z6.b, z0.b[3]\n"
+ "ld1rqb z0.b, p0/z, [%[a_ptr]]\n"
+ "udot z28.s, z6.b, z1.b[0]\n"
+ "udot z29.s, z6.b, z1.b[1]\n"
+ "udot z30.s, z6.b, z1.b[2]\n"
+ "udot z31.s, z6.b, z1.b[3]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #2, MUL VL]\n"
+ "udot z8.s, z4.b, z2.b[0]\n"
+ "ld1rqb z1.b, p0/z, [%[a_ptr], #0x10]\n"
+ "udot z9.s, z4.b, z2.b[1]\n"
+ "add %[a_ptr], %[a_ptr], #0x20\n"
+ "udot z10.s, z4.b, z2.b[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #6\n"
+ "udot z11.s, z4.b, z2.b[3]\n"
+ "udot z20.s, z4.b, z3.b[0]\n"
+ "udot z21.s, z4.b, z3.b[1]\n"
+ "udot z22.s, z4.b, z3.b[2]\n"
+ "udot z23.s, z4.b, z3.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "udot z12.s, z5.b, z2.b[0]\n"
+ "udot z13.s, z5.b, z2.b[1]\n"
+ "udot z14.s, z5.b, z2.b[2]\n"
+ "udot z15.s, z5.b, z2.b[3]\n"
+ "udot z24.s, z5.b, z3.b[0]\n"
+ "udot z25.s, z5.b, z3.b[1]\n"
+ "udot z26.s, z5.b, z3.b[2]\n"
+ "udot z27.s, z5.b, z3.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "udot z16.s, z6.b, z2.b[0]\n"
+ "udot z17.s, z6.b, z2.b[1]\n"
+ "udot z18.s, z6.b, z2.b[2]\n"
+ "udot z19.s, z6.b, z2.b[3]\n"
+ "udot z28.s, z6.b, z3.b[0]\n"
+ "udot z29.s, z6.b, z3.b[1]\n"
+ "udot z30.s, z6.b, z3.b[2]\n"
+ "udot z31.s, z6.b, z3.b[3]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "udot z8.s, z4.b, z0.b[0]\n"
+ "udot z9.s, z4.b, z0.b[1]\n"
+ "udot z10.s, z4.b, z0.b[2]\n"
+ "udot z11.s, z4.b, z0.b[3]\n"
+ "udot z20.s, z4.b, z1.b[0]\n"
+ "st1w z8.s, p0, [%[c_ptr]]\n"
+ "udot z21.s, z4.b, z1.b[1]\n"
+ "udot z22.s, z4.b, z1.b[2]\n"
+ "udot z23.s, z4.b, z1.b[3]\n"
+ "udot z12.s, z5.b, z0.b[0]\n"
+ "udot z13.s, z5.b, z0.b[1]\n"
+ "udot z14.s, z5.b, z0.b[2]\n"
+ "udot z15.s, z5.b, z0.b[3]\n"
+ "udot z24.s, z5.b, z1.b[0]\n"
+ "st1w z12.s, p0, [%[c_ptr], #1, MUL VL]\n"
+ "udot z25.s, z5.b, z1.b[1]\n"
+ "udot z26.s, z5.b, z1.b[2]\n"
+ "udot z27.s, z5.b, z1.b[3]\n"
+ "udot z16.s, z6.b, z0.b[0]\n"
+ "udot z17.s, z6.b, z0.b[1]\n"
+ "udot z18.s, z6.b, z0.b[2]\n"
+ "udot z19.s, z6.b, z0.b[3]\n"
+ "udot z28.s, z6.b, z1.b[0]\n"
+ "st1w z16.s, p0, [%[c_ptr], #2, MUL VL]\n"
+ "udot z29.s, z6.b, z1.b[1]\n"
+ "udot z30.s, z6.b, z1.b[2]\n"
+ "udot z31.s, z6.b, z1.b[3]\n"
+ "b 4f\n"
+ "3:\n"
+ "udot z8.s, z4.b, z0.b[0]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "udot z9.s, z4.b, z0.b[1]\n"
+ "ld1rqb z3.b, p0/z, [%[a_ptr], #-0x10]\n"
+ "udot z10.s, z4.b, z0.b[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #3\n"
+ "udot z11.s, z4.b, z0.b[3]\n"
+ "udot z20.s, z4.b, z1.b[0]\n"
+ "udot z21.s, z4.b, z1.b[1]\n"
+ "udot z22.s, z4.b, z1.b[2]\n"
+ "udot z23.s, z4.b, z1.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "udot z12.s, z5.b, z0.b[0]\n"
+ "udot z13.s, z5.b, z0.b[1]\n"
+ "udot z14.s, z5.b, z0.b[2]\n"
+ "udot z15.s, z5.b, z0.b[3]\n"
+ "udot z24.s, z5.b, z1.b[0]\n"
+ "udot z25.s, z5.b, z1.b[1]\n"
+ "udot z26.s, z5.b, z1.b[2]\n"
+ "udot z27.s, z5.b, z1.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "udot z16.s, z6.b, z0.b[0]\n"
+ "udot z17.s, z6.b, z0.b[1]\n"
+ "udot z18.s, z6.b, z0.b[2]\n"
+ "udot z19.s, z6.b, z0.b[3]\n"
+ "udot z28.s, z6.b, z1.b[0]\n"
+ "udot z29.s, z6.b, z1.b[1]\n"
+ "udot z30.s, z6.b, z1.b[2]\n"
+ "udot z31.s, z6.b, z1.b[3]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "udot z8.s, z4.b, z2.b[0]\n"
+ "udot z9.s, z4.b, z2.b[1]\n"
+ "udot z10.s, z4.b, z2.b[2]\n"
+ "udot z11.s, z4.b, z2.b[3]\n"
+ "udot z20.s, z4.b, z3.b[0]\n"
+ "st1w z8.s, p0, [%[c_ptr]]\n"
+ "udot z21.s, z4.b, z3.b[1]\n"
+ "udot z22.s, z4.b, z3.b[2]\n"
+ "udot z23.s, z4.b, z3.b[3]\n"
+ "udot z12.s, z5.b, z2.b[0]\n"
+ "udot z13.s, z5.b, z2.b[1]\n"
+ "udot z14.s, z5.b, z2.b[2]\n"
+ "udot z15.s, z5.b, z2.b[3]\n"
+ "udot z24.s, z5.b, z3.b[0]\n"
+ "st1w z12.s, p0, [%[c_ptr], #1, MUL VL]\n"
+ "udot z25.s, z5.b, z3.b[1]\n"
+ "udot z26.s, z5.b, z3.b[2]\n"
+ "udot z27.s, z5.b, z3.b[3]\n"
+ "udot z16.s, z6.b, z2.b[0]\n"
+ "udot z17.s, z6.b, z2.b[1]\n"
+ "udot z18.s, z6.b, z2.b[2]\n"
+ "udot z19.s, z6.b, z2.b[3]\n"
+ "udot z28.s, z6.b, z3.b[0]\n"
+ "st1w z16.s, p0, [%[c_ptr], #2, MUL VL]\n"
+ "udot z29.s, z6.b, z3.b[1]\n"
+ "udot z30.s, z6.b, z3.b[2]\n"
+ "udot z31.s, z6.b, z3.b[3]\n"
+ "4:\n"
+ "st1w z9.s, p0, [%[c_ptr], #3, MUL VL]\n"
+ "st1w z13.s, p0, [%[c_ptr], #4, MUL VL]\n"
+ "st1w z17.s, p0, [%[c_ptr], #5, MUL VL]\n"
+ "st1w z10.s, p0, [%[c_ptr], #6, MUL VL]\n"
+ "st1w z14.s, p0, [%[c_ptr], #7, MUL VL]\n"
+ "addvl %[c_ptr], %[c_ptr], #16\n"
+ "st1w z18.s, p0, [%[c_ptr], #-8, MUL VL]\n"
+ "st1w z11.s, p0, [%[c_ptr], #-7, MUL VL]\n"
+ "st1w z15.s, p0, [%[c_ptr], #-6, MUL VL]\n"
+ "st1w z19.s, p0, [%[c_ptr], #-5, MUL VL]\n"
+ "st1w z20.s, p0, [%[c_ptr], #-4, MUL VL]\n"
+ "st1w z24.s, p0, [%[c_ptr], #-3, MUL VL]\n"
+ "st1w z28.s, p0, [%[c_ptr], #-2, MUL VL]\n"
+ "st1w z21.s, p0, [%[c_ptr], #-1, MUL VL]\n"
+ "st1w z25.s, p0, [%[c_ptr]]\n"
+ "st1w z29.s, p0, [%[c_ptr], #1, MUL VL]\n"
+ "st1w z22.s, p0, [%[c_ptr], #2, MUL VL]\n"
+ "st1w z26.s, p0, [%[c_ptr], #3, MUL VL]\n"
+ "st1w z30.s, p0, [%[c_ptr], #4, MUL VL]\n"
+ "st1w z23.s, p0, [%[c_ptr], #5, MUL VL]\n"
+ "st1w z27.s, p0, [%[c_ptr], #6, MUL VL]\n"
+ "st1w z31.s, p0, [%[c_ptr], #7, MUL VL]\n"
+ "addvl %[c_ptr], %[c_ptr], #8\n"
+ : [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
+ [loops] "+r" (loops), [tails] "+r" (tails)
+ :
+ : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ }
+ }
+}
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
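
Aside (not part of the patch): each UDOT above folds a 4-way unsigned-byte dot
product into every 32-bit accumulator lane, which is why K is pre-divided by 4.
A scalar model of one indexed UDOT; 'lanes' stands in for svcntw(), and a
single a_quad suffices because ld1rqb replicates the same 16 bytes of A into
every 128-bit segment, so the z0.b[i] index selects one broadcast 4-byte group:

    #include <cstdint>

    // acc.s[lane] += sum over j=0..3 of b[4*lane + j] * a_quad[j]
    static void udot_lane_ref(uint32_t *acc, const uint8_t *b,
                              const uint8_t *a_quad, int lanes) {
        for (int lane = 0; lane < lanes; lane++) {
            uint32_t sum = 0;
            for (int j = 0; j < 4; j++) {
                sum += uint32_t(b[4 * lane + j]) * uint32_t(a_quad[j]);
            }
            acc[lane] += sum;
        }
    }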
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_sgemm_3VLx8.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_sgemm_3VLx8.hpp
new file mode 100644
index 0000000000..9d7f593fd0
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_sgemm_3VLx8.hpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+#include "../std_transforms_sve.hpp"
+
+namespace arm_gemm {
+
+// Actual kernel implementations
+void sve_sgemm_3VLx8(const float *, const float *, float *, int, int, int);
+
+// 3VLx8 SGEMM "strategy" class.
+//
+// This describes the characteristics of a family of kernels, in terms of
+// the required interleave properties and the output block size.
+//
+// All kernels in the family must share these characteristics. The actual
+// kernel to be used can be chosen at runtime, based on the CPUInfo
+// structure.
+class sgemm_3VLx8 {
+public:
+ typedef float operand_type;
+ typedef float result_type;
+
+ typedef void (*kern_type)(const float *, const float *, float *, int, int, int);
+
+ /* Kernel blocking parameters */
+
+ /* Width depends on vector length - use CNTW to compute the right value */
+ static int out_width() {
+ return svcntw() * 3;
+ }
+
+ static int out_height() {
+ return 8;
+ }
+
+ static int k_unroll() {
+ return 1;
+ }
+
+ // Use the standard SVE transforms.
+ StdTransformsSVE<operand_type, result_type, 8, 3> transforms;
+
+ kern_type kernel=sve_sgemm_3VLx8;
+
+ sgemm_3VLx8(const CPUInfo *ci) { }
+};
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_sgemm_3VLx8/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_sgemm_3VLx8/generic.cpp
new file mode 100644
index 0000000000..fd6f0b7f98
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_sgemm_3VLx8/generic.cpp
@@ -0,0 +1,366 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+
+#include <arm_neon.h>
+#include <arm_sve.h>
+
+#include "../../asmlib.hpp"
+
+// Kernel implementation.
+//
+// Assume that "Apanel" points to a chunk of A blocks (each size 8xK) in read-order.
+// Assume that "Bpanel" points to a chunk of B blocks (each size 3VLxK) in read-order.
+// Assume that "Cpanel" points to a chunk of C output blocks (each size
+// 3VLx8), the chunks being arranged in a row major fashion.
+//
+// Note that the intent is that either ablocks or bblocks will be 1;
+// this construction allows the output loop to proceed in either order.
+
+namespace arm_gemm {
+
+void sve_sgemm_3VLx8(const float *Apanel, const float *Bpanel, float *Cpanel, int ablocks, int bblocks, int K) {
+ const float *a_ptr = Apanel;
+ float *c_ptr = Cpanel;
+
+ // There's no predication inside the kernel, so get a true predicate to use everywhere.
+ svbool_t ptrue = svptrue_b32();
+
+ for (int yb=0; yb<ablocks; yb++) {
+ const float *a_ptr0 = a_ptr;
+ const float *b_ptr = Bpanel;
+
+ for (int xb=0; xb<bblocks; xb++) {
+ a_ptr = a_ptr0;
+ // Fix up for odd lengths - set a flag if K is odd, but make
+ // sure we round up the iteration count.
+ int oddk = (K & 1);
+ int k = ((K+1)/2) - 1;
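+            // For example, K=5 gives oddk=1 and k=((5+1)/2)-1 = 2: two
+            // iterations of the main loop, then the odd-K detached tail.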
+
+ register svfloat32_t a0 asm("z0");
+ register svfloat32_t a1 asm("z1");
+ register svfloat32_t b0 asm("z2");
+ register svfloat32_t b1 asm("z3");
+ register svfloat32_t b2 asm("z4");
+ register svfloat32_t a0a asm("z5");
+ register svfloat32_t a1a asm("z6");
+
+            // Note: all prefetches are commented out for now, but left in place to
+            // document how this was done on NEON.  Actual prefetches will be added
+            // once test hardware is available.
+ __asm __volatile (
+ // Initialize result registers, load initial operands, prime prefetches.
+ "mov z8.s, #0\n"
+ "ld1rqw %[a0].S, %[ptrue]/Z, [%[a_ptr]]\n"
+ "mov z9.s, #0\n"
+ "ld1w %[b0].S, %[ptrue]/Z, [%[b_ptr]]\n"
+ "mov z10.s, #0\n"
+ "ld1rqw %[a1].S, %[ptrue]/Z, [%[a_ptr], #0x10]\n"
+ "mov z11.s, #0\n"
+ "ld1w %[b1].S, %[ptrue]/Z, [%[b_ptr], #1, MUL VL]\n"
+ "mov z12.s, #0\n"
+ //ASM_PREFETCH("[%[b_ptr], #64]")
+ "mov z13.s, #0\n"
+ //ASM_PREFETCH("[%[a_ptr], #64]")
+ "mov z14.s, #0\n"
+ //ASM_PREFETCH("[%[b_ptr], #128]")
+ "mov z15.s, #0\n"
+ //ASM_PREFETCH("[%[a_ptr], #128]")
+ "mov z16.s, #0\n"
+ //ASM_PREFETCH("[%[b_ptr], #192]")
+ "mov z17.s, #0\n"
+ //ASM_PREFETCH("[%[b_ptr], #256]")
+ "mov z18.s, #0\n"
+ //ASM_PREFETCH("[%[a_ptr], #192]")
+ "mov z19.s, #0\n"
+ //ASM_PREFETCH("[%[b_ptr], #320]")
+ "mov z20.s, #0\n"
+ //ASM_PREFETCH("[%[a_ptr], #256]")
+ "mov z21.s, #0\n"
+ //ASM_PREFETCH("[%[b_ptr], #384]")
+ "mov z22.s, #0\n"
+ "mov z23.s, #0\n"
+ "mov z24.s, #0\n"
+ "mov z25.s, #0\n"
+ "mov z26.s, #0\n"
+ "mov z27.s, #0\n"
+ "mov z28.s, #0\n"
+ "mov z29.s, #0\n"
+ "mov z30.s, #0\n"
+ "mov z31.s, #0\n"
+
+ // Skip loop if we are doing zero iterations of it.
+ "cbz %w[k], 4f\n"
+
+ // Loop proper
+ "1:\n"
+ "fmla z8.s , %[b0].s, %[a0].s[0]\n"
+ "fmla z9.s , %[b0].s, %[a0].s[1]\n"
+ "ld1w %[b2].s, %[ptrue]/Z, [%[b_ptr], #2, MUL VL]\n"
+ "fmla z10.s, %[b0].s, %[a0].s[2]\n"
+ "fmla z11.s, %[b0].s, %[a0].s[3]\n"
+ "ld1rqw %[a0a].s, %[ptrue]/Z, [%[a_ptr], #0x20]\n"
+ "fmla z12.s, %[b0].s, %[a1].s[0]\n"
+ "fmla z13.s, %[b0].s, %[a1].s[1]\n"
+ "ld1rqw %[a1a].s, %[ptrue]/Z, [%[a_ptr], #0x30]\n"
+ "fmla z14.s, %[b0].s, %[a1].s[2]\n"
+ "fmla z15.s, %[b0].s, %[a1].s[3]\n"
+ "ld1w %[b0].s, %[ptrue]/Z, [%[b_ptr], #3, MUL VL]\n"
+
+ "fmla z16.s, %[b1].s, %[a0].s[0]\n"
+ "fmla z17.s, %[b1].s, %[a0].s[1]\n"
+ //ASM_PREFETCH("[%[a_ptr], #320]")
+ "fmla z18.s, %[b1].s, %[a0].s[2]\n"
+ "fmla z19.s, %[b1].s, %[a0].s[3]\n"
+ "fmla z20.s, %[b1].s, %[a1].s[0]\n"
+ "fmla z21.s, %[b1].s, %[a1].s[1]\n"
+ "fmla z22.s, %[b1].s, %[a1].s[2]\n"
+ "fmla z23.s, %[b1].s, %[a1].s[3]\n"
+ "ld1w %[b1].s, %[ptrue]/Z, [%[b_ptr], #4, MUL VL]\n"
+
+ "fmla z24.s, %[b2].s, %[a0].s[0]\n"
+ "fmla z25.s, %[b2].s, %[a0].s[1]\n"
+ //ASM_PREFETCH("[%[b_ptr], #448]")
+ "fmla z26.s, %[b2].s, %[a0].s[2]\n"
+ "fmla z27.s, %[b2].s, %[a0].s[3]\n"
+ "fmla z28.s, %[b2].s, %[a1].s[0]\n"
+ "fmla z29.s, %[b2].s, %[a1].s[1]\n"
+ "fmla z30.s, %[b2].s, %[a1].s[2]\n"
+ "fmla z31.s, %[b2].s, %[a1].s[3]\n"
+ "ld1w %[b2].s, %[ptrue]/Z, [%[b_ptr], #5, MUL VL]\n"
+
+ "fmla z8.s , %[b0].s, %[a0a].s[0]\n"
+ "fmla z9.s , %[b0].s, %[a0a].s[1]\n"
+ "ld1rqw %[a0].s, %[ptrue]/Z, [%[a_ptr], #0x40]\n"
+ "fmla z10.s, %[b0].s, %[a0a].s[2]\n"
+ "fmla z11.s, %[b0].s, %[a0a].s[3]\n"
+ "fmla z12.s, %[b0].s, %[a1a].s[0]\n"
+ "ld1rqw %[a1].s, %[ptrue]/Z, [%[a_ptr], #0x50]\n"
+ "fmla z13.s, %[b0].s, %[a1a].s[1]\n"
+ "fmla z14.s, %[b0].s, %[a1a].s[2]\n"
+ "fmla z15.s, %[b0].s, %[a1a].s[3]\n"
+ "ld1w %[b0].s, %[ptrue]/Z, [%[b_ptr], #6, MUL VL]\n"
+
+ "fmla z16.s, %[b1].s, %[a0a].s[0]\n"
+ "fmla z17.s, %[b1].s, %[a0a].s[1]\n"
+ //ASM_PREFETCH("[%[b_ptr], #512]")
+ "fmla z18.s, %[b1].s, %[a0a].s[2]\n"
+ "fmla z19.s, %[b1].s, %[a0a].s[3]\n"
+ "fmla z20.s, %[b1].s, %[a1a].s[0]\n"
+ "fmla z21.s, %[b1].s, %[a1a].s[1]\n"
+ "fmla z22.s, %[b1].s, %[a1a].s[2]\n"
+ "fmla z23.s, %[b1].s, %[a1a].s[3]\n"
+ "ld1w %[b1].s, %[ptrue]/Z, [%[b_ptr], #7, MUL VL]\n"
+
+ "fmla z24.s, %[b2].s, %[a0a].s[0]\n"
+ "fmla z25.s, %[b2].s, %[a0a].s[1]\n"
+ "add %[a_ptr], %[a_ptr], #0x40\n"
+ "fmla z26.s, %[b2].s, %[a0a].s[2]\n"
+ "fmla z27.s, %[b2].s, %[a0a].s[3]\n"
+ "incb %[b_ptr], ALL, MUL #6\n"
+ "fmla z28.s, %[b2].s, %[a1a].s[0]\n"
+ "fmla z29.s, %[b2].s, %[a1a].s[1]\n"
+ "subs %w[k], %w[k], #1\n"
+ "fmla z30.s, %[b2].s, %[a1a].s[2]\n"
+ "fmla z31.s, %[b2].s, %[a1a].s[3]\n"
+ "bne 1b\n"
+
+ // Target to use when K is 1 or 2 (i.e. zero iterations of main loop)
+ "4:\n"
+
+ // Branch to alternative tail for odd K
+ "cbnz %w[oddk], 2f\n"
+
+ // Detached final iteration (even K)
+ "fmla z8.s , %[b0].s, %[a0].s[0]\n"
+ "fmla z9.s , %[b0].s, %[a0].s[1]\n"
+ "ld1w %[b2].s, %[ptrue]/Z, [%[b_ptr], #2, MUL VL]\n"
+ "fmla z10.s, %[b0].s, %[a0].s[2]\n"
+ "fmla z11.s, %[b0].s, %[a0].s[3]\n"
+ "ld1rqw %[a0a].s, %[ptrue]/Z, [%[a_ptr], #0x20]\n"
+ "fmla z12.s, %[b0].s, %[a1].s[0]\n"
+ "fmla z13.s, %[b0].s, %[a1].s[1]\n"
+ "ld1rqw %[a1a].s, %[ptrue]/Z, [%[a_ptr], #0x30]\n"
+ "fmla z14.s, %[b0].s, %[a1].s[2]\n"
+ "fmla z15.s, %[b0].s, %[a1].s[3]\n"
+ "ld1w %[b0].s, %[ptrue]/Z, [%[b_ptr], #3, MUL VL]\n"
+
+ "fmla z16.s, %[b1].s, %[a0].s[0]\n"
+ "fmla z17.s, %[b1].s, %[a0].s[1]\n"
+ "fmla z18.s, %[b1].s, %[a0].s[2]\n"
+ "fmla z19.s, %[b1].s, %[a0].s[3]\n"
+ "fmla z20.s, %[b1].s, %[a1].s[0]\n"
+ "fmla z21.s, %[b1].s, %[a1].s[1]\n"
+ "fmla z22.s, %[b1].s, %[a1].s[2]\n"
+ "fmla z23.s, %[b1].s, %[a1].s[3]\n"
+ "ld1w %[b1].s, %[ptrue]/Z, [%[b_ptr], #4, MUL VL]\n"
+
+ "fmla z24.s, %[b2].s, %[a0].s[0]\n"
+ "fmla z25.s, %[b2].s, %[a0].s[1]\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
+ "fmla z26.s, %[b2].s, %[a0].s[2]\n"
+ "fmla z27.s, %[b2].s, %[a0].s[3]\n"
+ "fmla z28.s, %[b2].s, %[a1].s[0]\n"
+ "fmla z29.s, %[b2].s, %[a1].s[1]\n"
+ "fmla z30.s, %[b2].s, %[a1].s[2]\n"
+ "fmla z31.s, %[b2].s, %[a1].s[3]\n"
+ "ld1w %[b2].s, %[ptrue]/Z, [%[b_ptr], #5, MUL VL]\n"
+
+ "fmla z8.s , %[b0].s, %[a0a].s[0]\n"
+ "fmla z16.s, %[b1].s, %[a0a].s[0]\n"
+ "incb %[b_ptr], ALL, MUL #6\n"
+ "fmla z9.s , %[b0].s, %[a0a].s[1]\n"
+ "st1w z8.s, %[ptrue], [%[c_ptr]]\n"
+ "fmla z17.s, %[b1].s, %[a0a].s[1]\n"
+ "st1w z16.s, %[ptrue], [%[c_ptr], #1, MUL VL]\n"
+ "fmla z24.s, %[b2].s, %[a0a].s[0]\n"
+ "st1w z24.s, %[ptrue], [%[c_ptr], #2, MUL VL]\n"
+
+ "fmla z25.s, %[b2].s, %[a0a].s[1]\n"
+ "st1w z9.s, %[ptrue], [%[c_ptr], #3, MUL VL]\n"
+ "fmla z10.s, %[b0].s, %[a0a].s[2]\n"
+ "st1w z17.s, %[ptrue], [%[c_ptr], #4, MUL VL]\n"
+ "fmla z18.s, %[b1].s, %[a0a].s[2]\n"
+ "st1w z25.s, %[ptrue], [%[c_ptr], #5, MUL VL]\n"
+ "fmla z26.s, %[b2].s, %[a0a].s[2]\n"
+ "st1w z10.s, %[ptrue], [%[c_ptr], #6, MUL VL]\n"
+
+ "fmla z11.s, %[b0].s, %[a0a].s[3]\n"
+ "st1w z18.s, %[ptrue], [%[c_ptr], #7, MUL VL]\n"
+ "incb %[c_ptr], all, mul #12\n"
+ "fmla z19.s, %[b1].s, %[a0a].s[3]\n"
+ "st1w z26.s, %[ptrue], [%[c_ptr], #-4, MUL VL]\n"
+ "fmla z27.s, %[b2].s, %[a0a].s[3]\n"
+ "st1w z11.s, %[ptrue], [%[c_ptr], #-3, MUL VL]\n"
+
+ "fmla z12.s, %[b0].s, %[a1a].s[0]\n"
+ "st1w z19.s, %[ptrue], [%[c_ptr], #-2, MUL VL]\n"
+ "fmla z20.s, %[b1].s, %[a1a].s[0]\n"
+ "st1w z27.s, %[ptrue], [%[c_ptr], #-1, MUL VL]\n"
+ "fmla z28.s, %[b2].s, %[a1a].s[0]\n"
+ "st1w z12.s, %[ptrue], [%[c_ptr]]\n"
+
+ "fmla z13.s, %[b0].s, %[a1a].s[1]\n"
+ "st1w z20.s, %[ptrue], [%[c_ptr], #1, MUL VL]\n"
+ "fmla z21.s, %[b1].s, %[a1a].s[1]\n"
+ "st1w z28.s, %[ptrue], [%[c_ptr], #2, MUL VL]\n"
+ "fmla z29.s, %[b2].s, %[a1a].s[1]\n"
+ "st1w z13.s, %[ptrue], [%[c_ptr], #3, MUL VL]\n"
+
+ "fmla z14.s, %[b0].s, %[a1a].s[2]\n"
+ "st1w z21.s, %[ptrue], [%[c_ptr], #4, MUL VL]\n"
+ "fmla z22.s, %[b1].s, %[a1a].s[2]\n"
+ "st1w z29.s, %[ptrue], [%[c_ptr], #5, MUL VL]\n"
+ "fmla z30.s, %[b2].s, %[a1a].s[2]\n"
+ "st1w z14.s, %[ptrue], [%[c_ptr], #6, MUL VL]\n"
+
+ "fmla z15.s, %[b0].s, %[a1a].s[3]\n"
+ "st1w z22.s, %[ptrue], [%[c_ptr], #7, MUL VL]\n"
+ "incb %[c_ptr], all, mul #12\n"
+ "fmla z23.s, %[b1].s, %[a1a].s[3]\n"
+ "st1w z30.s, %[ptrue], [%[c_ptr], #-4, MUL VL]\n"
+ "fmla z31.s, %[b2].s, %[a1a].s[3]\n"
+ "st1w z15.s, %[ptrue], [%[c_ptr], #-3, MUL VL]\n"
+
+ "b 3f\n"
+
+ // Detached final iteration (odd K)
+ "2:\n"
+ "fmla z8.s , %[b0].s, %[a0].s[0]\n"
+ "ld1w %[b2].s, %[ptrue]/Z, [%[b_ptr], #2, MUL VL]\n"
+ "fmla z16.s, %[b1].s, %[a0].s[0]\n"
+ "fmla z9.s , %[b0].s, %[a0].s[1]\n"
+ "st1w z8.s, %[ptrue], [%[c_ptr]]\n"
+ "fmla z17.s, %[b1].s, %[a0].s[1]\n"
+ "st1w z16.s, %[ptrue], [%[c_ptr], #1, MUL VL]\n"
+ "fmla z24.s, %[b2].s, %[a0].s[0]\n"
+ "incb %[b_ptr], all, mul #3\n"
+ "add %[a_ptr], %[a_ptr], #32\n"
+ "st1w z24.s, %[ptrue], [%[c_ptr], #2, MUL VL]\n"
+ "fmla z25.s, %[b2].s, %[a0].s[1]\n"
+ "st1w z9.s, %[ptrue], [%[c_ptr], #3, MUL VL]\n"
+
+ "fmla z10.s, %[b0].s, %[a0].s[2]\n"
+ "st1w z17.s, %[ptrue], [%[c_ptr], #4, MUL VL]\n"
+ "fmla z18.s, %[b1].s, %[a0].s[2]\n"
+ "st1w z25.s, %[ptrue], [%[c_ptr], #5, MUL VL]\n"
+ "fmla z26.s, %[b2].s, %[a0].s[2]\n"
+ "st1w z10.s, %[ptrue], [%[c_ptr], #6, MUL VL]\n"
+
+ "fmla z11.s, %[b0].s, %[a0].s[3]\n"
+ "st1w z18.s, %[ptrue], [%[c_ptr], #7, MUL VL]\n"
+ "incb %[c_ptr], all, mul #12\n"
+ "fmla z19.s, %[b1].s, %[a0].s[3]\n"
+ "st1w z26.s, %[ptrue], [%[c_ptr], #-4, MUL VL]\n"
+ "fmla z27.s, %[b2].s, %[a0].s[3]\n"
+ "st1w z11.s, %[ptrue], [%[c_ptr], #-3, MUL VL]\n"
+
+ "fmla z12.s, %[b0].s, %[a1].s[0]\n"
+ "st1w z19.s, %[ptrue], [%[c_ptr], #-2, MUL VL]\n"
+ "fmla z20.s, %[b1].s, %[a1].s[0]\n"
+ "st1w z27.s, %[ptrue], [%[c_ptr], #-1, MUL VL]\n"
+ "fmla z28.s, %[b2].s, %[a1].s[0]\n"
+ "st1w z12.s, %[ptrue], [%[c_ptr]]\n"
+
+ "fmla z13.s, %[b0].s, %[a1].s[1]\n"
+ "st1w z20.s, %[ptrue], [%[c_ptr], #1, MUL VL]\n"
+ "fmla z21.s, %[b1].s, %[a1].s[1]\n"
+ "st1w z28.s, %[ptrue], [%[c_ptr], #2, MUL VL]\n"
+ "fmla z29.s, %[b2].s, %[a1].s[1]\n"
+ "st1w z13.s, %[ptrue], [%[c_ptr], #3, MUL VL]\n"
+
+ "fmla z14.s, %[b0].s, %[a1].s[2]\n"
+ "st1w z21.s, %[ptrue], [%[c_ptr], #4, MUL VL]\n"
+ "fmla z22.s, %[b1].s, %[a1].s[2]\n"
+ "st1w z29.s, %[ptrue], [%[c_ptr], #5, MUL VL]\n"
+ "fmla z30.s, %[b2].s, %[a1].s[2]\n"
+ "st1w z14.s, %[ptrue], [%[c_ptr], #6, MUL VL]\n"
+
+ "fmla z15.s, %[b0].s, %[a1].s[3]\n"
+ "st1w z22.s, %[ptrue], [%[c_ptr], #7, MUL VL]\n"
+ "incb %[c_ptr], all, mul #12\n"
+ "fmla z23.s, %[b1].s, %[a1].s[3]\n"
+ "st1w z30.s, %[ptrue], [%[c_ptr], #-4, MUL VL]\n"
+ "fmla z31.s, %[b2].s, %[a1].s[3]\n"
+ "st1w z15.s, %[ptrue], [%[c_ptr], #-3, MUL VL]\n"
+
+ // Common tail
+ "3:\n"
+ "st1w z23.s, %[ptrue], [%[c_ptr], #-2, MUL VL]\n"
+ "st1w z31.s, %[ptrue], [%[c_ptr], #-1, MUL VL]\n"
+ :
+ [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
+ [a0] "+w" (a0), [a1] "+w" (a1), [a0a] "+w" (a0a), [a1a] "+w" (a1a),
+ [b0] "+w" (b0), [b1] "+w" (b1), [b2] "+w" (b2), [k] "+r" (k)
+ : [oddk] "r" (oddk), [ptrue] "Upl" (ptrue)
+ : "x20", "x21", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18",
+ "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",
+ "cc", "memory"
+ );
+ }
+ }
+}
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
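
For readers following the assembly: a scalar reference (not part of the patch)
of what one pass of this kernel computes per C block, with W standing in for
out_width() = 3 * svcntw(). The packed layouts match the loads above: Apanel
supplies 8 floats per K step, Bpanel supplies W floats per K step, and the C
block is stored as 8 row-major rows of W.

    // Sketch only: reference for the block product Cblock = Ablock * Bblock.
    static void sgemm_3VLx8_ref(const float *a, const float *b, float *c,
                                int K, int W) {
        for (int i = 0; i < 8 * W; i++) {
            c[i] = 0.0f;                        // kernel zeroes its accumulators
        }
        for (int k = 0; k < K; k++) {
            for (int row = 0; row < 8; row++) {
                for (int col = 0; col < W; col++) {
                    c[row * W + col] += a[k * 8 + row] * b[k * W + col];
                }
            }
        }
    }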
diff --git a/src/core/NEON/kernels/arm_gemm/merges/list.hpp b/src/core/NEON/kernels/arm_gemm/merges/list.hpp
index d93f1b0e6e..181d1a4e3f 100644
--- a/src/core/NEON/kernels/arm_gemm/merges/list.hpp
+++ b/src/core/NEON/kernels/arm_gemm/merges/list.hpp
@@ -26,3 +26,5 @@
#include "a64_merge_float_to_half_12x8.hpp"
#include "a64_merge_half_24x8.hpp"
#include "a64_merge_int32_12x8.hpp"
+#include "sve_merge_fp32_2VLx8.hpp"
+#include "sve_merge_fp32_3VLx8.hpp"
diff --git a/src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp32_2VLx8.hpp b/src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp32_2VLx8.hpp
new file mode 100644
index 0000000000..7479c8d77c
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp32_2VLx8.hpp
@@ -0,0 +1,1208 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+template<>
+inline void MergeResults<2, 8, true>(float *out, const float *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const float alpha, const float beta)
+{
+ const float *inptr = in;
+
+ for (int y=y0; y<ymax; y+=8) {
+ float *outptr0 = out + (y * ldout) + x0;
+ float *outptr1 = outptr0 + ldout;
+ float *outptr2 = outptr1 + ldout;
+ float *outptr3 = outptr2 + ldout;
+ float *outptr4 = outptr3 + ldout;
+ float *outptr5 = outptr4 + ldout;
+ float *outptr6 = outptr5 + ldout;
+ float *outptr7 = outptr6 + ldout;
+
+ const int height = ymax - y;
+
+ for (int i=x0; i<xmax; i+=(2 * get_vector_length<float>())) {
+ if (beta==0.0f)
+ {
+ switch(height) {
+ case 1:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z5.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 2:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z6.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr1], #0x40]\n"
+ "addvl %[outptr1], %[outptr1], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 3:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z7.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z4.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr1], #0x40]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #2\n"
+ "prfm PSTL1KEEP, [%[outptr2], #0x40]\n"
+ "addvl %[outptr2], %[outptr2], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 4:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr1], #0x40]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #2\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr2], #0x40]\n"
+ "addvl %[outptr2], %[outptr2], #2\n"
+ "prfm PSTL1KEEP, [%[outptr3], #0x40]\n"
+ "addvl %[outptr3], %[outptr3], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 5:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z5.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr1], #0x40]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z4.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #2\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [x8, #-7, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr2], #0x40]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "addvl %[outptr2], %[outptr2], #2\n"
+ "prfm PSTL1KEEP, [%[outptr3], #0x40]\n"
+ "addvl %[outptr3], %[outptr3], #2\n"
+ "prfm PSTL1KEEP, [%[outptr4], #0x40]\n"
+ "addvl %[outptr4], %[outptr4], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 6:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "ld1w z5.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z6.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z4.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr1], #0x40]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #2\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z6.s, p0/z, [x8, #-7, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr2], #0x40]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [x8, #-5, MUL VL]\n"
+ "addvl %[outptr2], %[outptr2], #2\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr5], #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr3], #0x40]\n"
+ "addvl %[outptr3], %[outptr3], #2\n"
+ "prfm PSTL1KEEP, [%[outptr4], #0x40]\n"
+ "addvl %[outptr4], %[outptr4], #2\n"
+ "prfm PSTL1KEEP, [%[outptr5], #0x40]\n"
+ "addvl %[outptr5], %[outptr5], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 7:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "ld1w z5.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5]]\n"
+ "ld1w z6.s, p0/z, [x8, #-4, MUL VL]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr6]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z7.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z4.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr1], #0x40]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #2\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr2], #0x40]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "ld1w z4.s, p0/z, [x8, #-5, MUL VL]\n"
+ "addvl %[outptr2], %[outptr2], #2\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr5], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [x8, #-3, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr3], #0x40]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr6], #1, MUL VL]\n"
+ "addvl %[outptr3], %[outptr3], #2\n"
+ "prfm PSTL1KEEP, [%[outptr4], #0x40]\n"
+ "addvl %[outptr4], %[outptr4], #2\n"
+ "prfm PSTL1KEEP, [%[outptr5], #0x40]\n"
+ "addvl %[outptr5], %[outptr5], #2\n"
+ "prfm PSTL1KEEP, [%[outptr6], #0x40]\n"
+ "addvl %[outptr6], %[outptr6], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ default:
+ case 8:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "ld1w z5.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5]]\n"
+ "ld1w z6.s, p0/z, [x8, #-4, MUL VL]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr6]]\n"
+ "ld1w z7.s, p0/z, [x8, #-2, MUL VL]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr7]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr1], #0x40]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #2\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z4.s, p0/z, [x8, #-7, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr2], #0x40]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [x8, #-5, MUL VL]\n"
+ "addvl %[outptr2], %[outptr2], #2\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5], #1, MUL VL]\n"
+ "ld1w z6.s, p0/z, [x8, #-3, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr3], #0x40]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr6], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [x8, #-1, MUL VL]\n"
+ "addvl %[outptr3], %[outptr3], #2\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr7], #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr4], #0x40]\n"
+ "addvl %[outptr4], %[outptr4], #2\n"
+ "prfm PSTL1KEEP, [%[outptr5], #0x40]\n"
+ "addvl %[outptr5], %[outptr5], #2\n"
+ "prfm PSTL1KEEP, [%[outptr6], #0x40]\n"
+ "addvl %[outptr6], %[outptr6], #2\n"
+ "prfm PSTL1KEEP, [%[outptr7], #0x40]\n"
+ "addvl %[outptr7], %[outptr7], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ }
+ }
+ else
+ {
+ switch(height) {
+ case 1:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z9.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 2:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z9.s, p0/z, [%[outptr1]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z10.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr1], #0x40]\n"
+ "addvl %[outptr1], %[outptr1], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 3:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z9.s, p0/z, [%[outptr1]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z10.s, p0/z, [%[outptr2]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z11.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr2], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr1], #0x40]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #2\n"
+ "prfm PLDL1KEEP, [%[outptr2], #0x40]\n"
+ "addvl %[outptr2], %[outptr2], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 4:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z9.s, p0/z, [%[outptr1]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z10.s, p0/z, [%[outptr2]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z11.s, p0/z, [%[outptr3]]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr2], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr1], #0x40]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr3], #1, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #2\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr2], #0x40]\n"
+ "addvl %[outptr2], %[outptr2], #2\n"
+ "prfm PLDL1KEEP, [%[outptr3], #0x40]\n"
+ "addvl %[outptr3], %[outptr3], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 5:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z9.s, p0/z, [%[outptr1]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z10.s, p0/z, [%[outptr2]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z11.s, p0/z, [%[outptr3]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z8.s, p0/z, [%[outptr4]]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z9.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr2], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr1], #0x40]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr3], #1, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #2\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr4], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr2], #0x40]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #-7, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "addvl %[outptr2], %[outptr2], #2\n"
+ "prfm PLDL1KEEP, [%[outptr3], #0x40]\n"
+ "addvl %[outptr3], %[outptr3], #2\n"
+ "prfm PLDL1KEEP, [%[outptr4], #0x40]\n"
+ "addvl %[outptr4], %[outptr4], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 6:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z9.s, p0/z, [%[outptr1]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z10.s, p0/z, [%[outptr2]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z11.s, p0/z, [%[outptr3]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z8.s, p0/z, [%[outptr4]]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "ld1w z9.s, p0/z, [%[outptr5]]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z10.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr2], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr1], #0x40]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr3], #1, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #2\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr4], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr2], #0x40]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #-7, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr5], #1, MUL VL]\n"
+ "addvl %[outptr2], %[outptr2], #2\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-5, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr5], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr3], #0x40]\n"
+ "addvl %[outptr3], %[outptr3], #2\n"
+ "prfm PLDL1KEEP, [%[outptr4], #0x40]\n"
+ "addvl %[outptr4], %[outptr4], #2\n"
+ "prfm PLDL1KEEP, [%[outptr5], #0x40]\n"
+ "addvl %[outptr5], %[outptr5], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 7:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z9.s, p0/z, [%[outptr1]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z10.s, p0/z, [%[outptr2]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z11.s, p0/z, [%[outptr3]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z8.s, p0/z, [%[outptr4]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "ld1w z9.s, p0/z, [%[outptr5]]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5]]\n"
+ "ld1w z10.s, p0/z, [%[outptr6]]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #-4, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr6]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z11.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr2], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr1], #0x40]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr3], #1, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #2\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr4], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr2], #0x40]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr5], #1, MUL VL]\n"
+ "addvl %[outptr2], %[outptr2], #2\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-5, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr5], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr6], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr3], #0x40]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #-3, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr6], #1, MUL VL]\n"
+ "addvl %[outptr3], %[outptr3], #2\n"
+ "prfm PLDL1KEEP, [%[outptr4], #0x40]\n"
+ "addvl %[outptr4], %[outptr4], #2\n"
+ "prfm PLDL1KEEP, [%[outptr5], #0x40]\n"
+ "addvl %[outptr5], %[outptr5], #2\n"
+ "prfm PLDL1KEEP, [%[outptr6], #0x40]\n"
+ "addvl %[outptr6], %[outptr6], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ default:
+ case 8:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z9.s, p0/z, [%[outptr1]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z10.s, p0/z, [%[outptr2]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z11.s, p0/z, [%[outptr3]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z8.s, p0/z, [%[outptr4]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "ld1w z9.s, p0/z, [%[outptr5]]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5]]\n"
+ "ld1w z10.s, p0/z, [%[outptr6]]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #-4, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr6]]\n"
+ "ld1w z11.s, p0/z, [%[outptr7]]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-2, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr7]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr2], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr1], #0x40]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr3], #1, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #2\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr4], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr2], #0x40]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-7, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr5], #1, MUL VL]\n"
+ "addvl %[outptr2], %[outptr2], #2\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #-5, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5], #1, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr6], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr3], #0x40]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #-3, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr6], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr7], #1, MUL VL]\n"
+ "addvl %[outptr3], %[outptr3], #2\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-1, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr7], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr4], #0x40]\n"
+ "addvl %[outptr4], %[outptr4], #2\n"
+ "prfm PLDL1KEEP, [%[outptr5], #0x40]\n"
+ "addvl %[outptr5], %[outptr5], #2\n"
+ "prfm PLDL1KEEP, [%[outptr6], #0x40]\n"
+ "addvl %[outptr6], %[outptr6], #2\n"
+ "prfm PLDL1KEEP, [%[outptr7], #0x40]\n"
+ "addvl %[outptr7], %[outptr7], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+
+ }
+ }
+ }
+ }
+}
+
+#endif // __ARM_FEATURE_SVE
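Both merge headers implement the same arithmetic, differing only in block
width (2 versus 3 SVE vectors of columns per 8-row block). For readers
tracing the predicated assembly, here is a minimal scalar sketch of the
3VLx8 variant that follows; it is an illustration only, assuming that
get_vector_length<float>() returns the SVE vector length in floats (as the
loop strides in these files use it), not the shipped implementation:

static void merge_fp32_3VLx8_reference(float *out, const float *in,
                                       int ldout, int y0, int ymax,
                                       int x0, int xmax,
                                       float alpha, float beta) {
    const long VL = get_vector_length<float>(); // floats per SVE vector
    const long bw = 3 * VL;                     // columns per block
    for (int y = y0; y < ymax; y += 8) {
        for (int x = x0; x < xmax; x += bw) {
            // The interleaved input stores each 8-row block row-major,
            // three vectors per row.
            for (int r = 0; r < 8 && (y + r) < ymax; r++) {
                for (long c = x; c < xmax && c < x + bw; c++) {
                    float acc = alpha * in[r * bw + (c - x)];
                    if (beta != 0.0f)  // the beta==0 path never reads 'out'
                        acc += beta * out[(y + r) * ldout + c];
                    out[(y + r) * ldout + c] = acc;
                }
            }
            in += 8 * bw; // a full block is consumed even for short tails
        }
    }
}

The assembly below unrolls this over height (cases 1 through 8) and over the
three vectors per row, replacing the inner column loop with whilelt-predicated
loads and stores.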
diff --git a/src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp32_3VLx8.hpp b/src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp32_3VLx8.hpp
new file mode 100644
index 0000000000..27084c3598
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp32_3VLx8.hpp
@@ -0,0 +1,1564 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
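+// Merge one interleaved result block back into the output array:
+// out = alpha * in, plus beta * out when beta is non-zero. Each block
+// covers up to 8 rows by 3 SVE vectors of columns; the height switch
+// picks an unrolled routine for short row tails.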
+template<>
+inline void MergeResults<3, 8, true>(float *out, const float *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const float alpha, const float beta)
+{
+ const float *inptr = in;
+
+ for (int y=y0; y<ymax; y+=8) {
+ float *outptr0 = out + (y * ldout) + x0;
+ float *outptr1 = outptr0 + ldout;
+ float *outptr2 = outptr1 + ldout;
+ float *outptr3 = outptr2 + ldout;
+ float *outptr4 = outptr3 + ldout;
+ float *outptr5 = outptr4 + ldout;
+ float *outptr6 = outptr5 + ldout;
+ float *outptr7 = outptr6 + ldout;
+
+ const int height = ymax - y;
+
+ for (int i=x0; i<xmax; i+=(3 * get_vector_length<float>())) {
+ if (beta==0.0f)
+ {
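+ /* beta == 0: the destination is write-only; each vector is simply
+ * alpha * in, so the routines below never load from the outptrs. */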
+ switch(height) {
+ case 1:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z5.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z6.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 2:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z6.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+ "addvl %[outptr1], %[outptr1], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 3:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z7.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z4.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z6.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
+ "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #3\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+ "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+ "addvl %[outptr2], %[outptr2], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 4:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
+ "ld1w z6.s, p0/z, [x8, #-8, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
+ "ld1w z7.s, p0/z, [x8, #-5, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #3\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+ "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+ "addvl %[outptr2], %[outptr2], #3\n"
+ "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+ "addvl %[outptr3], %[outptr3], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 5:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z4.s, p0/z, [x8, #-4, MUL VL]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z5.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z4.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [x8, #-3, MUL VL]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z6.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
+ "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
+ "ld1w z5.s, p0/z, [x8, #-5, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #3\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr3], #2, MUL VL]\n"
+ "ld1w z6.s, p0/z, [x8, #-2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr4], #2, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+ "addvl %[outptr2], %[outptr2], #3\n"
+ "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+ "addvl %[outptr3], %[outptr3], #3\n"
+ "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+ "addvl %[outptr4], %[outptr4], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 6:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z4.s, p0/z, [x8, #-4, MUL VL]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "ld1w z5.s, p0/z, [x8, #-1, MUL VL]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z6.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z4.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z6.s, p0/z, [x8, #-3, MUL VL]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [x8]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr5], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
+ "ld1w z6.s, p0/z, [x8, #-8, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
+ "ld1w z7.s, p0/z, [x8, #-5, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #3\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
+ "ld1w z4.s, p0/z, [x8, #-2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4], #2, MUL VL]\n"
+ "ld1w z5.s, p0/z, [x8, #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5], #2, MUL VL]\n"
+ "addvl %[outptr2], %[outptr2], #3\n"
+ "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+ "addvl %[outptr3], %[outptr3], #3\n"
+ "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+ "addvl %[outptr4], %[outptr4], #3\n"
+ "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+ "addvl %[outptr5], %[outptr5], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 7:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z4.s, p0/z, [x8, #-4, MUL VL]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "ld1w z5.s, p0/z, [x8, #-1, MUL VL]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5]]\n"
+ "ld1w z6.s, p0/z, [x8, #2, MUL VL]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr6]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z7.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z4.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z6.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [x8, #-3, MUL VL]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "ld1w z4.s, p0/z, [x8]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr5], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [x8, #3, MUL VL]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr6], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z6.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
+ "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
+ "ld1w z5.s, p0/z, [x8, #-5, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #3\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr3], #2, MUL VL]\n"
+ "ld1w z6.s, p0/z, [x8, #-2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr4], #2, MUL VL]\n"
+ "ld1w z7.s, p0/z, [x8, #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr5], #2, MUL VL]\n"
+ "ld1w z4.s, p0/z, [x8, #4, MUL VL]\n"
+ "addvl %[outptr2], %[outptr2], #3\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr6], #2, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+ "addvl %[outptr3], %[outptr3], #3\n"
+ "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+ "addvl %[outptr4], %[outptr4], #3\n"
+ "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+ "addvl %[outptr5], %[outptr5], #3\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
+ "prfm PSTL1KEEP, [%[outptr6], #0x60]\n"
+ "addvl %[outptr6], %[outptr6], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ default:
+ case 8:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z4.s, p0/z, [x8, #-4, MUL VL]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "ld1w z5.s, p0/z, [x8, #-1, MUL VL]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5]]\n"
+ "ld1w z6.s, p0/z, [x8, #2, MUL VL]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr6]]\n"
+ "ld1w z7.s, p0/z, [x8, #5, MUL VL]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr7]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z4.s, p0/z, [x8, #-3, MUL VL]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [x8]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5], #1, MUL VL]\n"
+ "ld1w z6.s, p0/z, [x8, #3, MUL VL]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr6], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [x8, #6, MUL VL]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr7], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
+ "ld1w z6.s, p0/z, [x8, #-8, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
+ "ld1w z7.s, p0/z, [x8, #-5, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #3\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
+ "ld1w z4.s, p0/z, [x8, #-2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4], #2, MUL VL]\n"
+ "ld1w z5.s, p0/z, [x8, #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5], #2, MUL VL]\n"
+ "ld1w z6.s, p0/z, [x8, #4, MUL VL]\n"
+ "addvl %[outptr2], %[outptr2], #3\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr6], #2, MUL VL]\n"
+ "ld1w z7.s, p0/z, [x8, #7, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr7], #2, MUL VL]\n"
+ "addvl %[outptr3], %[outptr3], #3\n"
+ "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+ "addvl %[outptr4], %[outptr4], #3\n"
+ "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+ "addvl %[outptr5], %[outptr5], #3\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
+ "prfm PSTL1KEEP, [%[outptr6], #0x60]\n"
+ "addvl %[outptr6], %[outptr6], #3\n"
+ "prfm PSTL1KEEP, [%[outptr7], #0x60]\n"
+ "addvl %[outptr7], %[outptr7], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+
+ }
+ }
+ else
+ {
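+ /* General case: load the destination, scale it by beta, then
+ * accumulate alpha * in with a predicated fmla before storing. */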
+ switch(height) {
+ case 1:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z9.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z10.s, p0/z, [%[outptr0], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 2:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z9.s, p0/z, [%[outptr1]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z10.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr1], #2, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+ "addvl %[outptr1], %[outptr1], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 3:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z9.s, p0/z, [%[outptr1]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z10.s, p0/z, [%[outptr2]]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z11.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr2], #1, MUL VL]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z10.s, p0/z, [%[outptr0], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr1], #2, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr2], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #3\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+ "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+ "addvl %[outptr2], %[outptr2], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 4:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z9.s, p0/z, [%[outptr1]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z10.s, p0/z, [%[outptr2]]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z11.s, p0/z, [%[outptr3]]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr2], #1, MUL VL]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr3], #1, MUL VL]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr1], #2, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr2], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #-8, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr3], #2, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #3\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-5, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+ "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+ "addvl %[outptr2], %[outptr2], #3\n"
+ "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+ "addvl %[outptr3], %[outptr3], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 5:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z9.s, p0/z, [%[outptr1]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z10.s, p0/z, [%[outptr2]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z11.s, p0/z, [%[outptr3]]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z8.s, p0/z, [%[outptr4]]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-4, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z9.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr2], #1, MUL VL]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr3], #1, MUL VL]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr4], #1, MUL VL]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #-3, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z10.s, p0/z, [%[outptr0], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr1], #2, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr2], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr3], #2, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #3\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #-5, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr3], #2, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr4], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #-2, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr4], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+ "addvl %[outptr2], %[outptr2], #3\n"
+ "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+ "addvl %[outptr3], %[outptr3], #3\n"
+ "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+ "addvl %[outptr4], %[outptr4], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 6:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z9.s, p0/z, [%[outptr1]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z10.s, p0/z, [%[outptr2]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z11.s, p0/z, [%[outptr3]]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z8.s, p0/z, [%[outptr4]]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-4, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "ld1w z9.s, p0/z, [%[outptr5]]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #-1, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z10.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr2], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr3], #1, MUL VL]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr4], #1, MUL VL]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #-3, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr5], #1, MUL VL]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr5], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr1], #2, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr2], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #-8, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr3], #2, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #3\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-5, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr4], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-2, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4], #2, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr5], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #1, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5], #2, MUL VL]\n"
+ "addvl %[outptr2], %[outptr2], #3\n"
+ "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+ "addvl %[outptr3], %[outptr3], #3\n"
+ "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+ "addvl %[outptr4], %[outptr4], #3\n"
+ "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
+ "addvl %[outptr5], %[outptr5], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 7:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to merge an entire block: out = beta * out + alpha * in */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z9.s, p0/z, [%[outptr1]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z10.s, p0/z, [%[outptr2]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z11.s, p0/z, [%[outptr3]]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z8.s, p0/z, [%[outptr4]]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-4, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "ld1w z9.s, p0/z, [%[outptr5]]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #-1, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5]]\n"
+ "ld1w z10.s, p0/z, [%[outptr6]]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #2, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr6]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z11.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr2], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr3], #1, MUL VL]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr4], #1, MUL VL]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-3, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr5], #1, MUL VL]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr5], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr6], #1, MUL VL]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #3, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr6], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z10.s, p0/z, [%[outptr0], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr1], #2, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr2], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr3], #2, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #3\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #-5, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr3], #2, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr4], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #-2, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr4], #2, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr5], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #1, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr5], #2, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr6], #2, MUL VL]\n"
+ "addvl %[outptr2], %[outptr2], #3\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #4, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr6], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+ "addvl %[outptr3], %[outptr3], #3\n"
+ "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+ "addvl %[outptr4], %[outptr4], #3\n"
+ "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
+ "addvl %[outptr5], %[outptr5], #3\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
+ "prfm PLDL1KEEP, [%[outptr6], #0x60]\n"
+ "addvl %[outptr6], %[outptr6], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ default:
+ case 8:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to merge an entire block: out = beta * out + alpha * in */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z9.s, p0/z, [%[outptr1]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z10.s, p0/z, [%[outptr2]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z11.s, p0/z, [%[outptr3]]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z8.s, p0/z, [%[outptr4]]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-4, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "ld1w z9.s, p0/z, [%[outptr5]]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #-1, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5]]\n"
+ "ld1w z10.s, p0/z, [%[outptr6]]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #2, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr6]]\n"
+ "ld1w z11.s, p0/z, [%[outptr7]]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #5, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr7]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr2], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr3], #1, MUL VL]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr4], #1, MUL VL]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-3, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr5], #1, MUL VL]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5], #1, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr6], #1, MUL VL]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #3, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr6], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr7], #1, MUL VL]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #6, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr7], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr1], #2, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr2], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #-8, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr3], #2, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #3\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-5, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr4], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-2, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4], #2, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr5], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #1, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5], #2, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr6], #2, MUL VL]\n"
+ "addvl %[outptr2], %[outptr2], #3\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #4, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr6], #2, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr7], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #7, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr7], #2, MUL VL]\n"
+ "addvl %[outptr3], %[outptr3], #3\n"
+ "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+ "addvl %[outptr4], %[outptr4], #3\n"
+ "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
+ "addvl %[outptr5], %[outptr5], #3\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
+ "prfm PLDL1KEEP, [%[outptr6], #0x60]\n"
+ "addvl %[outptr6], %[outptr6], #3\n"
+ "prfm PLDL1KEEP, [%[outptr7], #0x60]\n"
+ "addvl %[outptr7], %[outptr7], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ }
+ }
+ }
+ }
+}
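+
+/* Reference semantics of the asm blocks above, as a scalar sketch (for
+ * clarity only; 'VL' stands for the number of 32-bit lanes per vector,
+ * i.e. svcntw(), and outptr[r] for the r-th row pointer):
+ *
+ *     for (int r = 0; r < height; r++)
+ *         for (long x = 0; x < (xmax - i); x++)
+ *             outptr[r][x] = beta * outptr[r][x] + alpha * inptr[r * 3 * VL + x];
+ */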
+
+#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/std_transforms_sve.hpp b/src/core/NEON/kernels/arm_gemm/std_transforms_sve.hpp
new file mode 100644
index 0000000000..b7323ebaea
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/std_transforms_sve.hpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#include "mergeresults.hpp"
+#include "transform.hpp"
+
+namespace arm_gemm {
+
+/*
+ * Define "standard" transforms for the blocked SVE GEMMs.
+ *
+ * This assumes that A is interleaved 'height' ways, B is interleaved
+ * 'width'xVL ways and transposed, and that the merge needs to work in
+ * 'height' x 'width'xVL blocks.
+ *
+ * The optional 'block' parameter is for kernels using dot-product type
+ * instructions like UDOT and SDOT.
+ */
+template<typename TOperand, typename TResult, unsigned int height, unsigned int width_vectors, unsigned int block=1, unsigned int mmla=1>
+class StdTransformsSVE
+{
+public:
+ template<typename TIn>
+ void PrepareA(TOperand *out, const TIn *in, const int stride, const int y0,
+ const int ymax, const int k0, const int kmax, bool transposed) {
+ if (transposed) {
+ Transform<height, block, true>(out, in, stride, y0, ymax, k0, kmax);
+ } else {
+ Transform<height, block, false>(out, in, stride, y0, ymax, k0, kmax);
+ }
+ }
+
+ template<typename TIn>
+ void PrepareB(TOperand *out, const TIn *in, const int stride, const int x0,
+ const int xmax, const int k0, const int kmax, bool transposed) {
+ if (transposed) {
+ Transform<width_vectors, block, false, true>(out, in, stride, x0, xmax, k0, kmax);
+ } else {
+ Transform<width_vectors, block, true, true>(out, in, stride, x0, xmax, k0, kmax);
+ }
+ }
+
+ template<typename TOut>
+ void Merge(TOut *out, const TResult *in, int stride, int y0, int ymax, int x0, int xmax, const TOut alpha, const TOut beta) {
+ MergeResults<width_vectors / mmla, height, true>(out, in, stride, y0, ymax, x0, xmax, alpha, beta);
+ }
+};
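+
+/* Typical use (an illustrative sketch, following the pattern of the existing
+ * non-SVE kernels): a kernel header exposes a matching instance as its
+ * 'transforms' member, e.g. for an 8-row by 3xVL-column FP32 kernel:
+ *
+ *     StdTransformsSVE<float, float, 8, 3> transforms = {};
+ */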
+
+} // namespace arm_gemm
diff --git a/src/core/NEON/kernels/arm_gemm/transform.hpp b/src/core/NEON/kernels/arm_gemm/transform.hpp
index 77d0d87a4d..e422b91c83 100644
--- a/src/core/NEON/kernels/arm_gemm/transform.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transform.hpp
@@ -40,7 +40,7 @@ struct TransformImpl {
static void Transform(TOut* out, const TIn* const in, const int stride,
const int y0, const int ymax, const int x0, const int xmax) {
// For SVE cases we multiply the interleave factor by the vector length.
- const unsigned int IntBy = tIntBy * (sve ? get_vector_length<TOut>() : 1);
+ const unsigned int IntBy = tIntBy * (sve ? get_vector_length<TOut>() / BlockBy : 1);
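+    // Each interleaved entry carries BlockBy consecutive elements, so the
+    // entry count is scaled by VL / BlockBy to keep each strip at tIntBy
+    // whole vectors of elements (e.g. 256-bit SVE, 32-bit data, BlockBy=2:
+    // 8 / 2 = 4 entries per vector).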
const int n_whole_y_blocks = (ymax - y0) / IntBy;
const int y_remainders = (ymax - y0) % IntBy;
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/list.hpp b/src/core/NEON/kernels/arm_gemm/transforms/list.hpp
index 8ad5b857fb..17328a5d6a 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/list.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/list.hpp
@@ -23,9 +23,17 @@
*/
#include "a32_interleave_6way_32bit.hpp"
#include "a32_transpose_interleave_8way_32bit.hpp"
+#ifdef __ARM_FEATURE_SVE
+#include "sve_interleave_8way_32bit.hpp"
+#include "sve_interleave_8way_block2_16bit.hpp"
+#include "sve_interleave_8way_block2_32bit.hpp"
+#include "sve_interleave_8way_block4_16bit.hpp"
+#include "sve_interleave_8way_block4_8bit.hpp"
+#else
+#include "a64_interleave_8way_32bit.hpp"
+#endif
#include "a64_block16_interleave4_8bit.hpp"
#include "a64_interleave_8way_16bit.hpp"
-#include "a64_interleave_8way_32bit.hpp"
#include "a64_interleave_8way_half_to_float.hpp"
#include "a64_transpose_interleave_12way_16bit.hpp"
#include "a64_transpose_interleave_12way_half_to_float.hpp"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_32bit.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_32bit.hpp
new file mode 100644
index 0000000000..752e837f8d
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_32bit.hpp
@@ -0,0 +1,596 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+template<>
+template<typename T>
+inline void TransformImpl<8, 1, false, 4, 4, false>::Transform(T *out, const T *in, int ldin, int y0, int ymax, int k0, int kmax)
+{
+ uint32_t *master_outptr = reinterpret_cast<uint32_t *>(out);
+ const uint32_t *inptr = reinterpret_cast<const uint32_t *>(in);
+
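+    /* The cases below interleave a block of up to 8 rows.  Each one loads
+     * one vector per live row (zeroed vectors stand in for rows past
+     * 'height'), then three rounds of zip1/zip2 weave the eight sources into
+     * eight stores whose lanes cycle through the rows; whilelt predicates
+     * handle the input and output tails. */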
+ for (int y=y0; y<ymax; y+=8)
+ {
+ const int height = ymax-y;
+ const long inwidth = (kmax - k0);
+ const long outwidth = inwidth * 8;
+ long inpos = 0;
+ long outpos = 0;
+
+ uint32_t *outptr = master_outptr;
+ master_outptr += outwidth;
+
+ const uint32_t *inptr0 = inptr + y * ldin + k0;
+ const uint32_t *inptr1 = inptr0 + ldin;
+ const uint32_t *inptr2 = inptr1 + ldin;
+ const uint32_t *inptr3 = inptr2 + ldin;
+ const uint32_t *inptr4 = inptr3 + ldin;
+ const uint32_t *inptr5 = inptr4 + ldin;
+ const uint32_t *inptr6 = inptr5 + ldin;
+ const uint32_t *inptr7 = inptr6 + ldin;
+
+ switch(height)
+ {
+ case 1:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.s, #0\n"
+ "ld1w z0.s, p0/z, [%[inptr0], %[inpos], LSL #2]\n"
+ "incw %[inpos], all, mul #1\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z0.s, z8.s, z4.s\n"
+ "zip2 z1.s, z8.s, z4.s\n"
+ "zip1 z2.s, z9.s, z4.s\n"
+ "zip2 z3.s, z9.s, z4.s\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z4.s\n"
+ "zip2 z11.s, z1.s, z4.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip1 z12.s, z2.s, z4.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z14.s, z3.s, z4.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip2 z15.s, z3.s, z4.s\n"
+ "whilelt p4.s, %[outpos], %[outwidth]\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p5.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z12.s, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p6.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z13.s, p5, [%[outptr], #5, MUL VL]\n"
+ "whilelt p7.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z14.s, p6, [%[outptr], #6, MUL VL]\n"
+ "st1w z15.s, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 2:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.s, #0\n"
+ "mov z14.s, #0\n"
+ "ld1w z0.s, p0/z, [%[inptr0], %[inpos], LSL #2]\n"
+ "ld1w z1.s, p0/z, [%[inptr1], %[inpos], LSL #2]\n"
+ "incw %[inpos], all, mul #1\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z4.s\n"
+ "zip2 z11.s, z1.s, z4.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip1 z0.s, z8.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z1.s, z8.s, z4.s\n"
+ "zip1 z2.s, z9.s, z4.s\n"
+ "zip2 z3.s, z9.s, z4.s\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z6.s, z11.s, z14.s\n"
+ "zip2 z7.s, z11.s, z14.s\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "whilelt p4.s, %[outpos], %[outwidth]\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "whilelt p5.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p6.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z12.s, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p7.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z13.s, p5, [%[outptr], #5, MUL VL]\n"
+ "st1w z14.s, p6, [%[outptr], #6, MUL VL]\n"
+ "st1w z15.s, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 3:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.s, #0\n"
+ "mov z14.s, #0\n"
+ "ld1w z0.s, p0/z, [%[inptr0], %[inpos], LSL #2]\n"
+ "ld1w z1.s, p0/z, [%[inptr1], %[inpos], LSL #2]\n"
+ "ld1w z2.s, p0/z, [%[inptr2], %[inpos], LSL #2]\n"
+ "incw %[inpos], all, mul #1\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z4.s\n"
+ "zip2 z11.s, z1.s, z4.s\n"
+ "zip1 z12.s, z2.s, z4.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "zip1 z6.s, z11.s, z14.s\n"
+ "zip2 z7.s, z11.s, z14.s\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "whilelt p4.s, %[outpos], %[outwidth]\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "whilelt p5.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p6.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z12.s, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p7.s, %[outpos], %[outwidth]\n"
+ "st1w z13.s, p5, [%[outptr], #5, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z14.s, p6, [%[outptr], #6, MUL VL]\n"
+ "st1w z15.s, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 4:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.s, #0\n"
+ "ld1w z0.s, p0/z, [%[inptr0], %[inpos], LSL #2]\n"
+ "ld1w z1.s, p0/z, [%[inptr1], %[inpos], LSL #2]\n"
+ "ld1w z2.s, p0/z, [%[inptr2], %[inpos], LSL #2]\n"
+ "ld1w z3.s, p0/z, [%[inptr3], %[inpos], LSL #2]\n"
+ "incw %[inpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z10.s, z1.s, z4.s\n"
+ "zip2 z11.s, z1.s, z4.s\n"
+ "zip1 z12.s, z2.s, z4.s\n"
+ "zip2 z13.s, z2.s, z4.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip1 z14.s, z3.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z4.s\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p4.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p5.s, %[outpos], %[outwidth]\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p6.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z12.s, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p7.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z13.s, p5, [%[outptr], #5, MUL VL]\n"
+ "st1w z14.s, p6, [%[outptr], #6, MUL VL]\n"
+ "st1w z15.s, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 5:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z5.s, #0\n"
+ "ld1w z0.s, p0/z, [%[inptr0], %[inpos], LSL #2]\n"
+ "ld1w z1.s, p0/z, [%[inptr1], %[inpos], LSL #2]\n"
+ "ld1w z2.s, p0/z, [%[inptr2], %[inpos], LSL #2]\n"
+ "ld1w z3.s, p0/z, [%[inptr3], %[inpos], LSL #2]\n"
+ "ld1w z4.s, p0/z, [%[inptr4], %[inpos], LSL #2]\n"
+ "incw %[inpos], all, mul #1\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z12.s, z2.s, z5.s\n"
+ "zip2 z13.s, z2.s, z5.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip1 z14.s, z3.s, z5.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z5.s\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p4.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p5.s, %[outpos], %[outwidth]\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p6.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z12.s, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p7.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z13.s, p5, [%[outptr], #5, MUL VL]\n"
+ "st1w z14.s, p6, [%[outptr], #6, MUL VL]\n"
+ "st1w z15.s, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 6:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z6.s, #0\n"
+ "ld1w z0.s, p0/z, [%[inptr0], %[inpos], LSL #2]\n"
+ "ld1w z1.s, p0/z, [%[inptr1], %[inpos], LSL #2]\n"
+ "ld1w z2.s, p0/z, [%[inptr2], %[inpos], LSL #2]\n"
+ "ld1w z3.s, p0/z, [%[inptr3], %[inpos], LSL #2]\n"
+ "ld1w z4.s, p0/z, [%[inptr4], %[inpos], LSL #2]\n"
+ "ld1w z5.s, p0/z, [%[inptr5], %[inpos], LSL #2]\n"
+ "incw %[inpos], all, mul #1\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip1 z14.s, z3.s, z6.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z6.s\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p4.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p5.s, %[outpos], %[outwidth]\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p6.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z12.s, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p7.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z13.s, p5, [%[outptr], #5, MUL VL]\n"
+ "st1w z14.s, p6, [%[outptr], #6, MUL VL]\n"
+ "st1w z15.s, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 7:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z7.s, #0\n"
+ "ld1w z0.s, p0/z, [%[inptr0], %[inpos], LSL #2]\n"
+ "ld1w z1.s, p0/z, [%[inptr1], %[inpos], LSL #2]\n"
+ "ld1w z2.s, p0/z, [%[inptr2], %[inpos], LSL #2]\n"
+ "ld1w z3.s, p0/z, [%[inptr3], %[inpos], LSL #2]\n"
+ "ld1w z4.s, p0/z, [%[inptr4], %[inpos], LSL #2]\n"
+ "ld1w z5.s, p0/z, [%[inptr5], %[inpos], LSL #2]\n"
+ "ld1w z6.s, p0/z, [%[inptr6], %[inpos], LSL #2]\n"
+ "incw %[inpos], all, mul #1\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p4.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p5.s, %[outpos], %[outwidth]\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p6.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z12.s, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p7.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z13.s, p5, [%[outptr], #5, MUL VL]\n"
+ "st1w z14.s, p6, [%[outptr], #6, MUL VL]\n"
+ "st1w z15.s, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [inptr6] "+r" (inptr6)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ default:
+ case 8:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "ld1w z0.s, p0/z, [%[inptr0], %[inpos], LSL #2]\n"
+ "ld1w z1.s, p0/z, [%[inptr1], %[inpos], LSL #2]\n"
+ "ld1w z2.s, p0/z, [%[inptr2], %[inpos], LSL #2]\n"
+ "ld1w z3.s, p0/z, [%[inptr3], %[inpos], LSL #2]\n"
+ "ld1w z4.s, p0/z, [%[inptr4], %[inpos], LSL #2]\n"
+ "ld1w z5.s, p0/z, [%[inptr5], %[inpos], LSL #2]\n"
+ "ld1w z6.s, p0/z, [%[inptr6], %[inpos], LSL #2]\n"
+ "ld1w z7.s, p0/z, [%[inptr7], %[inpos], LSL #2]\n"
+ "incw %[inpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p4.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p5.s, %[outpos], %[outwidth]\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p6.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z12.s, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p7.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z13.s, p5, [%[outptr], #5, MUL VL]\n"
+ "st1w z14.s, p6, [%[outptr], #6, MUL VL]\n"
+ "st1w z15.s, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [inptr6] "+r" (inptr6), [inptr7] "+r" (inptr7)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ }
+ }
+}
+
+#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block2_16bit.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block2_16bit.hpp
new file mode 100644
index 0000000000..63c21be6bb
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block2_16bit.hpp
@@ -0,0 +1,632 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+template<>
+template<typename T>
+inline void TransformImpl<8, 2, false, 2, 2, false>::Transform(T *out, const T *in, int ldin, int y0, int ymax, int k0, int kmax)
+{
+ uint16_t *master_outptr = reinterpret_cast<uint16_t *>(out);
+ const uint16_t *inptr = reinterpret_cast<const uint16_t *>(in);
+
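+    /* Same 8-way interleave as the 32-bit version, but the 16-bit data is
+     * zipped at .s granularity so that pairs of 16-bit values stay together,
+     * giving the block-2 layout the dot-product kernels consume.  outwidth
+     * therefore counts 32-bit pairs: ceil(inwidth * 8 / 2). */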
+ for (int y=y0; y<ymax; y+=8)
+ {
+ const int height = ymax-y;
+ const long inwidth = (kmax - k0);
+ const long outwidth = (inwidth * 8 + 1) / 2;
+ long inpos = 0;
+ long outpos = 0;
+
+ uint16_t *outptr = master_outptr;
+ master_outptr += (outwidth * 2);
+
+ const uint16_t *inptr0 = inptr + y * ldin + k0;
+ const uint16_t *inptr1 = inptr0 + ldin;
+ const uint16_t *inptr2 = inptr1 + ldin;
+ const uint16_t *inptr3 = inptr2 + ldin;
+ const uint16_t *inptr4 = inptr3 + ldin;
+ const uint16_t *inptr5 = inptr4 + ldin;
+ const uint16_t *inptr6 = inptr5 + ldin;
+ const uint16_t *inptr7 = inptr6 + ldin;
+
+ switch(height)
+ {
+ case 1:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.h, #0\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "inch %[inpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip1 z0.s, z8.s, z4.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z1.s, z8.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z2.s, z9.s, z4.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.s, z1.s, z4.s\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip2 z11.s, z1.s, z4.s\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "zip1 z12.s, z2.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z13.s, z2.s, z4.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip1 z14.s, z3.s, z4.s\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "zip2 z15.s, z3.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "st1w z12.s, p0, [%[outptr], #4, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "st1w z13.s, p1, [%[outptr], #5, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "st1w z14.s, p2, [%[outptr], #6, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "st1w z15.s, p3, [%[outptr], #7, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 2:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.h, #0\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "ld1h z1.h, p0/z, [%[inptr1]]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "inch %[inpos], all, mul #1\n"
+ "zip1 z10.s, z1.s, z4.s\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip2 z11.s, z1.s, z4.s\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip1 z0.s, z8.s, z4.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z1.s, z8.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z2.s, z9.s, z4.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "mov z14.h, #0\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip1 z6.s, z11.s, z14.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z7.s, z11.s, z14.s\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "st1w z12.s, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "st1w z13.s, p1, [%[outptr], #5, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "st1w z14.s, p2, [%[outptr], #6, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "st1w z15.s, p3, [%[outptr], #7, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 3:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.h, #0\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "ld1h z1.h, p0/z, [%[inptr1]]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "ld1h z2.h, p0/z, [%[inptr2]]\n"
+ "zip1 z10.s, z1.s, z4.s\n"
+ "inch %[inpos], all, mul #1\n"
+ "zip2 z11.s, z1.s, z4.s\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip1 z12.s, z2.s, z4.s\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip2 z13.s, z2.s, z4.s\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "mov z14.h, #0\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip1 z6.s, z11.s, z14.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z7.s, z11.s, z14.s\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "st1w z12.s, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "st1w z13.s, p1, [%[outptr], #5, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "st1w z14.s, p2, [%[outptr], #6, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "st1w z15.s, p3, [%[outptr], #7, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 4:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.h, #0\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "ld1h z1.h, p0/z, [%[inptr1]]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "ld1h z2.h, p0/z, [%[inptr2]]\n"
+ "zip1 z10.s, z1.s, z4.s\n"
+ "ld1h z3.h, p0/z, [%[inptr3]]\n"
+ "zip2 z11.s, z1.s, z4.s\n"
+ "inch %[inpos], all, mul #1\n"
+ "zip1 z12.s, z2.s, z4.s\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip2 z13.s, z2.s, z4.s\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip1 z14.s, z3.s, z4.s\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip2 z15.s, z3.s, z4.s\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "st1w z12.s, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "st1w z13.s, p1, [%[outptr], #5, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "st1w z14.s, p2, [%[outptr], #6, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "st1w z15.s, p3, [%[outptr], #7, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 5:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z5.h, #0\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "ld1h z1.h, p0/z, [%[inptr1]]\n"
+ "inch %[inpos], all, mul #1\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "ld1h z2.h, p0/z, [%[inptr2]]\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "ld1h z3.h, p0/z, [%[inptr3]]\n"
+ "zip1 z12.s, z2.s, z5.s\n"
+ "ld1h z4.h, p0/z, [%[inptr4]]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip2 z13.s, z2.s, z5.s\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip1 z14.s, z3.s, z5.s\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip2 z15.s, z3.s, z5.s\n"
+ "addvl %[inptr4], %[inptr4], #1\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "st1w z12.s, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "st1w z13.s, p1, [%[outptr], #5, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "st1w z14.s, p2, [%[outptr], #6, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "st1w z15.s, p3, [%[outptr], #7, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 6:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z6.h, #0\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "ld1h z1.h, p0/z, [%[inptr1]]\n"
+ "inch %[inpos], all, mul #1\n"
+ "ld1h z2.h, p0/z, [%[inptr2]]\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "ld1h z3.h, p0/z, [%[inptr3]]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "ld1h z4.h, p0/z, [%[inptr4]]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "ld1h z5.h, p0/z, [%[inptr5]]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip1 z14.s, z3.s, z6.s\n"
+ "addvl %[inptr4], %[inptr4], #1\n"
+ "zip2 z15.s, z3.s, z6.s\n"
+ "addvl %[inptr5], %[inptr5], #1\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "st1w z12.s, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "st1w z13.s, p1, [%[outptr], #5, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "st1w z14.s, p2, [%[outptr], #6, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "st1w z15.s, p3, [%[outptr], #7, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 7:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z7.h, #0\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "ld1h z1.h, p0/z, [%[inptr1]]\n"
+ "inch %[inpos], all, mul #1\n"
+ "ld1h z2.h, p0/z, [%[inptr2]]\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "ld1h z3.h, p0/z, [%[inptr3]]\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "ld1h z4.h, p0/z, [%[inptr4]]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "ld1h z5.h, p0/z, [%[inptr5]]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "ld1h z6.h, p0/z, [%[inptr6]]\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "addvl %[inptr4], %[inptr4], #1\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "addvl %[inptr5], %[inptr5], #1\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "addvl %[inptr6], %[inptr6], #1\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "st1w z12.s, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "st1w z13.s, p1, [%[outptr], #5, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "st1w z14.s, p2, [%[outptr], #6, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "st1w z15.s, p3, [%[outptr], #7, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [inptr6] "+r" (inptr6)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ default:
+ case 8:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "inch %[inpos], all, mul #1\n"
+ "ld1h z1.h, p0/z, [%[inptr1]]\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "ld1h z2.h, p0/z, [%[inptr2]]\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "ld1h z3.h, p0/z, [%[inptr3]]\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "ld1h z4.h, p0/z, [%[inptr4]]\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "ld1h z5.h, p0/z, [%[inptr5]]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "ld1h z6.h, p0/z, [%[inptr6]]\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "ld1h z7.h, p0/z, [%[inptr7]]\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "addvl %[inptr4], %[inptr4], #1\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "addvl %[inptr5], %[inptr5], #1\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "addvl %[inptr6], %[inptr6], #1\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "addvl %[inptr7], %[inptr7], #1\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "st1w z12.s, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "st1w z13.s, p1, [%[outptr], #5, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "st1w z14.s, p2, [%[outptr], #6, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "st1w z15.s, p3, [%[outptr], #7, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [inptr6] "+r" (inptr6), [inptr7] "+r" (inptr7)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+
+ }
+ }
+}
+
+#endif // __ARM_FEATURE_SVE
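
Every case in these transforms shares the same predicated loop skeleton: whilelt builds a governing predicate from the elements remaining in the row, b.none exits once that predicate is all-false, the p0/z loads zero out inactive lanes, and inch/incw/incd and addvl advance positions and pointers by whole (hardware-dependent) vector lengths. A minimal scalar sketch of that skeleton, where the helper name and the explicit vl parameter (the vector length in elements, which the asm derives implicitly) are ours, not the library's:

#include <algorithm>
#include <cstdint>

// Scalar sketch of the whilelt/b.none/addvl loop shape (illustrative only).
static void predicated_copy_sketch(uint16_t *out, const uint16_t *in,
                                   long inwidth, long vl)
{
    for (long inpos = 0; inpos < inwidth; inpos += vl)   // whilelt p0 / b.none 2f
    {
        const long active = std::min(vl, inwidth - inpos);
        for (long lane = 0; lane < vl; lane++)
            out[lane] = (lane < active) ? in[inpos + lane] : 0;  // ld1h p0/z
        out += vl;                                       // addvl %[outptr], ...
    }
}

The real kernels layer the zip/interleave stages on top of this loop, but the entry test, the zeroing of the tail lanes, and the whole-vector pointer bumps follow exactly this shape.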
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block2_32bit.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block2_32bit.hpp
new file mode 100644
index 0000000000..4cc4311cee
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block2_32bit.hpp
@@ -0,0 +1,632 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+template<>
+template<typename T>
+inline void TransformImpl<8, 2, false, 4, 4, false>::Transform(T *out, const T *in, int ldin, int y0, int ymax, int k0, int kmax)
+{
+ uint32_t *master_outptr = reinterpret_cast<uint32_t *>(out);
+ const uint32_t *inptr = reinterpret_cast<const uint32_t *>(in);
+
+ for (int y=y0; y<ymax; y+=8)
+ {
+ const int height = ymax-y;
+ const long inwidth = (kmax - k0);
+ const long outwidth = (inwidth * 8 + 1) / 2;
+ long inpos = 0;
+ long outpos = 0;
+
+ uint32_t *outptr = master_outptr;
+ master_outptr += (outwidth * 2);
+
+ const uint32_t *inptr0 = inptr + y * ldin + k0;
+ const uint32_t *inptr1 = inptr0 + ldin;
+ const uint32_t *inptr2 = inptr1 + ldin;
+ const uint32_t *inptr3 = inptr2 + ldin;
+ const uint32_t *inptr4 = inptr3 + ldin;
+ const uint32_t *inptr5 = inptr4 + ldin;
+ const uint32_t *inptr6 = inptr5 + ldin;
+ const uint32_t *inptr7 = inptr6 + ldin;
+
+ switch(height)
+ {
+ case 1:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.s, #0\n"
+ "ld1w z0.s, p0/z, [%[inptr0]]\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "incw %[inpos], all, mul #1\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip1 z0.d, z8.d, z4.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z1.d, z8.d, z4.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z2.d, z9.d, z4.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip2 z3.d, z9.d, z4.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z4.d\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip2 z11.d, z1.d, z4.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip1 z12.d, z2.d, z4.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z13.d, z2.d, z4.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip1 z14.d, z3.d, z4.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip2 z15.d, z3.d, z4.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 2:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.s, #0\n"
+ "ld1w z0.s, p0/z, [%[inptr0]]\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "ld1w z1.s, p0/z, [%[inptr1]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "incw %[inpos], all, mul #1\n"
+ "zip1 z10.d, z1.d, z4.d\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip2 z11.d, z1.d, z4.d\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip1 z0.d, z8.d, z4.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z1.d, z8.d, z4.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z2.d, z9.d, z4.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip2 z3.d, z9.d, z4.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "mov z14.s, #0\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip1 z4.d, z10.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z5.d, z10.d, z14.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip1 z6.d, z11.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z7.d, z11.d, z14.d\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 3:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.s, #0\n"
+ "ld1w z0.s, p0/z, [%[inptr0]]\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "ld1w z1.s, p0/z, [%[inptr1]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "ld1w z2.s, p0/z, [%[inptr2]]\n"
+ "zip1 z10.d, z1.d, z4.d\n"
+ "incw %[inpos], all, mul #1\n"
+ "zip2 z11.d, z1.d, z4.d\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip1 z12.d, z2.d, z4.d\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip2 z13.d, z2.d, z4.d\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip1 z0.d, z8.d, z12.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z1.d, z8.d, z12.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z2.d, z9.d, z13.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip2 z3.d, z9.d, z13.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "mov z14.s, #0\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip1 z4.d, z10.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z5.d, z10.d, z14.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip1 z6.d, z11.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z7.d, z11.d, z14.d\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 4:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.s, #0\n"
+ "ld1w z0.s, p0/z, [%[inptr0]]\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "ld1w z1.s, p0/z, [%[inptr1]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "ld1w z2.s, p0/z, [%[inptr2]]\n"
+ "zip1 z10.d, z1.d, z4.d\n"
+ "ld1w z3.s, p0/z, [%[inptr3]]\n"
+ "zip2 z11.d, z1.d, z4.d\n"
+ "incw %[inpos], all, mul #1\n"
+ "zip1 z12.d, z2.d, z4.d\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip2 z13.d, z2.d, z4.d\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip1 z14.d, z3.d, z4.d\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip2 z15.d, z3.d, z4.d\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip1 z0.d, z8.d, z12.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z1.d, z8.d, z12.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z2.d, z9.d, z13.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip2 z3.d, z9.d, z13.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z4.d, z10.d, z14.d\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip2 z5.d, z10.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z6.d, z11.d, z15.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip2 z7.d, z11.d, z15.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 5:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z5.s, #0\n"
+ "ld1w z0.s, p0/z, [%[inptr0]]\n"
+ "ld1w z1.s, p0/z, [%[inptr1]]\n"
+ "incw %[inpos], all, mul #1\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "ld1w z2.s, p0/z, [%[inptr2]]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "ld1w z3.s, p0/z, [%[inptr3]]\n"
+ "zip1 z12.d, z2.d, z5.d\n"
+ "ld1w z4.s, p0/z, [%[inptr4]]\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip2 z13.d, z2.d, z5.d\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip1 z14.d, z3.d, z5.d\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip2 z15.d, z3.d, z5.d\n"
+ "addvl %[inptr4], %[inptr4], #1\n"
+ "zip1 z0.d, z8.d, z12.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z1.d, z8.d, z12.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z2.d, z9.d, z13.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip2 z3.d, z9.d, z13.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z4.d, z10.d, z14.d\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip2 z5.d, z10.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z6.d, z11.d, z15.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip2 z7.d, z11.d, z15.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 6:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z6.s, #0\n"
+ "ld1w z0.s, p0/z, [%[inptr0]]\n"
+ "ld1w z1.s, p0/z, [%[inptr1]]\n"
+ "incw %[inpos], all, mul #1\n"
+ "ld1w z2.s, p0/z, [%[inptr2]]\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "ld1w z3.s, p0/z, [%[inptr3]]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "ld1w z4.s, p0/z, [%[inptr4]]\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "ld1w z5.s, p0/z, [%[inptr5]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip1 z14.d, z3.d, z6.d\n"
+ "addvl %[inptr4], %[inptr4], #1\n"
+ "zip2 z15.d, z3.d, z6.d\n"
+ "addvl %[inptr5], %[inptr5], #1\n"
+ "zip1 z0.d, z8.d, z12.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z1.d, z8.d, z12.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z2.d, z9.d, z13.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip2 z3.d, z9.d, z13.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z4.d, z10.d, z14.d\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip2 z5.d, z10.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z6.d, z11.d, z15.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip2 z7.d, z11.d, z15.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 7:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z7.s, #0\n"
+ "ld1w z0.s, p0/z, [%[inptr0]]\n"
+ "ld1w z1.s, p0/z, [%[inptr1]]\n"
+ "incw %[inpos], all, mul #1\n"
+ "ld1w z2.s, p0/z, [%[inptr2]]\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "ld1w z3.s, p0/z, [%[inptr3]]\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "ld1w z4.s, p0/z, [%[inptr4]]\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "ld1w z5.s, p0/z, [%[inptr5]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "ld1w z6.s, p0/z, [%[inptr6]]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "addvl %[inptr4], %[inptr4], #1\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "addvl %[inptr5], %[inptr5], #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "addvl %[inptr6], %[inptr6], #1\n"
+ "zip1 z0.d, z8.d, z12.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z1.d, z8.d, z12.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z2.d, z9.d, z13.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip2 z3.d, z9.d, z13.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z4.d, z10.d, z14.d\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip2 z5.d, z10.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z6.d, z11.d, z15.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip2 z7.d, z11.d, z15.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [inptr6] "+r" (inptr6)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ default:
+ case 8:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "ld1w z0.s, p0/z, [%[inptr0]]\n"
+ "incw %[inpos], all, mul #1\n"
+ "ld1w z1.s, p0/z, [%[inptr1]]\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "ld1w z2.s, p0/z, [%[inptr2]]\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "ld1w z3.s, p0/z, [%[inptr3]]\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "ld1w z4.s, p0/z, [%[inptr4]]\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "ld1w z5.s, p0/z, [%[inptr5]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "ld1w z6.s, p0/z, [%[inptr6]]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "ld1w z7.s, p0/z, [%[inptr7]]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "addvl %[inptr4], %[inptr4], #1\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "addvl %[inptr5], %[inptr5], #1\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "addvl %[inptr6], %[inptr6], #1\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "addvl %[inptr7], %[inptr7], #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip1 z0.d, z8.d, z12.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z1.d, z8.d, z12.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip1 z2.d, z9.d, z13.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z3.d, z9.d, z13.d\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip1 z4.d, z10.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z5.d, z10.d, z14.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip1 z6.d, z11.d, z15.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z7.d, z11.d, z15.d\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [inptr6] "+r" (inptr6), [inptr7] "+r" (inptr7)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+
+ }
+ }
+}
+
+#endif // __ARM_FEATURE_SVE
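
For reference, the net effect of the block-2, 32-bit transform above can be stated in scalar code: the eight rows of a group are emitted block-major, two consecutive elements per row at a time, with rows at or beyond the remaining height reading as zero (the zeroed z-registers in the short-height cases). A hedged sketch, with names of our choosing, assuming the width is a multiple of the block size so tail predication never truncates a block:

#include <cstdint>

// Scalar reference for the 8-way, block-2, 32-bit interleave (illustrative).
// 'in' points at the first element of the 8-row group (row y, column k0).
static void interleave8_block2_ref(uint32_t *out, const uint32_t *in,
                                   int ldin, int height, int width)
{
    for (int b = 0; b < width / 2; b++)        // column blocks of 2
        for (int row = 0; row < 8; row++)      // all 8 output rows per block
            for (int i = 0; i < 2; i++)
            {
                const int col = b * 2 + i;
                *out++ = (row < height) ? in[row * ldin + col] : 0u;
            }
}

The three rounds of zip at .d granularity in the height-8 case implement exactly this ordering: each 64-bit granule is one two-element block, and successive granules cycle through rows 0..7 before moving to the next pair of columns.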
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block4_16bit.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block4_16bit.hpp
new file mode 100644
index 0000000000..f493786320
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block4_16bit.hpp
@@ -0,0 +1,632 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+template<>
+template<typename T>
+inline void TransformImpl<8, 4, false, 2, 2, false>::Transform(T *out, const T *in, int ldin, int y0, int ymax, int k0, int kmax)
+{
+ uint16_t *master_outptr = reinterpret_cast<uint16_t *>(out);
+ const uint16_t *inptr = reinterpret_cast<const uint16_t *>(in);
+
+ for (int y=y0; y<ymax; y+=8)
+ {
+ const int height = ymax-y;
+ const long inwidth = (kmax - k0);
+ const long outwidth = (inwidth * 8 + 3) / 4;
+ long inpos = 0;
+ long outpos = 0;
+
+ uint16_t *outptr = master_outptr;
+ master_outptr += (outwidth * 4);
+
+ const uint16_t *inptr0 = inptr + y * ldin + k0;
+ const uint16_t *inptr1 = inptr0 + ldin;
+ const uint16_t *inptr2 = inptr1 + ldin;
+ const uint16_t *inptr3 = inptr2 + ldin;
+ const uint16_t *inptr4 = inptr3 + ldin;
+ const uint16_t *inptr5 = inptr4 + ldin;
+ const uint16_t *inptr6 = inptr5 + ldin;
+ const uint16_t *inptr7 = inptr6 + ldin;
+
+ switch(height)
+ {
+ case 1:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.h, #0\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "inch %[inpos], all, mul #1\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip1 z0.d, z8.d, z4.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z1.d, z8.d, z4.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z2.d, z9.d, z4.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip2 z3.d, z9.d, z4.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z4.d\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip2 z11.d, z1.d, z4.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip1 z12.d, z2.d, z4.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z13.d, z2.d, z4.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip1 z14.d, z3.d, z4.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip2 z15.d, z3.d, z4.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 2:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.h, #0\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "ld1h z1.h, p0/z, [%[inptr1]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "inch %[inpos], all, mul #1\n"
+ "zip1 z10.d, z1.d, z4.d\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip2 z11.d, z1.d, z4.d\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip1 z0.d, z8.d, z4.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z1.d, z8.d, z4.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z2.d, z9.d, z4.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip2 z3.d, z9.d, z4.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "mov z14.h, #0\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip1 z4.d, z10.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z5.d, z10.d, z14.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip1 z6.d, z11.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z7.d, z11.d, z14.d\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 3:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.h, #0\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "ld1h z1.h, p0/z, [%[inptr1]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "ld1h z2.h, p0/z, [%[inptr2]]\n"
+ "zip1 z10.d, z1.d, z4.d\n"
+ "inch %[inpos], all, mul #1\n"
+ "zip2 z11.d, z1.d, z4.d\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip1 z12.d, z2.d, z4.d\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip2 z13.d, z2.d, z4.d\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip1 z0.d, z8.d, z12.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z1.d, z8.d, z12.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z2.d, z9.d, z13.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip2 z3.d, z9.d, z13.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "mov z14.h, #0\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip1 z4.d, z10.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z5.d, z10.d, z14.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip1 z6.d, z11.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z7.d, z11.d, z14.d\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 4:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.h, #0\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "ld1h z1.h, p0/z, [%[inptr1]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "ld1h z2.h, p0/z, [%[inptr2]]\n"
+ "zip1 z10.d, z1.d, z4.d\n"
+ "ld1h z3.h, p0/z, [%[inptr3]]\n"
+ "zip2 z11.d, z1.d, z4.d\n"
+ "inch %[inpos], all, mul #1\n"
+ "zip1 z12.d, z2.d, z4.d\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip2 z13.d, z2.d, z4.d\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip1 z14.d, z3.d, z4.d\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip2 z15.d, z3.d, z4.d\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip1 z0.d, z8.d, z12.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z1.d, z8.d, z12.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z2.d, z9.d, z13.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip2 z3.d, z9.d, z13.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z4.d, z10.d, z14.d\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip2 z5.d, z10.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z6.d, z11.d, z15.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip2 z7.d, z11.d, z15.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 5:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z5.h, #0\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "ld1h z1.h, p0/z, [%[inptr1]]\n"
+ "inch %[inpos], all, mul #1\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "ld1h z2.h, p0/z, [%[inptr2]]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "ld1h z3.h, p0/z, [%[inptr3]]\n"
+ "zip1 z12.d, z2.d, z5.d\n"
+ "ld1h z4.h, p0/z, [%[inptr4]]\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip2 z13.d, z2.d, z5.d\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip1 z14.d, z3.d, z5.d\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip2 z15.d, z3.d, z5.d\n"
+ "addvl %[inptr4], %[inptr4], #1\n"
+ "zip1 z0.d, z8.d, z12.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z1.d, z8.d, z12.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z2.d, z9.d, z13.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip2 z3.d, z9.d, z13.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z4.d, z10.d, z14.d\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip2 z5.d, z10.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z6.d, z11.d, z15.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip2 z7.d, z11.d, z15.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 6:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z6.h, #0\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "ld1h z1.h, p0/z, [%[inptr1]]\n"
+ "inch %[inpos], all, mul #1\n"
+ "ld1h z2.h, p0/z, [%[inptr2]]\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "ld1h z3.h, p0/z, [%[inptr3]]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "ld1h z4.h, p0/z, [%[inptr4]]\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "ld1h z5.h, p0/z, [%[inptr5]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip1 z14.d, z3.d, z6.d\n"
+ "addvl %[inptr4], %[inptr4], #1\n"
+ "zip2 z15.d, z3.d, z6.d\n"
+ "addvl %[inptr5], %[inptr5], #1\n"
+ "zip1 z0.d, z8.d, z12.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z1.d, z8.d, z12.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z2.d, z9.d, z13.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip2 z3.d, z9.d, z13.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z4.d, z10.d, z14.d\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip2 z5.d, z10.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z6.d, z11.d, z15.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip2 z7.d, z11.d, z15.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 7:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z7.h, #0\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "ld1h z1.h, p0/z, [%[inptr1]]\n"
+ "inch %[inpos], all, mul #1\n"
+ "ld1h z2.h, p0/z, [%[inptr2]]\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "ld1h z3.h, p0/z, [%[inptr3]]\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "ld1h z4.h, p0/z, [%[inptr4]]\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "ld1h z5.h, p0/z, [%[inptr5]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "ld1h z6.h, p0/z, [%[inptr6]]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "addvl %[inptr4], %[inptr4], #1\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "addvl %[inptr5], %[inptr5], #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "addvl %[inptr6], %[inptr6], #1\n"
+ "zip1 z0.d, z8.d, z12.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z1.d, z8.d, z12.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z2.d, z9.d, z13.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip2 z3.d, z9.d, z13.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z4.d, z10.d, z14.d\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip2 z5.d, z10.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z6.d, z11.d, z15.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip2 z7.d, z11.d, z15.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [inptr6] "+r" (inptr6)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ default:
+ case 8:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "inch %[inpos], all, mul #1\n"
+ "ld1h z1.h, p0/z, [%[inptr1]]\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "ld1h z2.h, p0/z, [%[inptr2]]\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "ld1h z3.h, p0/z, [%[inptr3]]\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "ld1h z4.h, p0/z, [%[inptr4]]\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "ld1h z5.h, p0/z, [%[inptr5]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "ld1h z6.h, p0/z, [%[inptr6]]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "ld1h z7.h, p0/z, [%[inptr7]]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "addvl %[inptr4], %[inptr4], #1\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "addvl %[inptr5], %[inptr5], #1\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "addvl %[inptr6], %[inptr6], #1\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "addvl %[inptr7], %[inptr7], #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip1 z0.d, z8.d, z12.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z1.d, z8.d, z12.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip1 z2.d, z9.d, z13.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z3.d, z9.d, z13.d\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip1 z4.d, z10.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z5.d, z10.d, z14.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip1 z6.d, z11.d, z15.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z7.d, z11.d, z15.d\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [inptr6] "+r" (inptr6), [inptr7] "+r" (inptr7)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+
+ }
+ }
+}
+
+#endif // __ARM_FEATURE_SVE
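
The size arithmetic in the header above is worth spelling out: outwidth counts 64-bit granules, each holding four 16-bit values, and since 8 * inwidth is always a multiple of 4, the rounding in (inwidth * 8 + 3) / 4 never actually adds padding; one 8-row group packs to exactly inwidth * 8 elements. A small check, using inwidth = 10 as an assumed example:

#include <cassert>

int main()
{
    const long inwidth  = 10;                      // e.g. kmax - k0 == 10
    const long outwidth = (inwidth * 8 + 3) / 4;   // 20 granules of 4 x 16-bit
    assert(outwidth * 4 == inwidth * 8);           // exact: no padding added
    return 0;
}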
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block4_8bit.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block4_8bit.hpp
new file mode 100644
index 0000000000..f1690baf43
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block4_8bit.hpp
@@ -0,0 +1,596 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+template<>
+template<typename T>
+inline void TransformImpl<8, 4, false, 1, 1, false>::Transform(T *out, const T *in, int ldin, int y0, int ymax, int k0, int kmax)
+{
+ uint8_t *master_outptr = reinterpret_cast<uint8_t *>(out);
+ const uint8_t *inptr = reinterpret_cast<const uint8_t *>(in);
+
+ for (int y=y0; y<ymax; y+=8)
+ {
+ const int height = ymax-y;
+ const long inwidth = (kmax - k0);
+ const long outwidth = ((inwidth + 3) / 4) * 32;
+ long inpos = 0;
+ long outpos = 0;
+
+ uint8_t *outptr = master_outptr;
+ master_outptr += outwidth;
+
+ const uint8_t *inptr0 = inptr + y * ldin + k0;
+ const uint8_t *inptr1 = inptr0 + ldin;
+ const uint8_t *inptr2 = inptr1 + ldin;
+ const uint8_t *inptr3 = inptr2 + ldin;
+ const uint8_t *inptr4 = inptr3 + ldin;
+ const uint8_t *inptr5 = inptr4 + ldin;
+ const uint8_t *inptr6 = inptr5 + ldin;
+ const uint8_t *inptr7 = inptr6 + ldin;
+
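+ // Note: unlike the 16- and 32-bit transforms in the files above, these
+ // 8-bit kernels leave the input pointers fixed and address each load as
+ // [%[inptrN], %[inpos]] (a byte offset advanced with incb), and they
+ // predicate the eight stores with p0-p7 rather than cycling p0-p3.
+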
+ switch(height)
+ {
+ case 1:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.b, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.b, #0\n"
+ "ld1b z0.b, p0/z, [%[inptr0], %[inpos]]\n"
+ "incb %[inpos], all, mul #1\n"
+ "whilelt p0.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "whilelt p1.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z0.s, z8.s, z4.s\n"
+ "zip2 z1.s, z8.s, z4.s\n"
+ "zip1 z2.s, z9.s, z4.s\n"
+ "zip2 z3.s, z9.s, z4.s\n"
+ "whilelt p2.b, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z4.s\n"
+ "zip2 z11.s, z1.s, z4.s\n"
+ "st1b z8.b, p0, [%[outptr]]\n"
+ "zip1 z12.s, z2.s, z4.s\n"
+ "whilelt p3.b, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z14.s, z3.s, z4.s\n"
+ "st1b z9.b, p1, [%[outptr], #1, MUL VL]\n"
+ "zip2 z15.s, z3.s, z4.s\n"
+ "whilelt p4.b, %[outpos], %[outwidth]\n"
+ "st1b z10.b, p2, [%[outptr], #2, MUL VL]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z11.b, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p5.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z12.b, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p6.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z13.b, p5, [%[outptr], #5, MUL VL]\n"
+ "whilelt p7.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z14.b, p6, [%[outptr], #6, MUL VL]\n"
+ "st1b z15.b, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 2:
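+ // Two live rows: both z4 and z14 start as zero because z4 is recycled
+ // as a data register partway through the zip sequence.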
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.b, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.b, #0\n"
+ "mov z14.b, #0\n"
+ "ld1b z0.b, p0/z, [%[inptr0], %[inpos]]\n"
+ "ld1b z1.b, p0/z, [%[inptr1], %[inpos]]\n"
+ "incb %[inpos], all, mul #1\n"
+ "whilelt p0.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z4.s\n"
+ "zip2 z11.s, z1.s, z4.s\n"
+ "whilelt p1.b, %[outpos], %[outwidth]\n"
+ "zip1 z0.s, z8.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z1.s, z8.s, z4.s\n"
+ "zip1 z2.s, z9.s, z4.s\n"
+ "zip2 z3.s, z9.s, z4.s\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "whilelt p2.b, %[outpos], %[outwidth]\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z6.s, z11.s, z14.s\n"
+ "zip2 z7.s, z11.s, z14.s\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "whilelt p3.b, %[outpos], %[outwidth]\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1b z8.b, p0, [%[outptr]]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "whilelt p4.b, %[outpos], %[outwidth]\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "st1b z9.b, p1, [%[outptr], #1, MUL VL]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z10.b, p2, [%[outptr], #2, MUL VL]\n"
+ "whilelt p5.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z11.b, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p6.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z12.b, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p7.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z13.b, p5, [%[outptr], #5, MUL VL]\n"
+ "st1b z14.b, p6, [%[outptr], #6, MUL VL]\n"
+ "st1b z15.b, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 3:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.b, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.b, #0\n"
+ "mov z14.b, #0\n"
+ "ld1b z0.b, p0/z, [%[inptr0], %[inpos]]\n"
+ "ld1b z1.b, p0/z, [%[inptr1], %[inpos]]\n"
+ "ld1b z2.b, p0/z, [%[inptr2], %[inpos]]\n"
+ "incb %[inpos], all, mul #1\n"
+ "whilelt p0.b, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z4.s\n"
+ "zip2 z11.s, z1.s, z4.s\n"
+ "zip1 z12.s, z2.s, z4.s\n"
+ "whilelt p1.b, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p2.b, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "zip1 z6.s, z11.s, z14.s\n"
+ "zip2 z7.s, z11.s, z14.s\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "whilelt p3.b, %[outpos], %[outwidth]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "st1b z8.b, p0, [%[outptr]]\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "whilelt p4.b, %[outpos], %[outwidth]\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "st1b z9.b, p1, [%[outptr], #1, MUL VL]\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z10.b, p2, [%[outptr], #2, MUL VL]\n"
+ "whilelt p5.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z11.b, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p6.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z12.b, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p7.b, %[outpos], %[outwidth]\n"
+ "st1b z13.b, p5, [%[outptr], #5, MUL VL]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z14.b, p6, [%[outptr], #6, MUL VL]\n"
+ "st1b z15.b, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 4:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.b, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.b, #0\n"
+ "ld1b z0.b, p0/z, [%[inptr0], %[inpos]]\n"
+ "ld1b z1.b, p0/z, [%[inptr1], %[inpos]]\n"
+ "ld1b z2.b, p0/z, [%[inptr2], %[inpos]]\n"
+ "ld1b z3.b, p0/z, [%[inptr3], %[inpos]]\n"
+ "incb %[inpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "whilelt p0.b, %[outpos], %[outwidth]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z10.s, z1.s, z4.s\n"
+ "zip2 z11.s, z1.s, z4.s\n"
+ "zip1 z12.s, z2.s, z4.s\n"
+ "zip2 z13.s, z2.s, z4.s\n"
+ "whilelt p1.b, %[outpos], %[outwidth]\n"
+ "zip1 z14.s, z3.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z4.s\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p2.b, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "whilelt p3.b, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1b z8.b, p0, [%[outptr]]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p4.b, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "st1b z9.b, p1, [%[outptr], #1, MUL VL]\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p5.b, %[outpos], %[outwidth]\n"
+ "st1b z10.b, p2, [%[outptr], #2, MUL VL]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z11.b, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p6.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z12.b, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p7.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z13.b, p5, [%[outptr], #5, MUL VL]\n"
+ "st1b z14.b, p6, [%[outptr], #6, MUL VL]\n"
+ "st1b z15.b, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 5:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.b, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z5.b, #0\n"
+ "ld1b z0.b, p0/z, [%[inptr0], %[inpos]]\n"
+ "ld1b z1.b, p0/z, [%[inptr1], %[inpos]]\n"
+ "ld1b z2.b, p0/z, [%[inptr2], %[inpos]]\n"
+ "ld1b z3.b, p0/z, [%[inptr3], %[inpos]]\n"
+ "ld1b z4.b, p0/z, [%[inptr4], %[inpos]]\n"
+ "incb %[inpos], all, mul #1\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "whilelt p0.b, %[outpos], %[outwidth]\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z12.s, z2.s, z5.s\n"
+ "zip2 z13.s, z2.s, z5.s\n"
+ "whilelt p1.b, %[outpos], %[outwidth]\n"
+ "zip1 z14.s, z3.s, z5.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z5.s\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p2.b, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "whilelt p3.b, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1b z8.b, p0, [%[outptr]]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p4.b, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "st1b z9.b, p1, [%[outptr], #1, MUL VL]\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p5.b, %[outpos], %[outwidth]\n"
+ "st1b z10.b, p2, [%[outptr], #2, MUL VL]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z11.b, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p6.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z12.b, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p7.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z13.b, p5, [%[outptr], #5, MUL VL]\n"
+ "st1b z14.b, p6, [%[outptr], #6, MUL VL]\n"
+ "st1b z15.b, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 6:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.b, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z6.b, #0\n"
+ "ld1b z0.b, p0/z, [%[inptr0], %[inpos]]\n"
+ "ld1b z1.b, p0/z, [%[inptr1], %[inpos]]\n"
+ "ld1b z2.b, p0/z, [%[inptr2], %[inpos]]\n"
+ "ld1b z3.b, p0/z, [%[inptr3], %[inpos]]\n"
+ "ld1b z4.b, p0/z, [%[inptr4], %[inpos]]\n"
+ "ld1b z5.b, p0/z, [%[inptr5], %[inpos]]\n"
+ "incb %[inpos], all, mul #1\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p0.b, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "whilelt p1.b, %[outpos], %[outwidth]\n"
+ "zip1 z14.s, z3.s, z6.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z6.s\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p2.b, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "whilelt p3.b, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1b z8.b, p0, [%[outptr]]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p4.b, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "st1b z9.b, p1, [%[outptr], #1, MUL VL]\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p5.b, %[outpos], %[outwidth]\n"
+ "st1b z10.b, p2, [%[outptr], #2, MUL VL]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z11.b, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p6.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z12.b, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p7.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z13.b, p5, [%[outptr], #5, MUL VL]\n"
+ "st1b z14.b, p6, [%[outptr], #6, MUL VL]\n"
+ "st1b z15.b, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 7:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.b, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z7.b, #0\n"
+ "ld1b z0.b, p0/z, [%[inptr0], %[inpos]]\n"
+ "ld1b z1.b, p0/z, [%[inptr1], %[inpos]]\n"
+ "ld1b z2.b, p0/z, [%[inptr2], %[inpos]]\n"
+ "ld1b z3.b, p0/z, [%[inptr3], %[inpos]]\n"
+ "ld1b z4.b, p0/z, [%[inptr4], %[inpos]]\n"
+ "ld1b z5.b, p0/z, [%[inptr5], %[inpos]]\n"
+ "ld1b z6.b, p0/z, [%[inptr6], %[inpos]]\n"
+ "incb %[inpos], all, mul #1\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "whilelt p0.b, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p1.b, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p2.b, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "whilelt p3.b, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1b z8.b, p0, [%[outptr]]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p4.b, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "st1b z9.b, p1, [%[outptr], #1, MUL VL]\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p5.b, %[outpos], %[outwidth]\n"
+ "st1b z10.b, p2, [%[outptr], #2, MUL VL]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z11.b, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p6.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z12.b, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p7.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z13.b, p5, [%[outptr], #5, MUL VL]\n"
+ "st1b z14.b, p6, [%[outptr], #6, MUL VL]\n"
+ "st1b z15.b, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [inptr6] "+r" (inptr6)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ default:
+ case 8:
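+ // Full-height panel: all eight rows are loaded, so no zero-padding
+ // register is needed.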
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.b, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "ld1b z0.b, p0/z, [%[inptr0], %[inpos]]\n"
+ "ld1b z1.b, p0/z, [%[inptr1], %[inpos]]\n"
+ "ld1b z2.b, p0/z, [%[inptr2], %[inpos]]\n"
+ "ld1b z3.b, p0/z, [%[inptr3], %[inpos]]\n"
+ "ld1b z4.b, p0/z, [%[inptr4], %[inpos]]\n"
+ "ld1b z5.b, p0/z, [%[inptr5], %[inpos]]\n"
+ "ld1b z6.b, p0/z, [%[inptr6], %[inpos]]\n"
+ "ld1b z7.b, p0/z, [%[inptr7], %[inpos]]\n"
+ "incb %[inpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "whilelt p0.b, %[outpos], %[outwidth]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "whilelt p1.b, %[outpos], %[outwidth]\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p2.b, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "whilelt p3.b, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1b z8.b, p0, [%[outptr]]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p4.b, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "st1b z9.b, p1, [%[outptr], #1, MUL VL]\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p5.b, %[outpos], %[outwidth]\n"
+ "st1b z10.b, p2, [%[outptr], #2, MUL VL]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z11.b, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p6.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z12.b, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p7.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z13.b, p5, [%[outptr], #5, MUL VL]\n"
+ "st1b z14.b, p6, [%[outptr], #6, MUL VL]\n"
+ "st1b z15.b, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [inptr6] "+r" (inptr6), [inptr7] "+r" (inptr7)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ }
+ }
+}
+
+#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/utils.hpp b/src/core/NEON/kernels/arm_gemm/utils.hpp
index b77bc7a566..a1fc00ea89 100644
--- a/src/core/NEON/kernels/arm_gemm/utils.hpp
+++ b/src/core/NEON/kernels/arm_gemm/utils.hpp
@@ -24,6 +24,10 @@
#pragma once
+#ifdef __ARM_FEATURE_SVE
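+// arm_sve.h supplies the SVE ACLE intrinsics, including svcntb() used below.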
+#include <arm_sve.h>
+#endif
+
// Macro for unreachable code (e.g. impossible default cases on switch)
#define UNREACHABLE(why) __builtin_unreachable()
@@ -31,23 +35,27 @@
// #define UNREACHABLE(why) assert(0 && why)
inline int iceildiv(const int a, const int b) {
-  return (a + b - 1) / b;
+    return (a + b - 1) / b;
}
template <typename T>
inline T roundup(const T a, const T b) {
-  T rem = a % b;
+    T rem = a % b;
 
-  if (rem) {
-    return a + b - rem;
-  } else {
-    return a;
-  }
+    if (rem) {
+        return a + b - rem;
+    } else {
+        return a;
+    }
}
template <typename T>
inline unsigned long get_vector_length() {
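+    // With SVE the vector length is implementation-defined, so query it at
+    // run time (svcntb() returns the bytes per vector) rather than assuming
+    // 128-bit NEON registers.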
+#ifdef __ARM_FEATURE_SVE
+    const unsigned long length = svcntb();
+#else
     const unsigned long length = 16;
+#endif
     return length / sizeof(T);
-}
+}
\ No newline at end of file