author    David Mansell <David.Mansell@arm.com>  2023-03-10 13:48:50 +0000
committer Viet-Hoa Do <viet-hoa.do@arm.com>      2023-03-13 16:40:36 +0000
commit    aaa9da1efa83911c7a67d50811ad669a92a7d12f (patch)
tree      457c72960724b6e9b2e50e8b039735515c629216 /src/core/NEON/kernels
parent    0ffc88b7ae8f73fc66338a4eee5348bab634edf7 (diff)
arm_gemm: Add SME2 FP16 kernels.
Resolves: COMPMID-5966
Change-Id: Ic0d694493178da029a297643855bd0cff01b174f
Signed-off-by: David Mansell <David.Mansell@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9302
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/core/NEON/kernels')
-rw-r--r-- src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp                                                         |  32
-rw-r--r-- src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp                                                  |  22
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/list-sve.hpp                                     |   4
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block2_fp16_fp16.hpp           | 218
-rw-r--r-- src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block2_fp16_fp16.hpp           | 125
-rw-r--r-- src/core/NEON/kernels/arm_gemm/interleave_indirect-sve.cpp                                           |  15
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL.hpp        |  93
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL/generic.cpp | 341
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL.hpp        |  93
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL/generic.cpp | 452
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL.hpp        |  93
-rw-r--r-- src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL/generic.cpp | 506
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2.hpp                       |  13
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2.hpp                       |  13
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2.hpp                       |  13
15 files changed, 2022 insertions, 11 deletions
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp b/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp
index d749dce98d..efdaeeb170 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020, 2022 Arm Limited.
+ * Copyright (c) 2017-2020, 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,6 +41,11 @@
#include "kernels/a64_hgemm_8x24.hpp"
#include "kernels/a64_hybrid_fp16_mla_6x32.hpp"
#include "kernels/a64_sgemm_8x12.hpp"
+#ifdef ARM_COMPUTE_ENABLE_SME2
+#include "kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL.hpp"
+#include "kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL.hpp"
+#include "kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL.hpp"
+#endif // ARM_COMPUTE_ENABLE_SME2
#ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
#include "kernels/sve_ffhybrid_fp16_mla_6x4VL.hpp"
#include "kernels/sve_ffinterleaved_fp16_mla_8x3VL.hpp"
@@ -52,6 +57,31 @@ namespace arm_gemm {
static const GemmImplementation<__fp16, __fp16> gemm_fp16_methods[] = {
#ifdef ARM_COMPUTE_ENABLE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+{
+ GemmMethod::GEMM_INTERLEAVED,
+ "sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL",
+ [](const GemmArgs &args) { return args._ci->has_sme2(); },
+ [](const GemmArgs &args) { const auto VL = sme::get_vector_length<float>();
+ return args._Nsize <= VL || (2*VL < args._Nsize && args._Nsize <= 3*VL); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL, __fp16, __fp16, Nothing, false, false, false, true>(args); }
+},
+{
+ GemmMethod::GEMM_INTERLEAVED,
+ "sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL",
+ [](const GemmArgs &args) { return args._ci->has_sme2(); },
+ [](const GemmArgs &args) { const auto VL = sme::get_vector_length<float>();
+ return args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL, __fp16, __fp16, Nothing, false, false, false, true>(args); }
+},
+{
+ GemmMethod::GEMM_INTERLEAVED,
+ "sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL",
+ [](const GemmArgs &args) { return args._ci->has_sme2(); },
+ nullptr,
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL, __fp16, __fp16, Nothing, false, false, false, true>(args); }
+},
+#endif // ARM_COMPUTE_ENABLE_SME2
GemmImplementation<__fp16, __fp16>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_hybrid_fp16_mla_6x4VL",
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp b/src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp
index e7346e8039..442739b55e 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2022 Arm Limited.
+ * Copyright (c) 2017-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -267,18 +267,24 @@ public:
};
// We need a similar trick here to figure out what type the accumulator buffer should be.
-template<typename strategy, typename OutputStage>
+template<typename strategy, typename OutputStage, bool ForceFloat>
class accumulate_buffer_type {
public:
typedef typename strategy::result_type type;
};
template<typename strategy>
-class accumulate_buffer_type<strategy, Requantize32> {
+class accumulate_buffer_type<strategy, Requantize32, false> {
public:
typedef int32_t type;
};
+template<typename strategy, typename OutputStage>
+class accumulate_buffer_type<strategy, OutputStage, true> {
+public:
+ typedef float type;
+};
+
// Stripe width is a concept only needed for FixedFormat kernels. Use an accessor to avoid issues in other scenarios.
template<typename strategy, bool FixedFormat>
struct get_stripe_width {
@@ -321,11 +327,11 @@ struct get_kernel_weight_format<strategy, true, To> {
} // anonymous namespace
-template<typename strategy, typename To, typename Tr, typename OutputStage=Nothing, bool MergeStep=true, bool FixedFormat=false, bool ForceThreadColumns=false>
+template<typename strategy, typename To, typename Tr, typename OutputStage=Nothing, bool MergeStep=true, bool FixedFormat=false, bool ForceThreadColumns=false, bool ForceFloatAccumulate=false>
class GemmInterleaved : public GemmCommon<To, Tr> {
typedef typename strategy::operand_type Toi;
typedef typename strategy::result_type Tri;
- typedef typename accumulate_buffer_type<strategy, OutputStage>::type Tab;
+ typedef typename accumulate_buffer_type<strategy, OutputStage, ForceFloatAccumulate>::type Tab;
/* const properties set by constructor */
const CPUInfo * const _ci;
@@ -383,7 +389,7 @@ class GemmInterleaved : public GemmCommon<To, Tr> {
class blockwalker {
private:
/* Size loops, etc. based on our parent's configuration */
- const GemmInterleaved<strategy, To, Tr, OutputStage, MergeStep, FixedFormat, ForceThreadColumns> &_parent;
+ const GemmInterleaved<strategy, To, Tr, OutputStage, MergeStep, FixedFormat, ForceThreadColumns, ForceFloatAccumulate> &_parent;
/* K, X and multi parameters for current iteration. */
unsigned int _k0=0, _x0=0, _multi=0;
@@ -398,9 +404,9 @@ class GemmInterleaved : public GemmCommon<To, Tr> {
bool _newmulti=true;
public:
- blockwalker(const GemmInterleaved<strategy, To, Tr, OutputStage, MergeStep, FixedFormat, ForceThreadColumns> &parent) : _parent(parent) { }
+ blockwalker(const GemmInterleaved<strategy, To, Tr, OutputStage, MergeStep, FixedFormat, ForceThreadColumns, ForceFloatAccumulate> &parent) : _parent(parent) { }
- blockwalker(const GemmInterleaved<strategy, To, Tr, OutputStage, MergeStep, FixedFormat, ForceThreadColumns> &parent,
+ blockwalker(const GemmInterleaved<strategy, To, Tr, OutputStage, MergeStep, FixedFormat, ForceThreadColumns, ForceFloatAccumulate> &parent,
unsigned int x_start, unsigned int x_end) : _parent(parent), _x0 (_x_start), _x_start(x_start), _x_end(x_end) { }
unsigned int xmax() {
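
[Editor's note] The new ForceFloat/ForceFloatAccumulate parameter threads the accumulator element type through GemmInterleaved at compile time: the primary template keeps the strategy's own result type, the Requantize32 specialisation keeps int32_t, and the new true specialisation forces float so the fp16 kernels can spill fp32 partial results. A minimal sketch of the same type-selector trick, outside the patch (names are illustrative):

#include <cstdint>
#include <type_traits>

struct Nothing {};
struct Requantize32 {};

// Primary template: the accumulator buffer uses the kernel's result type.
template<typename Strategy, typename OutputStage, bool ForceFloat>
struct accum_type_sketch { using type = typename Strategy::result_type; };

// Quantized output stages accumulate in int32 regardless of result type.
template<typename Strategy>
struct accum_type_sketch<Strategy, Requantize32, false> { using type = int32_t; };

// Forcing float wins for any output stage (the fp16fp32fp16 kernels use this).
template<typename Strategy, typename OutputStage>
struct accum_type_sketch<Strategy, OutputStage, true> { using type = float; };

struct DemoStrategy { using result_type = int8_t; };  // stand-in strategy type

static_assert(std::is_same<accum_type_sketch<DemoStrategy, Nothing, false>::type, int8_t>::value, "default");
static_assert(std::is_same<accum_type_sketch<DemoStrategy, Requantize32, false>::type, int32_t>::value, "quantized");
static_assert(std::is_same<accum_type_sketch<DemoStrategy, Nothing, true>::type, float>::value, "forced float");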
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/list-sve.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/list-sve.hpp
index 9d8eb222e6..57f26ac135 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/list-sve.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/list-sve.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,6 +24,7 @@
#include "sme_interleave1VL_bf16_bf16.hpp"
#include "sme_interleave1VL_block2_bf16_bf16.hpp"
+#include "sme_interleave1VL_block2_fp16_fp16.hpp"
#include "sme_interleave1VL_block4_s8_s8.hpp"
#include "sme_interleave1VL_block4_u8_u8.hpp"
#include "sme_interleave1VL_block4_s8_s8_summing.hpp"
@@ -40,6 +41,7 @@
#include "sme_interleave2VL_bf16_bf16.hpp"
#include "sme_interleave2VL_fp32_fp32.hpp"
#include "sme_interleave4VL_block2_bf16_bf16.hpp"
+#include "sme_interleave4VL_block2_fp16_fp16.hpp"
#include "sme_interleave4VL_block4_s8_s8.hpp"
#include "sme_interleave4VL_block4_u8_u8.hpp"
#include "sme_interleave4VL_block4_s8_s8_summing.hpp"
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block2_fp16_fp16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block2_fp16_fp16.hpp
new file mode 100644
index 0000000000..30c3e42aed
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block2_fp16_fp16.hpp
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+template <>
+void interleave_block<1, 2, VLType::SME, false>(
+ __fp16 * &out, const __fp16 * const *in,
+ size_t width, size_t height, size_t row_offset, bool
+)
+{
+ __asm__ __volatile__(
+ ".inst 0xd503477f // SMSTART ZA\n"
+ "mov x22, %x[width]\n"
+ "mov x21, %x[width]\n"
+ "cnth x20\n"
+ "inch x22\n"
+ "sub x11, x20, #0x1\n"
+ "sub x22, x22, #0x1\n"
+ "ands x11, x21, x11\n"
+ "cntw x10\n"
+ "udiv x22, x22, x20\n" // n_passes = ceildiv(width, VL<T>)
+ "csel x11, x11, x20, NE\n"
+ "sub x9, x22, #0x1\n"
+ "add x11, x11, #0x1\n"
+ "sub x28, x10, #0x2\n"
+ "lsl x20, %x[height], #0x1\n" // height * 2
+ "mov x27, #0x0\n"
+ "mov x26, %x[in]\n"
+ "lsr x9, x9, #0x1\n" // n_loops = (n_passes - 1) / 2
+ "and x25, x22, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "ldr x24, [x26, #0x0]\n"
+ "lsr x11, x11, #0x1\n"
+ "ptrue p11.s\n"
+ "ldr x23, [x26, #0x8]\n"
+ "whilelt p10.h, XZR, x20\n"
+ "mov x22, %x[row_offset]\n"
+ "mov x21, %x[out]\n"
+ "whilelt p9.h, x27, %x[width]\n"
+ "whilelt p8.h, x27, %x[width]\n"
+ "add x26, x26, #0x10\n"
+ "mov x12, #0x0\n"
+ "cbz x28, 2f\n"
+ "1:" // K loop: Charge: Loop
+ ".inst 0x25286143 // psel p3.h, p8.h/Z, p10.h[w12]\n"
+ ".inst 0x25686142 // psel p2.h, p8.h/Z, p10.h[w12, #2]\n"
+ ".inst 0xe0560f00 // ld1h { za0h.h[x12] }, p3/Z, [x24, x22, LSL #1]\n"
+ "ldr x24, [x26, #0x0]\n"
+ ".inst 0xe0560ae2 // ld1h { za0h.h[x12, #2] }, p2/Z, [x23, x22, LSL #1]\n"
+ "add x12, x12, #0x4\n"
+ "ldr x23, [x26, #0x8]\n"
+ "add x26, x26, #0x10\n"
+ "cmp x12, x28, LSL #1\n"
+ "blt 1b\n"
+ "2:" // K loop: Charge: End
+ ".inst 0x25286141 // psel p1.h, p8.h/Z, p10.h[w12]\n"
+ ".inst 0x25686140 // psel p0.h, p8.h/Z, p10.h[w12, #2]\n"
+ "mov x26, %x[in]\n"
+ "inch x27\n"
+ ".inst 0xe0560700 // ld1h { za0h.h[x12] }, p1/Z, [x24, x22, LSL #1]\n"
+ "ldr x24, [x26, #0x0]\n"
+ ".inst 0xe05602e2 // ld1h { za0h.h[x12, #2] }, p0/Z, [x23, x22, LSL #1]\n"
+ "ldr x23, [x26, #0x8]\n"
+ "add x26, x26, #0x10\n"
+ "inch x22\n"
+ "cbz x9, 8f\n"
+ "mov x20, x9\n"
+ "3:" // K loop: Main loop
+ "whilelt p8.h, x27, %x[width]\n"
+ "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
+ "cbz x28, 5f\n"
+ "4:" // K loop: Main loop: First: Loop
+ ".inst 0x253b6143 // psel p3.h, p8.h/Z, p10.h[w15, #1]\n"
+ ".inst 0x257b6142 // psel p2.h, p8.h/Z, p10.h[w15, #3]\n"
+ ".inst 0x252a6d21 // psel p1.h, p11.h/Z, p9.h[w14]\n"
+ ".inst 0x253a6d20 // psel p0.h, p11.h/Z, p9.h[w14, #1]\n"
+ ".inst 0xe0566f01 // ld1h { za0h.h[x15, #1] }, p3/Z, [x24, x22, LSL #1]\n"
+ "ldr x24, [x26, #0x0]\n"
+ ".inst 0xe0566ae3 // ld1h { za0h.h[x15, #3] }, p2/Z, [x23, x22, LSL #1]\n"
+ "ldr x23, [x26, #0x8]\n"
+ "add x26, x26, #0x10\n"
+ "add x15, x15, #0x4\n"
+ ".inst 0xe0bfc6a0 // st1w { za0v.s[x14] }, p1/Z, [x21, XZR, LSL #2]\n"
+ ".inst 0xe0aac2a1 // st1w { za0v.s[x14, #1] }, p0/Z, [x21, x10, LSL #2]\n"
+ "add x14, x14, #0x2\n"
+ "addvl x21, x21, #2\n"
+ "cmp x14, x28\n"
+ "blt 4b\n"
+ "5:" // K loop: Main loop: First: Tail
+ ".inst 0x253b6143 // psel p3.h, p8.h/Z, p10.h[w15, #1]\n"
+ ".inst 0x257b6142 // psel p2.h, p8.h/Z, p10.h[w15, #3]\n"
+ ".inst 0x252a6d21 // psel p1.h, p11.h/Z, p9.h[w14]\n"
+ ".inst 0x253a6d20 // psel p0.h, p11.h/Z, p9.h[w14, #1]\n"
+ "mov x26, %x[in]\n"
+ "whilelt p9.h, x27, %x[width]\n"
+ ".inst 0xe0566f01 // ld1h { za0h.h[x15, #1] }, p3/Z, [x24, x22, LSL #1]\n"
+ "ldr x24, [x26, #0x0]\n"
+ "inch x27\n"
+ "mov x13, #0x0\n"
+ "whilelt p8.h, x27, %x[width]\n"
+ "mov x12, #0x0\n"
+ ".inst 0xe0566ae3 // ld1h { za0h.h[x15, #3] }, p2/Z, [x23, x22, LSL #1]\n"
+ "ldr x23, [x26, #0x8]\n"
+ "add x26, x26, #0x10\n"
+ "inch x22\n"
+ ".inst 0xe0bfc6a0 // st1w { za0v.s[x14] }, p1/Z, [x21, XZR, LSL #2]\n"
+ ".inst 0xe0aac2a1 // st1w { za0v.s[x14, #1] }, p0/Z, [x21, x10, LSL #2]\n"
+ "addvl x21, x21, #2\n"
+ "cbz x28, 7f\n"
+ "6:" // K loop: Main loop: Second: Loop
+ ".inst 0x25296143 // psel p3.h, p8.h/Z, p10.h[w13]\n"
+ ".inst 0x25696142 // psel p2.h, p8.h/Z, p10.h[w13, #2]\n"
+ ".inst 0x25286d21 // psel p1.h, p11.h/Z, p9.h[w12]\n"
+ ".inst 0x25386d20 // psel p0.h, p11.h/Z, p9.h[w12, #1]\n"
+ ".inst 0xe0562f00 // ld1h { za0h.h[x13] }, p3/Z, [x24, x22, LSL #1]\n"
+ "ldr x24, [x26, #0x0]\n"
+ ".inst 0xe0562ae2 // ld1h { za0h.h[x13, #2] }, p2/Z, [x23, x22, LSL #1]\n"
+ "ldr x23, [x26, #0x8]\n"
+ "add x26, x26, #0x10\n"
+ "add x13, x13, #0x4\n"
+ ".inst 0xe0bf86a8 // st1w { za2v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
+ ".inst 0xe0aa82a9 // st1w { za2v.s[x12, #1] }, p0/Z, [x21, x10, LSL #2]\n"
+ "add x12, x12, #0x2\n"
+ "addvl x21, x21, #2\n"
+ "cmp x12, x28\n"
+ "blt 6b\n"
+ "7:" // K loop: Main loop: Second: Tail
+ ".inst 0x25296143 // psel p3.h, p8.h/Z, p10.h[w13]\n"
+ ".inst 0x25696142 // psel p2.h, p8.h/Z, p10.h[w13, #2]\n"
+ ".inst 0x25286d21 // psel p1.h, p11.h/Z, p9.h[w12]\n"
+ ".inst 0x25386d20 // psel p0.h, p11.h/Z, p9.h[w12, #1]\n"
+ "mov x26, %x[in]\n"
+ "whilelt p9.h, x27, %x[width]\n"
+ ".inst 0xe0562f00 // ld1h { za0h.h[x13] }, p3/Z, [x24, x22, LSL #1]\n"
+ "ldr x24, [x26, #0x0]\n"
+ "subs x20, x20, #0x1\n"
+ "inch x27\n"
+ ".inst 0xe0562ae2 // ld1h { za0h.h[x13, #2] }, p2/Z, [x23, x22, LSL #1]\n"
+ "ldr x23, [x26, #0x8]\n"
+ "add x26, x26, #0x10\n"
+ "inch x22\n"
+ ".inst 0xe0bf86a8 // st1w { za2v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
+ ".inst 0xe0aa82a9 // st1w { za2v.s[x12, #1] }, p0/Z, [x21, x10, LSL #2]\n"
+ "addvl x21, x21, #2\n"
+ "bgt 3b\n"
+ "8:" // K loop: Tails
+ "cbnz x25, 11f\n"
+ "mov x26, %x[in]\n"
+ "whilelt p8.h, x27, %x[width]\n"
+ "mov x13, #0x0\n"
+ "mov x12, #0x0\n"
+ "9:" // K loop: Tails: Even: First
+ ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0x25396143 // psel p3.h, p8.h/Z, p10.h[w13, #1]\n"
+ ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
+ "ldr x24, [x26, #0x0]\n"
+ "add x12, x12, #0x1\n"
+ "add x26, x26, #0x8\n"
+ "cmp x12, x10\n"
+ "addvl x21, x21, #1\n"
+ ".inst 0xe0562f01 // ld1h { za0h.h[x13, #1] }, p3/Z, [x24, x22, LSL #1]\n"
+ "add x13, x13, #0x2\n"
+ "blt 9b\n"
+ "whilelt p9.h, x27, %x[width]\n"
+ "whilelt p8.h, x27, %x[width]\n"
+ "mov x20, #0x0\n"
+ "mov x12, #0x0\n"
+ "10:" // K loop: Tails: Even: Second
+ ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
+ "add x20, x20, #0x2\n"
+ ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
+ "add x12, x12, #0x1\n"
+ "addvl x21, x21, #1\n"
+ "cmp x12, x11\n"
+ "blt 10b\n"
+ "whilelt p9.h, x27, %x[width]\n"
+ "b 13f\n"
+ "11:" // K loop: Tails: Odd
+ "mov x12, #0x0\n"
+ "12:" // K loop: Tails: Odd: Loop
+ ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
+ "add x12, x12, #0x1\n"
+ "cmp x12, x11\n"
+ "addvl x21, x21, #1\n"
+ "blt 12b\n"
+ "13:" // K loop: End
+ "mov %x[out], x21\n"
+ ".inst 0xd503467f // SMSTOP\n"
+ : [out] "+&r" (out)
+ : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+#endif // defined(__ARM_FEATURE_SVE)
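
[Editor's note] The routine above uses ZA as a staging transpose buffer: fp16 rows are loaded into ZA horizontally in pairs (block depth 2) and written back out as fp32-sized vertical slices, double-buffering halves of ZA across passes. As a functional (not performance) reference, here is a scalar sketch of block-2 interleaving with the same zero-padding semantics; the panel_height parameter and row-major access are assumptions for illustration:

#include <cstddef>
#include <vector>

using fp16 = __fp16;  // assumes an fp16-capable AArch64 toolchain

// Pack 'panel_height' rows, two K-values at a time, zero-padding past the
// valid width/height exactly as the predicated SME loads/stores above do.
void interleave_block2_ref(std::vector<fp16> &out, const fp16 *const *in,
                           size_t width, size_t height, size_t row_offset,
                           size_t panel_height) {
    const size_t kblock = 2;
    for (size_t k = 0; k < width; k += kblock) {
        for (size_t r = 0; r < panel_height; r++) {
            for (size_t b = 0; b < kblock; b++) {
                const size_t kk = k + b;
                const bool valid = (r < height) && (kk < width);
                out.push_back(valid ? in[r][row_offset + kk] : fp16(0.0f));
            }
        }
    }
}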
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block2_fp16_fp16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block2_fp16_fp16.hpp
new file mode 100644
index 0000000000..268bdbb924
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block2_fp16_fp16.hpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+template <>
+void interleave_block<4, 2, VLType::SME, false>(
+ __fp16 * &out, const __fp16 * const *in,
+ size_t width, size_t height, size_t row_offset, bool
+)
+{
+ __asm__ __volatile__(
+ ".inst 0xd503477f // SMSTART ZA\n"
+ "mov x17, #0x0\n"
+ "mov x16, %x[row_offset]\n"
+ "cntw x15\n"
+ "cntw x14\n"
+ "cntw x11, ALL, MUL #2\n"
+ "cntw x10, ALL, MUL #3\n"
+ "cmp %x[height], x15\n"
+ "cnth x9\n"
+ "csel x15, %x[height], x15, LT\n"
+ "whilelt p11.h, XZR, %x[height]\n"
+ "whilelt p10.h, x14, %x[height]\n"
+ "whilelt p9.h, x11, %x[height]\n"
+ "whilelt p8.h, x10, %x[height]\n"
+ "ptrue p13.s\n"
+ "sub x15, x15, #0x1\n"
+ "zip1 p12.h, p11.h, p9.h\n"
+ "zip1 p11.h, p10.h, p8.h\n"
+ "mov x28, %x[out]\n"
+ "whilelt p10.h, x17, %x[width]\n"
+ "whilelt p9.h, x17, %x[width]\n"
+ "whilelt p8.h, x17, %x[width]\n"
+ "1:" // Width loop
+ "add x27, %x[in], XZR, LSL #3\n"
+ "add x26, %x[in], x14, LSL #3\n"
+ "add x25, %x[in], x11, LSL #3\n"
+ "add x20, %x[in], x10, LSL #3\n"
+ "ldr x24, [x27], #0x8\n"
+ "mov x13, #0x0\n"
+ "ldr x23, [x26], #0x8\n"
+ "ldr x22, [x25], #0x8\n"
+ "ldr x21, [x20], #0x8\n"
+ "cbz x15, 3f\n"
+ "2:" // Loads: Loop
+ ".inst 0x25296582 // psel p2.h, p9.h/Z, p12.h[w13]\n"
+ ".inst 0x25296161 // psel p1.h, p8.h/Z, p11.h[w13]\n"
+ ".inst 0x25396580 // psel p0.h, p9.h/Z, p12.h[w13, #1]\n"
+ ".inst 0xe0502b00 // ld1h { za0h.h[x13] }, p2/Z, [x24, x16, LSL #1]\n"
+ ".inst 0x25396162 // psel p2.h, p8.h/Z, p11.h[w13, #1]\n"
+ "ldr x24, [x27], #0x8\n"
+ ".inst 0xe05026e8 // ld1h { za1h.h[x13] }, p1/Z, [x23, x16, LSL #1]\n"
+ "ldr x23, [x26], #0x8\n"
+ ".inst 0xe05022c1 // ld1h { za0h.h[x13, #1] }, p0/Z, [x22, x16, LSL #1]\n"
+ "ldr x22, [x25], #0x8\n"
+ ".inst 0xe0502aa9 // ld1h { za1h.h[x13, #1] }, p2/Z, [x21, x16, LSL #1]\n"
+ "add x13, x13, #0x2\n"
+ "ldr x21, [x20], #0x8\n"
+ "cmp x13, x15, LSL #1\n"
+ "blt 2b\n"
+ "3:" // Loads: Tail
+ ".inst 0x25296581 // psel p1.h, p9.h/Z, p12.h[w13]\n"
+ ".inst 0x25296160 // psel p0.h, p8.h/Z, p11.h[w13]\n"
+ "sub x20, %x[width], x17\n"
+ ".inst 0x25396582 // psel p2.h, p9.h/Z, p12.h[w13, #1]\n"
+ "cmp x20, x9\n"
+ "mov x12, #0x0\n"
+ ".inst 0xe0502700 // ld1h { za0h.h[x13] }, p1/Z, [x24, x16, LSL #1]\n"
+ ".inst 0xe05022e8 // ld1h { za1h.h[x13] }, p0/Z, [x23, x16, LSL #1]\n"
+ ".inst 0x25396161 // psel p1.h, p8.h/Z, p11.h[w13, #1]\n"
+ "csel x20, x20, x9, LT\n"
+ "add x20, x20, #0x1\n"
+ ".inst 0xe0502ac1 // ld1h { za0h.h[x13, #1] }, p2/Z, [x22, x16, LSL #1]\n"
+ "lsr x20, x20, #0x1\n"
+ ".inst 0xe05026a9 // ld1h { za1h.h[x13, #1] }, p1/Z, [x21, x16, LSL #1]\n"
+ "4:" // Stores: Loop
+ ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0x25307542 // psel p2.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0x25307541 // psel p1.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0xe0bf8380 // st1w { za0v.s[x12] }, p0/Z, [x28, XZR, LSL #2]\n"
+ ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0xe0ae8b84 // st1w { za1v.s[x12] }, p2/Z, [x28, x14, LSL #2]\n"
+ ".inst 0xe0ab8788 // st1w { za2v.s[x12] }, p1/Z, [x28, x11, LSL #2]\n"
+ ".inst 0xe0aa838c // st1w { za3v.s[x12] }, p0/Z, [x28, x10, LSL #2]\n"
+ "add x12, x12, #0x1\n"
+ "addvl x28, x28, #4\n"
+ "cmp x12, x20\n"
+ "blt 4b\n"
+ "inch x17\n"
+ "inch x16\n"
+ "whilelt p10.h, x17, %x[width]\n"
+ "whilelt p9.h, x17, %x[width]\n"
+ "whilelt p8.h, x17, %x[width]\n"
+ "b.any 1b\n"
+ "mov %x[out], x28\n"
+ ".inst 0xd503467f // SMSTOP\n"
+ : [out] "+&r" (out)
+ : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+#endif // defined(__ARM_FEATURE_SVE)
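
[Editor's note] Same packing scheme as the 1VL routine above, but four vector lengths of rows per pass spread across all four ZA quadrants; the zip1 of the height predicates interleaves even/odd row enables so each ZA row holds a pair of source rows. In terms of the scalar reference sketched earlier, only the panel depth changes:

// With 512-bit vectors there are 16 fp32 lanes per vector, so this variant
// packs 4 * 16 = 64 rows per pass, versus 16 for the 1VL routine, e.g.:
//   interleave_block2_ref(panel, rows, K, M, 0, /*panel_height=*/4 * 16);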
diff --git a/src/core/NEON/kernels/arm_gemm/interleave_indirect-sve.cpp b/src/core/NEON/kernels/arm_gemm/interleave_indirect-sve.cpp
index 468915a046..7ed8af9b45 100644
--- a/src/core/NEON/kernels/arm_gemm/interleave_indirect-sve.cpp
+++ b/src/core/NEON/kernels/arm_gemm/interleave_indirect-sve.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -97,6 +97,19 @@ template void IndirectInterleave<1, 1, VLType::SME>(__fp16 *, const __fp16 * con
template void ConvolutionInterleave<1, 1, VLType::SME>(__fp16 *, const __fp16 *, size_t, const convolver<__fp16> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
template void Interleave<1, 1, VLType::SME>(__fp16 *, const __fp16 *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+/* FP16: SME implementations with dot-product type outer product */
+template void IndirectInterleave<2, 2, VLType::SME>(__fp16 *, const __fp16 * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void ConvolutionInterleave<2, 2, VLType::SME>(__fp16 *, const __fp16 *, size_t, const convolver<__fp16> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void Interleave<2, 2, VLType::SME>(__fp16 *, const __fp16 *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+
+template void IndirectInterleave<1, 2, VLType::SME>(__fp16 *, const __fp16 * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void ConvolutionInterleave<1, 2, VLType::SME>(__fp16 *, const __fp16 *, size_t, const convolver<__fp16> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void Interleave<1, 2, VLType::SME>(__fp16 *, const __fp16 *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+
+template void IndirectInterleave<4, 2, VLType::SME>(__fp16 *, const __fp16 * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void ConvolutionInterleave<4, 2, VLType::SME>(__fp16 *, const __fp16 *, size_t, const convolver<__fp16> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+template void Interleave<4, 2, VLType::SME>(__fp16 *, const __fp16 *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
+
/* FP32 fast-mode: SME implementations */
template void IndirectInterleave<1, 2, VLType::SME>(bfloat16 *, const float * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
template void ConvolutionInterleave<1, 2, VLType::SME>(bfloat16 *, const float *, size_t, const convolver<float> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL.hpp
new file mode 100644
index 0000000000..a9196958c7
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL.hpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+
+#include "../std_transforms_sme.hpp"
+
+namespace arm_gemm
+{
+
+// Implementations
+void sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL(const __fp16 *const A, const __fp16 *const B, __fp16 *const C, int ldc, const int M, const int N, const int K, const __fp16 *const bias, const Activation act, bool accumulate, float *const accumulator_buffer);
+
+class cls_sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL
+{
+public:
+ typedef __fp16 operand_type;
+ typedef __fp16 result_type;
+
+ typedef void (*kern_type)(const __fp16 *const A, const __fp16 *const B, __fp16 *const C, int ldc, const int M, const int N, const int K, const __fp16 *const bias, const Activation act, bool accumulate, float *const accumulator_buffer);
+
+ /* Kernel blocking parameters */
+ static unsigned int out_height()
+ {
+ return sme::get_vector_length<float>() * 1;
+ }
+
+ static unsigned int out_width()
+ {
+ return sme::get_vector_length<float>() * 4;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 2;
+ }
+
+ static constexpr bool supports_accumulate()
+ {
+ return true;
+ }
+
+ static constexpr bool supports_bias()
+ {
+ return true;
+ }
+
+ static constexpr bool supports_activation()
+ {
+ return true;
+ }
+
+ static constexpr bool is_sme()
+ {
+ return true;
+ }
+
+ // Default to the generic kernel
+ kern_type kernel = sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL;
+
+ StdTransformsSME<operand_type, result_type, 1, 4, 2> transforms = {};
+
+ cls_sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL(const CPUInfo *)
+ {
+ }
+};
+
+} // namespace arm_gemm
+
+#endif // ARM_COMPUTE_ENABLE_SVE
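
[Editor's note] Although operands and results are fp16, the blocking parameters above are expressed in fp32 vector-length units because each outer product accumulates into an fp32 ZA tile, and k_unroll() of 2 reflects the paired fp16 values consumed per MOPA. A quick sizing sketch, assuming a 512-bit vector length (the concrete numbers are illustrative, not fixed by the interface):

#include <cstdio>

int main() {
    const unsigned vl_f32   = 512 / 32;   // sme::get_vector_length<float>() on 512-bit hardware
    const unsigned out_h    = vl_f32 * 1; // out_height(): 16 rows
    const unsigned out_w    = vl_f32 * 4; // out_width():  64 columns
    const unsigned k_unroll = 2;          // fp16 values consumed per K step
    // 16 x 64 fp32 accumulators = 4096 bytes, the full ZA array at this VL.
    std::printf("tile %ux%u, k step %u, fp32 accumulator bytes: %u\n",
                out_h, out_w, k_unroll, out_h * out_w * 4);
    return 0;
}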
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL/generic.cpp
new file mode 100644
index 0000000000..ad10ce7993
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL/generic.cpp
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+
+#include "arm_gemm.hpp"
+
+
+#include "../../asmlib.hpp"
+#include "../../utils.hpp"
+
+namespace arm_gemm {
+
+void sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL(const __fp16 *const A, const __fp16 *const B, __fp16 *const C, int ldc, const int M, const int N, const int K, const __fp16 *const bias, const Activation act, bool accumulate, float *const accumulator_buffer)
+{
+ struct KernelArgs
+ {
+ KernelArgs(
+ const __fp16 *const A,
+ const __fp16 *const B,
+ __fp16 *const C, const int ldc,
+ const int M, const int N, const int K,
+ const __fp16 *const bias,
+ const Activation act,
+ bool accumulate,
+ float *const accumulator_buffer
+ ) : A(A),
+ B(B), kstride_bytes(roundup(K, 2) * sizeof(__fp16)),
+ C(C), ldcb(ldc * sizeof(__fp16)),
+ M(M), N(N), K(K),
+ min(-static_cast<__fp16>(std::numeric_limits<float>::infinity())),
+ max(static_cast<__fp16>(std::numeric_limits<float>::infinity())),
+ bias(bias),
+ accumulator_buffer(accumulator_buffer),
+ flags(0x0)
+ {
+ if (accumulate)
+ {
+ flags |= 1 << 0; // FILL_ACCUMULATORS_FROM_BUFFER
+ }
+ if (C == nullptr)
+ {
+ flags |= 1 << 1; // STORE_ACCUMULATORS_TO_BUFFER
+ }
+
+ // Initialise the activation values
+ switch (act.type)
+ {
+ default:
+ case Activation::Type::None:
+ break;
+ case Activation::Type::BoundedReLU:
+ this->max = static_cast<__fp16>(act.param1);
+ /* fall through */
+ case Activation::Type::ReLU:
+ this->min = static_cast<__fp16>(0);
+ break;
+ }
+ }
+
+ const __fp16 *const A;
+ const __fp16 *const B;
+ const long kstride_bytes;
+ __fp16 *const C;
+ const long ldcb;
+ const long M, N, K;
+ __fp16 min = -static_cast<__fp16>(std::numeric_limits<float>::infinity());
+ __fp16 max = static_cast<__fp16>(std::numeric_limits<float>::infinity());
+
+ const __fp16 *const bias;
+
+ float *const accumulator_buffer;
+ uint64_t flags;
+ };
+
+ // Construct arguments for this kernel
+ KernelArgs args(A, B, C, ldc, M, N, K, bias, act, accumulate, accumulator_buffer);
+
+ __asm__ __volatile__(
+ "ldr x13, [%x[args], %[offsetof_flags]]\n"
+ ".inst 0xd503477f // SMSTART ZA\n"
+ "ptrue p0.b\n"
+ ".inst 0x25207811 // ptrue pn9.b\n"
+ "ldr x11, [%x[args], %[offsetof_accumulator_buffer]]\n"
+ "ldr x10, [%x[args], %[offsetof_accumulator_buffer]]\n"
+ "tbz x13, #0, 2f\n"
+ "mov x12, #0x0\n"
+ "cntw x20\n"
+ "1:" // Initial accumulator load from buffer: Loop
+ ".inst 0xa040c578 // ld1w { z24.s-z27.s }, pn9.b/Z, [x11]\n"
+ ".inst 0xa041c570 // ld1w { z16.s-z19.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
+ ".inst 0xa042c564 // ld1w { z4.s-z7.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
+ ".inst 0xa043c574 // ld1w { z20.s-z23.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
+ ".inst 0xc0840700 // mova za0h.s[x12], { z24.s-z27.s }\n"
+ "addvl x11, x11, #16\n"
+ ".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xc0840482 // mova za2h.s[x12], { z4.s-z7.s }\n"
+ ".inst 0xc0840683 // mova za3h.s[x12], { z20.s-z23.s }\n"
+ "add x12, x12, #0x4\n"
+ "cmp x12, x20\n"
+ "blt 1b\n"
+ "2:" // Initial accumulator load from buffer: End
+ "ldr w9, [%x[args], %[offsetof_M]]\n"
+ "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
+ "ldr w26, [%x[args], %[offsetof_N]]\n"
+ "ldr x25, [%x[args], %[offsetof_A]]\n"
+ "3:" // M and N loop
+ "mov x24, x25\n"
+ "tbnz x13, #0, 4f\n"
+ "ldr x20, [%x[args], %[offsetof_bias]]\n"
+ ".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+ "cbz x20, 5f\n"
+ ".inst 0x257a4770 // whilelt pn8.h, x27, x26, VLx2\n"
+ "fmov z6.h, #0.0\n"
+ "fmov z19.h, #1.0\n"
+ ".inst 0xa01b2295 // ldnt1h { z20.h-z21.h }, p8/Z, [x20, x27, LSL #1]\n"
+ "zip1 z23.h, z20.h, z6.h\n"
+ "zip2 z12.h, z20.h, z6.h\n"
+ "zip1 z16.h, z21.h, z6.h\n"
+ "zip2 z8.h, z21.h, z6.h\n"
+ ".inst 0x81b70260 // fmopa za0.s, p0/M, p0/M, z19.h, z23.h\n"
+ ".inst 0x81ac0261 // fmopa za1.s, p0/M, p0/M, z19.h, z12.h\n"
+ ".inst 0x81b00262 // fmopa za2.s, p0/M, p0/M, z19.h, z16.h\n"
+ ".inst 0x81a80263 // fmopa za3.s, p0/M, p0/M, z19.h, z8.h\n"
+ "4:" // Prepare accumulators: Test for last block
+ "mov x20, x27\n"
+ "mov x21, x28\n"
+ "incw x20, ALL, MUL #4\n"
+ "incw x21\n"
+ "cmp x20, x26\n"
+ "mov x20, x13\n"
+ "csel x21, x28, x21, LT\n"
+ "bfm x13, XZR, #0x0, #0x0 // bfc x13, #0x0, #0x1\n"
+ "cmp x21, x9\n"
+ "csel x13, x20, x13, LT\n"
+ "5:" // Prepare accumulators: End
+ "ldr x20, [%x[args], %[offsetof_K]]\n"
+ "ldr x23, [%x[args], %[offsetof_B]]\n"
+ "ldr x22, [%x[args], %[offsetof_kstride_bytes]]\n"
+ "add x20, x20, #0x1\n"
+ "lsr x20, x20, #0x1\n"
+ "lsr x21, x20, #0x2\n"
+ "and x20, x20, #0x3\n"
+ "madd x23, x27, x22, x23\n" // bptr = B + n * kstride_bytes
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ "ld1h { z21.h }, p0/Z, [x24]\n"
+ ".inst 0xa140a6f8 // ldnt1h { z16.h, z20.h, z24.h, z28.h }, pn9.b/Z, [x23]\n"
+ "ld1h { z29.h }, p0/Z, [x24, #1, MUL VL]\n"
+ ".inst 0xa041a6ed // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ "ld1h { z4.h }, p0/Z, [x24, #2, MUL VL]\n"
+ ".inst 0xa042a6e1 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
+ "ld1h { z25.h }, p0/Z, [x24, #3, MUL VL]\n"
+ "addvl x24, x24, #4\n"
+ ".inst 0xa143a6fb // ldnt1h { z19.h, z23.h, z27.h, z31.h }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
+ "addvl x23, x23, #16\n"
+ "ble 7f\n"
+ "6:" // K loop
+ ".inst 0x81b002a0 // fmopa za0.s, p0/M, p0/M, z21.h, z16.h\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0x81b402a1 // fmopa za1.s, p0/M, p0/M, z21.h, z20.h\n"
+ ".inst 0x81b802a2 // fmopa za2.s, p0/M, p0/M, z21.h, z24.h\n"
+ ".inst 0x81bc02a3 // fmopa za3.s, p0/M, p0/M, z21.h, z28.h\n"
+ "ld1h { z21.h }, p0/Z, [x24]\n"
+ ".inst 0x81ac03a0 // fmopa za0.s, p0/M, p0/M, z29.h, z12.h\n"
+ ".inst 0xa140a6f0 // ld1h { z16.h, z20.h, z24.h, z28.h }, pn9.b/Z, [x23]\n"
+ ".inst 0x81ad03a1 // fmopa za1.s, p0/M, p0/M, z29.h, z13.h\n"
+ ".inst 0x81ae03a2 // fmopa za2.s, p0/M, p0/M, z29.h, z14.h\n"
+ ".inst 0x81af03a3 // fmopa za3.s, p0/M, p0/M, z29.h, z15.h\n"
+ "ld1h { z29.h }, p0/Z, [x24, #1, MUL VL]\n"
+ ".inst 0x81a00080 // fmopa za0.s, p0/M, p0/M, z4.h, z0.h\n"
+ ".inst 0xa041a6ec // ld1h { z12.h-z15.h }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0x81a10081 // fmopa za1.s, p0/M, p0/M, z4.h, z1.h\n"
+ ".inst 0x81a20082 // fmopa za2.s, p0/M, p0/M, z4.h, z2.h\n"
+ ".inst 0x81a30083 // fmopa za3.s, p0/M, p0/M, z4.h, z3.h\n"
+ "ld1h { z4.h }, p0/Z, [x24, #2, MUL VL]\n"
+ ".inst 0xa042a6e0 // ld1h { z0.h-z3.h }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
+ ".inst 0x81b30320 // fmopa za0.s, p0/M, p0/M, z25.h, z19.h\n"
+ ".inst 0x81b70321 // fmopa za1.s, p0/M, p0/M, z25.h, z23.h\n"
+ ".inst 0x81bb0322 // fmopa za2.s, p0/M, p0/M, z25.h, z27.h\n"
+ ".inst 0x81bf0323 // fmopa za3.s, p0/M, p0/M, z25.h, z31.h\n"
+ "ld1h { z25.h }, p0/Z, [x24, #3, MUL VL]\n"
+ "addvl x24, x24, #4\n"
+ ".inst 0xa143a6f3 // ld1h { z19.h, z23.h, z27.h, z31.h }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
+ "addvl x23, x23, #16\n"
+ "bgt 6b\n"
+ "7:" // K loop tail
+ ".inst 0x81b002a0 // fmopa za0.s, p0/M, p0/M, z21.h, z16.h\n"
+ ".inst 0x81b402a1 // fmopa za1.s, p0/M, p0/M, z21.h, z20.h\n"
+ ".inst 0x81b802a2 // fmopa za2.s, p0/M, p0/M, z21.h, z24.h\n"
+ ".inst 0x81bc02a3 // fmopa za3.s, p0/M, p0/M, z21.h, z28.h\n"
+ ".inst 0x81ac03a0 // fmopa za0.s, p0/M, p0/M, z29.h, z12.h\n"
+ ".inst 0x81ad03a1 // fmopa za1.s, p0/M, p0/M, z29.h, z13.h\n"
+ ".inst 0x81ae03a2 // fmopa za2.s, p0/M, p0/M, z29.h, z14.h\n"
+ ".inst 0x81af03a3 // fmopa za3.s, p0/M, p0/M, z29.h, z15.h\n"
+ ".inst 0x81a00080 // fmopa za0.s, p0/M, p0/M, z4.h, z0.h\n"
+ ".inst 0x81a10081 // fmopa za1.s, p0/M, p0/M, z4.h, z1.h\n"
+ ".inst 0x81a20082 // fmopa za2.s, p0/M, p0/M, z4.h, z2.h\n"
+ ".inst 0x81a30083 // fmopa za3.s, p0/M, p0/M, z4.h, z3.h\n"
+ ".inst 0x81b30320 // fmopa za0.s, p0/M, p0/M, z25.h, z19.h\n"
+ ".inst 0x81b70321 // fmopa za1.s, p0/M, p0/M, z25.h, z23.h\n"
+ ".inst 0x81bb0322 // fmopa za2.s, p0/M, p0/M, z25.h, z27.h\n"
+ ".inst 0x81bf0323 // fmopa za3.s, p0/M, p0/M, z25.h, z31.h\n"
+ "8:" // K oddments
+ "cbz x20, 10f\n"
+ "9:" // K oddments: Loop
+ "ld1h { z21.h }, p0/Z, [x24]\n"
+ "subs x20, x20, #0x1\n"
+ "addvl x24, x24, #1\n"
+ ".inst 0xa140a6f0 // ld1h { z16.h, z20.h, z24.h, z28.h }, pn9.b/Z, [x23]\n"
+ "addvl x23, x23, #4\n"
+ ".inst 0x81b002a0 // fmopa za0.s, p0/M, p0/M, z21.h, z16.h\n"
+ ".inst 0x81b402a1 // fmopa za1.s, p0/M, p0/M, z21.h, z20.h\n"
+ ".inst 0x81b802a2 // fmopa za2.s, p0/M, p0/M, z21.h, z24.h\n"
+ ".inst 0x81bc02a3 // fmopa za3.s, p0/M, p0/M, z21.h, z28.h\n"
+ "bgt 9b\n"
+ "10:" // K oddments: End
+ "tbz x13, #1, 14f\n"
+ "tbz x13, #0, 12f\n"
+ "mov x12, #0x0\n"
+ "cntw x20\n"
+ "11:" // Store to partial result buffer: Store and refill: Loop
+ ".inst 0xa040c574 // ld1w { z20.s-z23.s }, pn9.b/Z, [x11]\n"
+ ".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
+ ".inst 0xc0860438 // mova { z24.s-z27.s }, za1h.s[x12]\n"
+ ".inst 0xa041c56c // ld1w { z12.s-z15.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
+ ".inst 0xc0860444 // mova { z4.s-z7.s }, za2h.s[x12]\n"
+ ".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
+ ".inst 0xa042c568 // ld1w { z8.s-z11.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
+ ".inst 0xa043c570 // ld1w { z16.s-z19.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
+ ".inst 0xc0840680 // mova za0h.s[x12], { z20.s-z23.s }\n"
+ "addvl x11, x11, #16\n"
+ ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xa060c540 // st1w { z0.s-z3.s }, pn9.b, [x10]\n"
+ ".inst 0xc0840502 // mova za2h.s[x12], { z8.s-z11.s }\n"
+ ".inst 0xa061c558 // st1w { z24.s-z27.s }, pn9.b, [x10, #0x4, MUL VL]\n"
+ ".inst 0xc0840603 // mova za3h.s[x12], { z16.s-z19.s }\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0xa062c544 // st1w { z4.s-z7.s }, pn9.b, [x10, #0x8, MUL VL]\n"
+ "cmp x12, x20\n"
+ ".inst 0xa063c55c // st1w { z28.s-z31.s }, pn9.b, [x10, #0xc, MUL VL]\n"
+ "addvl x10, x10, #16\n"
+ "blt 11b\n"
+ "b 18f\n"
+ "12:" // Store to partial result buffer: Store only
+ "mov x12, #0x0\n"
+ "cntw x20\n"
+ "13:" // Store to partial result buffer: Store only: Loop
+ ".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
+ ".inst 0xc0860428 // mova { z8.s-z11.s }, za1h.s[x12]\n"
+ ".inst 0xc086044c // mova { z12.s-z15.s }, za2h.s[x12]\n"
+ ".inst 0xc0860464 // mova { z4.s-z7.s }, za3h.s[x12]\n"
+ ".inst 0xa060c540 // st1w { z0.s-z3.s }, pn9.b, [x10]\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0xa061c548 // st1w { z8.s-z11.s }, pn9.b, [x10, #0x4, MUL VL]\n"
+ "cmp x12, x20\n"
+ ".inst 0xa062c54c // st1w { z12.s-z15.s }, pn9.b, [x10, #0x8, MUL VL]\n"
+ ".inst 0xa063c544 // st1w { z4.s-z7.s }, pn9.b, [x10, #0xc, MUL VL]\n"
+ "addvl x10, x10, #16\n"
+ "blt 13b\n"
+ "b 18f\n"
+ "14:" // Store to output array
+ "ldr x23, [%x[args], %[offsetof_C]]\n"
+ "sub x22, x9, x28\n"
+ "cntw x21\n"
+ "ld1rh { z17.h }, p0/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
+ "ldr x20, [%x[args], %[offsetof_ldcb]]\n"
+ ".inst 0x257a4770 // whilelt pn8.h, x27, x26, VLx2\n"
+ "cmp x22, x21\n"
+ "ld1rh { z16.h }, p0/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
+ "mov x12, #0x0\n"
+ "csel x22, x22, x21, LT\n"
+ "add x23, x23, x27, LSL #1\n" // C += n
+ "madd x23, x28, x20, x23\n" // C += m * ldc
+ "15:" // Store to output array: Accumulator loop
+ ".inst 0xc0060414 // mova { z20.b-z23.b }, za0h.b[x12, 0:3]\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0xc120e28e // fcvt z14.h, { z20.s-z21.s }\n"
+ ".inst 0xc120e2cf // fcvt z15.h, { z22.s-z23.s }\n"
+ "cmp x12, x22, LSL #2\n"
+ ".inst 0xc170c22e // fclamp { z14.h-z15.h }, z17.h, z16.h\n"
+ ".inst 0xa06022ee // st1h { z14.h-z15.h }, p8, [x23]\n"
+ "add x23, x23, x20\n"
+ "blt 15b\n"
+ "16:" // Store to output array: End
+ "tbz x13, #0, 18f\n"
+ "mov x12, #0x0\n"
+ "cntw x20\n"
+ "17:" // Store to output array: Refill accumulators: Loop
+ ".inst 0xa040c578 // ld1w { z24.s-z27.s }, pn9.b/Z, [x11]\n"
+ ".inst 0xa041c574 // ld1w { z20.s-z23.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
+ ".inst 0xa042c570 // ld1w { z16.s-z19.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
+ ".inst 0xa043c57c // ld1w { z28.s-z31.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
+ ".inst 0xc0840700 // mova za0h.s[x12], { z24.s-z27.s }\n"
+ "addvl x11, x11, #16\n"
+ ".inst 0xc0840681 // mova za1h.s[x12], { z20.s-z23.s }\n"
+ ".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xc0840783 // mova za3h.s[x12], { z28.s-z31.s }\n"
+ "add x12, x12, #0x4\n"
+ "cmp x12, x20\n"
+ "blt 17b\n"
+ "18:" // End block
+ "incw x27, ALL, MUL #4\n"
+ "cmp x27, x26\n"
+ "blt 3b\n"
+ "incw x28\n"
+ "mov x27, #0x0\n"
+ "cmp x28, x9\n"
+ "mov x25, x24\n"
+ "blt 3b\n"
+ ".inst 0xd503467f // SMSTOP\n"
+ :
+ : [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_KernelArgs_max] "I" (offsetof(KernelArgs, max)), [offsetof_KernelArgs_min] "I" (offsetof(KernelArgs, min)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb))
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
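
[Editor's note] End to end, this kernel implements the fp16fp32fp16 scheme its name describes: fp16 A and B panels feed FMOPA outer products that accumulate in fp32 ZA tiles; bias is injected up front as an outer product between a ones vector and the bias row zip-interleaved with zeros; partial results spill to and refill from the fp32 accumulator_buffer; and the store path narrows with FCVT and bounds the result with FCLAMP using the min/max from KernelArgs. A scalar reference of the arithmetic (a sketch only: the real kernel consumes pre-interleaved panels, while this uses plain row-major storage):

#include <algorithm>
#include <cstddef>

using fp16 = __fp16;  // assumes an fp16-capable AArch64 toolchain

void gemm_fp16fp32fp16_ref(const fp16 *A, const fp16 *B, fp16 *C, size_t ldc,
                           size_t M, size_t N, size_t K,
                           const fp16 *bias, float minv, float maxv) {
    for (size_t m = 0; m < M; m++) {
        for (size_t n = 0; n < N; n++) {
            // Bias (when present) seeds the fp32 accumulator, as the
            // ones-vector FMOPA does before the K loop.
            float acc = bias ? static_cast<float>(bias[n]) : 0.0f;
            for (size_t k = 0; k < K; k++) {
                acc += static_cast<float>(A[m * K + k]) * static_cast<float>(B[k * N + n]);
            }
            // Activation bounds applied on the way out, like FCLAMP at the store.
            acc = std::min(std::max(acc, minv), maxv);
            C[m * ldc + n] = static_cast<fp16>(acc);
        }
    }
}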
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL.hpp
new file mode 100644
index 0000000000..5bd34b2ca0
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL.hpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+
+#include "../std_transforms_sme.hpp"
+
+namespace arm_gemm
+{
+
+// Implementations
+void sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL(const __fp16 *const A, const __fp16 *const B, __fp16 *const C, int ldc, const int M, const int N, const int K, const __fp16 *const bias, const Activation act, bool accumulate, float *const accumulator_buffer);
+
+class cls_sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL
+{
+public:
+ typedef __fp16 operand_type;
+ typedef __fp16 result_type;
+
+ typedef void (*kern_type)(const __fp16 *const A, const __fp16 *const B, __fp16 *const C, int ldc, const int M, const int N, const int K, const __fp16 *const bias, const Activation act, bool accumulate, float *const accumulator_buffer);
+
+ /* Kernel blocking parameters */
+ static unsigned int out_height()
+ {
+ return sme::get_vector_length<float>() * 2;
+ }
+
+ static unsigned int out_width()
+ {
+ return sme::get_vector_length<float>() * 2;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 2;
+ }
+
+ static constexpr bool supports_accumulate()
+ {
+ return true;
+ }
+
+ static constexpr bool supports_bias()
+ {
+ return true;
+ }
+
+ static constexpr bool supports_activation()
+ {
+ return true;
+ }
+
+ static constexpr bool is_sme()
+ {
+ return true;
+ }
+
+ // Default to the generic kernel
+ kern_type kernel = sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL;
+
+ StdTransformsSME<operand_type, result_type, 2, 2, 2> transforms = {};
+
+ cls_sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL(const CPUInfo *)
+ {
+ }
+};
+
+} // namespace arm_gemm
+
+#endif // ARM_COMPUTE_ENABLE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL/generic.cpp
new file mode 100644
index 0000000000..5c48f953e8
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL/generic.cpp
@@ -0,0 +1,452 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+
+#include "arm_gemm.hpp"
+
+
+#include "../../asmlib.hpp"
+#include "../../utils.hpp"
+
+namespace arm_gemm {
+
+void sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL(const __fp16 *const A, const __fp16 *const B, __fp16 *const C, int ldc, const int M, const int N, const int K, const __fp16 *const bias, const Activation act, bool accumulate, float *const accumulator_buffer)
+{
+ struct KernelArgs
+ {
+ KernelArgs(
+ const __fp16 *const A,
+ const __fp16 *const B,
+ __fp16 *const C, const int ldc,
+ const int M, const int N, const int K,
+ const __fp16 *const bias,
+ const Activation act,
+ bool accumulate,
+ float *const accumulator_buffer
+ ) : A(A),
+ B(B), kstride_bytes(roundup(K, 2) * sizeof(__fp16)),
+ C(C), ldcb(ldc * sizeof(__fp16)),
+ M(M), N(N), K(K),
+ min(-static_cast<__fp16>(std::numeric_limits<float>::infinity())),
+ max(static_cast<__fp16>(std::numeric_limits<float>::infinity())),
+ bias(bias),
+ accumulator_buffer(accumulator_buffer),
+ flags(0x0)
+ {
+ if (accumulate)
+ {
+ flags |= 1 << 0; // FILL_ACCUMULATORS_FROM_BUFFER
+ }
+ if (C == nullptr)
+ {
+ flags |= 1 << 1; // STORE_ACCUMULATORS_TO_BUFFER
+ }
+
+ // Initialise the activation values
+ switch (act.type)
+ {
+ default:
+ case Activation::Type::None:
+ break;
+ case Activation::Type::BoundedReLU:
+ this->max = static_cast<__fp16>(act.param1);
+ /* fall through */
+ case Activation::Type::ReLU:
+ this->min = static_cast<__fp16>(0);
+ break;
+ }
+ }
+
+ const __fp16 *const A;
+ const __fp16 *const B;
+ const long kstride_bytes;
+ __fp16 *const C;
+ const long ldcb;
+ const long M, N, K;
+ __fp16 min = -static_cast<__fp16>(std::numeric_limits<float>::infinity());
+ __fp16 max = static_cast<__fp16>(std::numeric_limits<float>::infinity());
+
+ const __fp16 *const bias;
+
+ float *const accumulator_buffer;
+ uint64_t flags;
+ };
+
+ // Construct arguments for this kernel
+ KernelArgs args(A, B, C, ldc, M, N, K, bias, act, accumulate, accumulator_buffer);
+
+ __asm__ __volatile__(
+ "ldr x16, [%x[args], %[offsetof_flags]]\n"
+ ".inst 0xd503477f // SMSTART ZA\n"
+ "ptrue p1.b\n"
+ ".inst 0x25207810 // ptrue pn8.b\n"
+ "ldr x15, [%x[args], %[offsetof_accumulator_buffer]]\n"
+ "ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
+ "tbz x16, #0, 2f\n"
+ "mov x12, #0x0\n"
+ "cntw x20\n"
+ "1:" // Initial accumulator load from buffer: Loop
+ ".inst 0xa040c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xa041c1e0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa042c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c1f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840401 // mova za1h.s[x12], { z0.s-z3.s }\n"
+ ".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xc0840683 // mova za3h.s[x12], { z20.s-z23.s }\n"
+ "add x12, x12, #0x4\n"
+ "cmp x12, x20\n"
+ "blt 1b\n"
+ "2:" // Initial accumulator load from buffer: End
+ "ldr w13, [%x[args], %[offsetof_M]]\n"
+ "mov x11, #0x0\n"
+ "mov x10, #0x0\n"
+ "ldr w9, [%x[args], %[offsetof_N]]\n"
+ "ldr x28, [%x[args], %[offsetof_A]]\n"
+ "3:" // M and N loop
+ "mov x27, x28\n"
+ "tbnz x16, #0, 4f\n"
+ "ldr x20, [%x[args], %[offsetof_bias]]\n"
+ ".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+ "cbz x20, 5f\n"
+ "whilelt p0.h, x10, x9\n"
+ "fmov z10.h, #0.0\n"
+ "fmov z11.h, #1.0\n"
+ "ld1h { z18.h }, p0/Z, [x20, x10, LSL #1]\n"
+ "zip1 z2.h, z18.h, z10.h\n"
+ "zip2 z19.h, z18.h, z10.h\n"
+ ".inst 0x81a22560 // fmopa za0.s, p1/M, p1/M, z11.h, z2.h\n"
+ ".inst 0x81b32561 // fmopa za1.s, p1/M, p1/M, z11.h, z19.h\n"
+ ".inst 0x81a22562 // fmopa za2.s, p1/M, p1/M, z11.h, z2.h\n"
+ ".inst 0x81b32563 // fmopa za3.s, p1/M, p1/M, z11.h, z19.h\n"
+ "4:" // Prepare accumulators: Test for last block
+ "mov x20, x10\n"
+ "mov x21, x11\n"
+ "incw x20, ALL, MUL #2\n"
+ "incw x21, ALL, MUL #2\n"
+ "cmp x20, x9\n"
+ "mov x20, x16\n"
+ "csel x21, x11, x21, LT\n"
+ "bfm x16, XZR, #0x0, #0x0 // bfc x16, #0x0, #0x1\n"
+ "cmp x21, x13\n"
+ "csel x16, x20, x16, LT\n"
+ "5:" // Prepare accumulators: End
+ "ldr x20, [%x[args], %[offsetof_K]]\n"
+ "ldr x23, [%x[args], %[offsetof_B]]\n"
+ "ldr x22, [%x[args], %[offsetof_kstride_bytes]]\n"
+ "add x20, x20, #0x1\n"
+ "lsr x20, x20, #0x1\n"
+ "lsr x21, x20, #0x2\n"
+ "and x20, x20, #0x3\n"
+ "madd x23, x10, x22, x23\n" // bptr = B + n * kstride_bytes
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa0402374 // ld1h { z20.h-z21.h }, pn8.b/Z, [x27]\n"
+ ".inst 0xa14022ed // ldnt1h { z5.h, z13.h }, pn8.b/Z, [x23]\n"
+ ".inst 0xa041236a // ld1h { z10.h-z11.h }, pn8.b/Z, [x27, #0x2, MUL VL]\n"
+ ".inst 0xa14122ec // ldnt1h { z4.h, z12.h }, pn8.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa0422372 // ld1h { z18.h-z19.h }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa04222fb // ldnt1h { z26.h-z27.h }, pn8.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa1432366 // ld1h { z6.h, z14.h }, pn8.b/Z, [x27, #0x6, MUL VL]\n"
+ "addvl x27, x27, #8\n"
+ ".inst 0xa04322f9 // ldnt1h { z24.h-z25.h }, pn8.b/Z, [x23, #0x6, MUL VL]\n"
+ "addvl x23, x23, #8\n"
+ "ble 7f\n"
+ "6:" // K loop
+ ".inst 0x81a52680 // fmopa za0.s, p1/M, p1/M, z20.h, z5.h\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0x81ad2681 // fmopa za1.s, p1/M, p1/M, z20.h, z13.h\n"
+ ".inst 0x81a526a2 // fmopa za2.s, p1/M, p1/M, z21.h, z5.h\n"
+ ".inst 0x81ad26a3 // fmopa za3.s, p1/M, p1/M, z21.h, z13.h\n"
+ ".inst 0xa0402374 // ld1h { z20.h-z21.h }, pn8.b/Z, [x27]\n"
+ ".inst 0x81a42540 // fmopa za0.s, p1/M, p1/M, z10.h, z4.h\n"
+ ".inst 0xa14022e5 // ld1h { z5.h, z13.h }, pn8.b/Z, [x23]\n"
+ ".inst 0x81ac2541 // fmopa za1.s, p1/M, p1/M, z10.h, z12.h\n"
+ ".inst 0x81a42562 // fmopa za2.s, p1/M, p1/M, z11.h, z4.h\n"
+ ".inst 0x81ac2563 // fmopa za3.s, p1/M, p1/M, z11.h, z12.h\n"
+ ".inst 0xa041236a // ld1h { z10.h-z11.h }, pn8.b/Z, [x27, #0x2, MUL VL]\n"
+ ".inst 0x81ba2640 // fmopa za0.s, p1/M, p1/M, z18.h, z26.h\n"
+ ".inst 0xa14122e4 // ld1h { z4.h, z12.h }, pn8.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0x81bb2641 // fmopa za1.s, p1/M, p1/M, z18.h, z27.h\n"
+ ".inst 0x81ba2662 // fmopa za2.s, p1/M, p1/M, z19.h, z26.h\n"
+ ".inst 0x81bb2663 // fmopa za3.s, p1/M, p1/M, z19.h, z27.h\n"
+ ".inst 0xa0422372 // ld1h { z18.h-z19.h }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa04222fa // ld1h { z26.h-z27.h }, pn8.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0x81b824c0 // fmopa za0.s, p1/M, p1/M, z6.h, z24.h\n"
+ ".inst 0x81b924c1 // fmopa za1.s, p1/M, p1/M, z6.h, z25.h\n"
+ ".inst 0x81b825c2 // fmopa za2.s, p1/M, p1/M, z14.h, z24.h\n"
+ ".inst 0x81b925c3 // fmopa za3.s, p1/M, p1/M, z14.h, z25.h\n"
+ ".inst 0xa1432366 // ld1h { z6.h, z14.h }, pn8.b/Z, [x27, #0x6, MUL VL]\n"
+ "addvl x27, x27, #8\n"
+ ".inst 0xa04322f8 // ld1h { z24.h-z25.h }, pn8.b/Z, [x23, #0x6, MUL VL]\n"
+ "addvl x23, x23, #8\n"
+ "bgt 6b\n"
+ "7:" // K loop tail
+ ".inst 0x81a52680 // fmopa za0.s, p1/M, p1/M, z20.h, z5.h\n"
+ ".inst 0x81ad2681 // fmopa za1.s, p1/M, p1/M, z20.h, z13.h\n"
+ ".inst 0x81a526a2 // fmopa za2.s, p1/M, p1/M, z21.h, z5.h\n"
+ ".inst 0x81ad26a3 // fmopa za3.s, p1/M, p1/M, z21.h, z13.h\n"
+ ".inst 0x81a42540 // fmopa za0.s, p1/M, p1/M, z10.h, z4.h\n"
+ ".inst 0x81ac2541 // fmopa za1.s, p1/M, p1/M, z10.h, z12.h\n"
+ ".inst 0x81a42562 // fmopa za2.s, p1/M, p1/M, z11.h, z4.h\n"
+ ".inst 0x81ac2563 // fmopa za3.s, p1/M, p1/M, z11.h, z12.h\n"
+ ".inst 0x81ba2640 // fmopa za0.s, p1/M, p1/M, z18.h, z26.h\n"
+ ".inst 0x81bb2641 // fmopa za1.s, p1/M, p1/M, z18.h, z27.h\n"
+ ".inst 0x81ba2662 // fmopa za2.s, p1/M, p1/M, z19.h, z26.h\n"
+ ".inst 0x81bb2663 // fmopa za3.s, p1/M, p1/M, z19.h, z27.h\n"
+ ".inst 0x81b824c0 // fmopa za0.s, p1/M, p1/M, z6.h, z24.h\n"
+ ".inst 0x81b924c1 // fmopa za1.s, p1/M, p1/M, z6.h, z25.h\n"
+ ".inst 0x81b825c2 // fmopa za2.s, p1/M, p1/M, z14.h, z24.h\n"
+ ".inst 0x81b925c3 // fmopa za3.s, p1/M, p1/M, z14.h, z25.h\n"
+ "8:" // K oddments
+ "cbz x20, 10f\n"
+ "9:" // K oddments: Loop
+ ".inst 0xa0402374 // ld1h { z20.h-z21.h }, pn8.b/Z, [x27]\n"
+ "subs x20, x20, #0x1\n"
+ "addvl x27, x27, #2\n"
+ ".inst 0xa14022e5 // ld1h { z5.h, z13.h }, pn8.b/Z, [x23]\n"
+ "addvl x23, x23, #2\n"
+ ".inst 0x81a52680 // fmopa za0.s, p1/M, p1/M, z20.h, z5.h\n"
+ ".inst 0x81ad2681 // fmopa za1.s, p1/M, p1/M, z20.h, z13.h\n"
+ ".inst 0x81a526a2 // fmopa za2.s, p1/M, p1/M, z21.h, z5.h\n"
+ ".inst 0x81ad26a3 // fmopa za3.s, p1/M, p1/M, z21.h, z13.h\n"
+ "bgt 9b\n"
+ "10:" // K oddments: End
+ "tbz x16, #1, 14f\n"
+ "tbz x16, #0, 12f\n"
+ "mov x12, #0x0\n"
+ "cntw x20\n"
+ "11:" // Store to partial result buffer: Store and refill: Loop
+ ".inst 0xa040c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
+ ".inst 0xc0860428 // mova { z8.s-z11.s }, za1h.s[x12]\n"
+ ".inst 0xa041c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xc0860454 // mova { z20.s-z23.s }, za2h.s[x12]\n"
+ ".inst 0xc0860460 // mova { z0.s-z3.s }, za3h.s[x12]\n"
+ ".inst 0xa042c1f8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xa060c1c4 // st1w { z4.s-z7.s }, pn8.b, [x14]\n"
+ ".inst 0xc0840702 // mova za2h.s[x12], { z24.s-z27.s }\n"
+ ".inst 0xa061c1c8 // st1w { z8.s-z11.s }, pn8.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xc0840783 // mova za3h.s[x12], { z28.s-z31.s }\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0xa062c1d4 // st1w { z20.s-z23.s }, pn8.b, [x14, #0x8, MUL VL]\n"
+ "cmp x12, x20\n"
+ ".inst 0xa063c1c0 // st1w { z0.s-z3.s }, pn8.b, [x14, #0xc, MUL VL]\n"
+ "addvl x14, x14, #16\n"
+ "blt 11b\n"
+ "b 23f\n"
+ "12:" // Store to partial result buffer: Store only
+ "mov x12, #0x0\n"
+ "cntw x20\n"
+ "13:" // Store to partial result buffer: Store only: Loop
+ ".inst 0xc086041c // mova { z28.s-z31.s }, za0h.s[x12]\n"
+ ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
+ ".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
+ ".inst 0xc0860478 // mova { z24.s-z27.s }, za3h.s[x12]\n"
+ ".inst 0xa060c1dc // st1w { z28.s-z31.s }, pn8.b, [x14]\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0xa061c1c4 // st1w { z4.s-z7.s }, pn8.b, [x14, #0x4, MUL VL]\n"
+ "cmp x12, x20\n"
+ ".inst 0xa062c1c8 // st1w { z8.s-z11.s }, pn8.b, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa063c1d8 // st1w { z24.s-z27.s }, pn8.b, [x14, #0xc, MUL VL]\n"
+ "addvl x14, x14, #16\n"
+ "blt 13b\n"
+ "b 23f\n"
+ "14:" // Store to output array
+ "ldr x26, [%x[args], %[offsetof_C]]\n"
+ "sub x25, x13, x11\n"
+ "cntw x24\n"
+ "ld1rh { z20.h }, p1/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
+ "ldr x23, [%x[args], %[offsetof_ldcb]]\n"
+ "whilelt p0.h, x10, x9\n"
+ "cmp x25, x24\n"
+ "ld1rh { z19.h }, p1/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
+ "csel x22, x25, x24, LT\n"
+ "mov x12, #0x0\n"
+ "add x26, x26, x10, LSL #1\n" // C += n
+ "lsr x21, x22, #0x2\n"
+ "madd x26, x11, x23, x26\n" // C += m * ldc
+ "and x20, x22, #0x3\n"
+ "cbz x21, 16f\n"
+ "15:" // Store to output array: Accumulator row 0 loop
+ ".inst 0xc0860408 // mova { z8.s-z11.s }, za0h.s[x12]\n"
+ ".inst 0xc086043c // mova { z28.s-z31.s }, za1h.s[x12]\n"
+ "fcvt z8.h, p1/m, z8.s\n"
+ "fcvt z9.h, p1/m, z9.s\n"
+ "fcvt z10.h, p1/m, z10.s\n"
+ "fcvt z11.h, p1/m, z11.s\n"
+ "add x12, x12, #0x4\n"
+ "fcvt z28.h, p1/m, z28.s\n"
+ "fcvt z29.h, p1/m, z29.s\n"
+ "cmp x12, x21, LSL #2\n"
+ "fcvt z30.h, p1/m, z30.s\n"
+ "fcvt z31.h, p1/m, z31.s\n"
+ ".inst 0xc173ca88 // fclamp { z8.h-z11.h }, z20.h, z19.h\n"
+ ".inst 0xc173ca9c // fclamp { z28.h-z31.h }, z20.h, z19.h\n"
+ "uzp1 z16.h, z8.h, z28.h\n"
+ "st1h { z16.h }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "uzp1 z18.h, z9.h, z29.h\n"
+ "uzp1 z17.h, z10.h, z30.h\n"
+ "uzp1 z16.h, z11.h, z31.h\n"
+ "st1h { z18.h }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "st1h { z17.h }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "st1h { z16.h }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "blt 15b\n"
+ "16:" // Store to output array: Accumulator row 0 oddments
+ "cbz x20, 17f\n"
+ ".inst 0xc0860408 // mova { z8.s-z11.s }, za0h.s[x12]\n"
+ ".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
+ "fcvt z8.h, p1/m, z8.s\n"
+ "fcvt z9.h, p1/m, z9.s\n"
+ "fcvt z10.h, p1/m, z10.s\n"
+ "fcvt z11.h, p1/m, z11.s\n"
+ "subs x20, x20, #0x1\n"
+ "fcvt z12.h, p1/m, z12.s\n"
+ "fcvt z13.h, p1/m, z13.s\n"
+ "fcvt z14.h, p1/m, z14.s\n"
+ "fcvt z15.h, p1/m, z15.s\n"
+ ".inst 0xc173ca88 // fclamp { z8.h-z11.h }, z20.h, z19.h\n"
+ ".inst 0xc173ca8c // fclamp { z12.h-z15.h }, z20.h, z19.h\n"
+ "uzp1 z16.h, z8.h, z12.h\n"
+ "st1h { z16.h }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "beq 17f\n"
+ "subs x20, x20, #0x1\n"
+ "uzp1 z16.h, z9.h, z13.h\n"
+ "st1h { z16.h }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "beq 17f\n"
+ "uzp1 z16.h, z10.h, z14.h\n"
+ "st1h { z16.h }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "17:" // Store to output array: Accumulator row 0 oddments: End
+ "subs x25, x25, x22\n"
+ "beq 21f\n"
+ "whilelt p0.h, x10, x9\n"
+ "cmp x25, x24\n"
+ "csel x20, x25, x24, LT\n"
+ "mov x12, #0x0\n"
+ "lsr x21, x20, #0x2\n"
+ "and x20, x20, #0x3\n"
+ "cbz x21, 19f\n"
+ "18:" // Store to output array: Accumulator row 1 loop
+ ".inst 0xc0860440 // mova { z0.s-z3.s }, za2h.s[x12]\n"
+ ".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
+ "fcvt z0.h, p1/m, z0.s\n"
+ "fcvt z1.h, p1/m, z1.s\n"
+ "fcvt z2.h, p1/m, z2.s\n"
+ "fcvt z3.h, p1/m, z3.s\n"
+ "add x12, x12, #0x4\n"
+ "fcvt z28.h, p1/m, z28.s\n"
+ "fcvt z29.h, p1/m, z29.s\n"
+ "cmp x12, x21, LSL #2\n"
+ "fcvt z30.h, p1/m, z30.s\n"
+ "fcvt z31.h, p1/m, z31.s\n"
+ ".inst 0xc173ca80 // fclamp { z0.h-z3.h }, z20.h, z19.h\n"
+ ".inst 0xc173ca9c // fclamp { z28.h-z31.h }, z20.h, z19.h\n"
+ "uzp1 z16.h, z0.h, z28.h\n"
+ "st1h { z16.h }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "uzp1 z18.h, z1.h, z29.h\n"
+ "uzp1 z17.h, z2.h, z30.h\n"
+ "uzp1 z16.h, z3.h, z31.h\n"
+ "st1h { z18.h }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "st1h { z17.h }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "st1h { z16.h }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "blt 18b\n"
+ "19:" // Store to output array: Accumulator row 1 oddments
+ "cbz x20, 20f\n"
+ ".inst 0xc086045c // mova { z28.s-z31.s }, za2h.s[x12]\n"
+ ".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
+ "fcvt z28.h, p1/m, z28.s\n"
+ "fcvt z29.h, p1/m, z29.s\n"
+ "fcvt z30.h, p1/m, z30.s\n"
+ "fcvt z31.h, p1/m, z31.s\n"
+ "subs x20, x20, #0x1\n"
+ "fcvt z12.h, p1/m, z12.s\n"
+ "fcvt z13.h, p1/m, z13.s\n"
+ "fcvt z14.h, p1/m, z14.s\n"
+ "fcvt z15.h, p1/m, z15.s\n"
+ ".inst 0xc173ca9c // fclamp { z28.h-z31.h }, z20.h, z19.h\n"
+ ".inst 0xc173ca8c // fclamp { z12.h-z15.h }, z20.h, z19.h\n"
+ "uzp1 z16.h, z28.h, z12.h\n"
+ "st1h { z16.h }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "beq 20f\n"
+ "subs x20, x20, #0x1\n"
+ "uzp1 z16.h, z29.h, z13.h\n"
+ "st1h { z16.h }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "beq 20f\n"
+ "uzp1 z16.h, z30.h, z14.h\n"
+ "st1h { z16.h }, p0, [x26]\n"
+ "20:" // Store to output array: Accumulator row 1 oddments: End
+ "21:" // Store to output array: End
+ "tbz x16, #0, 23f\n"
+ "mov x12, #0x0\n"
+ "cntw x20\n"
+ "22:" // Store to output array: Refill accumulators: Loop
+ ".inst 0xa040c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xa041c1e8 // ld1w { z8.s-z11.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa042c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840600 // mova za0h.s[x12], { z16.s-z19.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840501 // mova za1h.s[x12], { z8.s-z11.s }\n"
+ ".inst 0xc0840582 // mova za2h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xc0840783 // mova za3h.s[x12], { z28.s-z31.s }\n"
+ "add x12, x12, #0x4\n"
+ "cmp x12, x20\n"
+ "blt 22b\n"
+ "23:" // End block
+ "incw x10, ALL, MUL #2\n"
+ "cmp x10, x9\n"
+ "blt 3b\n"
+ "incw x11, ALL, MUL #2\n"
+ "mov x10, #0x0\n"
+ "cmp x11, x13\n"
+ "mov x28, x27\n"
+ "blt 3b\n"
+ ".inst 0xd503467f // SMSTOP\n"
+ :
+ : [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_KernelArgs_max] "I" (offsetof(KernelArgs, max)), [offsetof_KernelArgs_min] "I" (offsetof(KernelArgs, min)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb))
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL.hpp
new file mode 100644
index 0000000000..05029f04b0
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL.hpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+
+#include "../std_transforms_sme.hpp"
+
+namespace arm_gemm
+{
+
+// Implementations
+void sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL(const __fp16 *const A, const __fp16 *const B, __fp16 *const C, int ldc, const int M, const int N, const int K, const __fp16 *const bias, const Activation act, bool accumulate, float *const accumulator_buffer);
+
+class cls_sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL
+{
+public:
+ typedef __fp16 operand_type;
+ typedef __fp16 result_type;
+
+ typedef void (*kern_type)(const __fp16 *const A, const __fp16 *const B, __fp16 *const C, int ldc, const int M, const int N, const int K, const __fp16 *const bias, const Activation act, bool accumulate, float *const accumulator_buffer);
+
+ /* Kernel blocking parameters */
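+ // One block of work covers (4 x VL) rows by (1 x VL) columns of C, where VL
+ // is the number of 32-bit lanes in a streaming SVE vector.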
+ static unsigned int out_height()
+ {
+ return sme::get_vector_length<float>() * 4;
+ }
+
+ static unsigned int out_width()
+ {
+ return sme::get_vector_length<float>() * 1;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 2;
+ }
+
+ static constexpr bool supports_accumulate()
+ {
+ return true;
+ }
+
+ static constexpr bool supports_bias()
+ {
+ return true;
+ }
+
+ static constexpr bool supports_activation()
+ {
+ return true;
+ }
+
+ static constexpr bool is_sme()
+ {
+ return true;
+ }
+
+ // Default to the generic kernel
+ kern_type kernel = sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL;
+
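+ // Blocking for the transforms mirrors the kernel: 4 VLs tall, 1 VL wide,
+ // with K rounded to multiples of 2 (the fp16 pair consumed by each FMOPA).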
+ StdTransformsSME<operand_type, result_type, 4, 1, 2> transforms = {};
+
+ cls_sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL(const CPUInfo *)
+ {
+ }
+};
+
+} // namespace arm_gemm
+
+#endif // ARM_COMPUTE_ENABLE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL/generic.cpp
new file mode 100644
index 0000000000..8728cff31d
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL/generic.cpp
@@ -0,0 +1,506 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+
+#include "arm_gemm.hpp"
+
+
+#include "../../asmlib.hpp"
+#include "../../utils.hpp"
+
+namespace arm_gemm {
+
+void sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL(const __fp16 *const A, const __fp16 *const B, __fp16 *const C, int ldc, const int M, const int N, const int K, const __fp16 *const bias, const Activation act, bool accumulate, float *const accumulator_buffer)
+{
+ struct KernelArgs
+ {
+ KernelArgs(
+ const __fp16 *const A,
+ const __fp16 *const B,
+ __fp16 *const C, const int ldc,
+ const int M, const int N, const int K,
+ const __fp16 *const bias,
+ const Activation act,
+ bool accumulate,
+ float *const accumulator_buffer
+ ) : A(A),
+ B(B), kstride_bytes(roundup(K, 2) * sizeof(__fp16)),
+ C(C), ldcb(ldc * sizeof(__fp16)),
+ M(M), N(N), K(K),
+ min(-static_cast<__fp16>(std::numeric_limits<float>::infinity())),
+ max(static_cast<__fp16>(std::numeric_limits<float>::infinity())),
+ bias(bias),
+ accumulator_buffer(accumulator_buffer),
+ flags(0x0)
+ {
+ if (accumulate)
+ {
+ flags |= 1 << 0; // FILL_ACCUMULATORS_FROM_BUFFER
+ }
+ if (C == nullptr)
+ {
+ flags |= 1 << 1; // STORE_ACCUMULATORS_TO_BUFFER
+ }
+
+ // Initialise the activation values
+ switch (act.type)
+ {
+ default:
+ case Activation::Type::None:
+ break;
+ case Activation::Type::BoundedReLU:
+ this->max = static_cast<__fp16>(act.param1);
+ /* fall through */
+ case Activation::Type::ReLU:
+ this->min = static_cast<__fp16>(0);
+ break;
+ }
+ }
+
+ const __fp16 *const A;
+ const __fp16 *const B;
+ const long kstride_bytes;
+ __fp16 *const C;
+ const long ldcb;
+ const long M, N, K;
+ __fp16 min = -static_cast<__fp16>(std::numeric_limits<float>::infinity());
+ __fp16 max = static_cast<__fp16>(std::numeric_limits<float>::infinity());
+
+ const __fp16 *const bias;
+
+ float *const accumulator_buffer;
+ uint64_t flags;
+ };
+
+ // Construct arguments for this kernel
+ KernelArgs args(A, B, C, ldc, M, N, K, bias, act, accumulate, accumulator_buffer);
+
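+ // What the assembly below computes, as a scalar sketch (illustrative only:
+ // A and B are pre-interleaved in memory, the indices here are logical, and
+ // FMOPA performs the fp16 multiplies in pairs with fp32 accumulation):
+ //
+ //   float acc = accumulate ? <staged value> : (bias ? (float)bias[n] : 0.0f);
+ //   for (int k = 0; k < K; k++)
+ //       acc += (float)A[m][k] * (float)B[k][n];
+ //   C[m * ldc + n] = (__fp16)std::min((float)args.max, std::max((float)args.min, acc));
+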
+ __asm__ __volatile__(
+ "ldr x16, [%x[args], %[offsetof_flags]]\n"
+ ".inst 0xd503477f // SMSTART ZA\n"
+ "ptrue p1.b\n"
+ ".inst 0x25207811 // ptrue pn9.b\n"
+ "ldr x15, [%x[args], %[offsetof_accumulator_buffer]]\n"
+ "ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
+ "tbz x16, #0, 2f\n"
+ "mov x12, #0x0\n"
+ "cntw x20\n"
+ "1:" // Initial accumulator load from buffer: Loop
+ ".inst 0xa040c5f8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa041c5e4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa042c5e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c5f4 // ld1w { z20.s-z23.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840700 // mova za0h.s[x12], { z24.s-z27.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840481 // mova za1h.s[x12], { z4.s-z7.s }\n"
+ ".inst 0xc0840502 // mova za2h.s[x12], { z8.s-z11.s }\n"
+ ".inst 0xc0840683 // mova za3h.s[x12], { z20.s-z23.s }\n"
+ "add x12, x12, #0x4\n"
+ "cmp x12, x20\n"
+ "blt 1b\n"
+ "2:" // Initial accumulator load from buffer: End
+ "ldr w13, [%x[args], %[offsetof_M]]\n"
+ "mov x11, #0x0\n"
+ "mov x10, #0x0\n"
+ "ldr w9, [%x[args], %[offsetof_N]]\n"
+ "ldr x28, [%x[args], %[offsetof_A]]\n"
+ "3:" // M and N loop
+ "mov x27, x28\n"
+ "whilelt p8.s, x10, x9\n"
+ "tbnz x16, #0, 4f\n"
+ "ldr x20, [%x[args], %[offsetof_bias]]\n"
+ ".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+ "cbz x20, 5f\n"
+ "whilelt p0.h, x10, x9\n"
+ "fmov z5.h, #0.0\n"
+ "fmov z18.h, #1.0\n"
+ "ld1h { z31.h }, p0/Z, [x20, x10, LSL #1]\n"
+ "zip1 z15.h, z31.h, z5.h\n"
+ ".inst 0x81af2640 // fmopa za0.s, p1/M, p1/M, z18.h, z15.h\n"
+ ".inst 0x81af2641 // fmopa za1.s, p1/M, p1/M, z18.h, z15.h\n"
+ ".inst 0x81af2642 // fmopa za2.s, p1/M, p1/M, z18.h, z15.h\n"
+ ".inst 0x81af2643 // fmopa za3.s, p1/M, p1/M, z18.h, z15.h\n"
+ "4:" // Prepare accumulators: Test for last block
+ "mov x20, x10\n"
+ "mov x21, x11\n"
+ "incw x20\n"
+ "incw x21, ALL, MUL #4\n"
+ "cmp x20, x9\n"
+ "mov x20, x16\n"
+ "csel x21, x11, x21, LT\n"
+ "bfm x16, XZR, #0x0, #0x0 // bfc x16, #0x0, #0x1\n"
+ "cmp x21, x13\n"
+ "csel x16, x20, x16, LT\n"
+ "5:" // Prepare accumulators: End
+ "ldr x20, [%x[args], %[offsetof_K]]\n"
+ "ldr x23, [%x[args], %[offsetof_B]]\n"
+ "ldr x22, [%x[args], %[offsetof_kstride_bytes]]\n"
+ "add x20, x20, #0x1\n"
+ "lsr x20, x20, #0x1\n"
+ "lsr x21, x20, #0x2\n"
+ "and x20, x20, #0x3\n"
+ "madd x23, x10, x22, x23\n" // bptr = B + n * kstride_bytes
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa140a773 // ld1h { z19.h, z23.h, z27.h, z31.h }, pn9.b/Z, [x27]\n"
+ "ldnt1h { z17.h }, p1/Z, [x23]\n"
+ ".inst 0xa041a76c // ld1h { z12.h-z15.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "ldnt1h { z26.h }, p1/Z, [x23, #1, MUL VL]\n"
+ ".inst 0xa042a760 // ld1h { z0.h-z3.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "ldnt1h { z30.h }, p1/Z, [x23, #2, MUL VL]\n"
+ ".inst 0xa143a770 // ld1h { z16.h, z20.h, z24.h, z28.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ "ldnt1h { z18.h }, p1/Z, [x23, #3, MUL VL]\n"
+ "addvl x23, x23, #4\n"
+ "ble 7f\n"
+ "6:" // K loop
+ ".inst 0x81b12660 // fmopa za0.s, p1/M, p1/M, z19.h, z17.h\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0x81b126e1 // fmopa za1.s, p1/M, p1/M, z23.h, z17.h\n"
+ ".inst 0x81b12762 // fmopa za2.s, p1/M, p1/M, z27.h, z17.h\n"
+ ".inst 0x81b127e3 // fmopa za3.s, p1/M, p1/M, z31.h, z17.h\n"
+ ".inst 0xa140a773 // ld1h { z19.h, z23.h, z27.h, z31.h }, pn9.b/Z, [x27]\n"
+ ".inst 0x81ba2580 // fmopa za0.s, p1/M, p1/M, z12.h, z26.h\n"
+ "ld1h { z17.h }, p1/Z, [x23]\n"
+ ".inst 0x81ba25a1 // fmopa za1.s, p1/M, p1/M, z13.h, z26.h\n"
+ ".inst 0x81ba25c2 // fmopa za2.s, p1/M, p1/M, z14.h, z26.h\n"
+ ".inst 0x81ba25e3 // fmopa za3.s, p1/M, p1/M, z15.h, z26.h\n"
+ ".inst 0xa041a76c // ld1h { z12.h-z15.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0x81be2400 // fmopa za0.s, p1/M, p1/M, z0.h, z30.h\n"
+ "ld1h { z26.h }, p1/Z, [x23, #1, MUL VL]\n"
+ ".inst 0x81be2421 // fmopa za1.s, p1/M, p1/M, z1.h, z30.h\n"
+ ".inst 0x81be2442 // fmopa za2.s, p1/M, p1/M, z2.h, z30.h\n"
+ ".inst 0x81be2463 // fmopa za3.s, p1/M, p1/M, z3.h, z30.h\n"
+ ".inst 0xa042a760 // ld1h { z0.h-z3.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "ld1h { z30.h }, p1/Z, [x23, #2, MUL VL]\n"
+ ".inst 0x81b22600 // fmopa za0.s, p1/M, p1/M, z16.h, z18.h\n"
+ ".inst 0x81b22681 // fmopa za1.s, p1/M, p1/M, z20.h, z18.h\n"
+ ".inst 0x81b22702 // fmopa za2.s, p1/M, p1/M, z24.h, z18.h\n"
+ ".inst 0x81b22783 // fmopa za3.s, p1/M, p1/M, z28.h, z18.h\n"
+ ".inst 0xa143a770 // ld1h { z16.h, z20.h, z24.h, z28.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ "ld1h { z18.h }, p1/Z, [x23, #3, MUL VL]\n"
+ "addvl x23, x23, #4\n"
+ "bgt 6b\n"
+ "7:" // K loop tail
+ ".inst 0x81b12660 // fmopa za0.s, p1/M, p1/M, z19.h, z17.h\n"
+ ".inst 0x81b126e1 // fmopa za1.s, p1/M, p1/M, z23.h, z17.h\n"
+ ".inst 0x81b12762 // fmopa za2.s, p1/M, p1/M, z27.h, z17.h\n"
+ ".inst 0x81b127e3 // fmopa za3.s, p1/M, p1/M, z31.h, z17.h\n"
+ ".inst 0x81ba2580 // fmopa za0.s, p1/M, p1/M, z12.h, z26.h\n"
+ ".inst 0x81ba25a1 // fmopa za1.s, p1/M, p1/M, z13.h, z26.h\n"
+ ".inst 0x81ba25c2 // fmopa za2.s, p1/M, p1/M, z14.h, z26.h\n"
+ ".inst 0x81ba25e3 // fmopa za3.s, p1/M, p1/M, z15.h, z26.h\n"
+ ".inst 0x81be2400 // fmopa za0.s, p1/M, p1/M, z0.h, z30.h\n"
+ ".inst 0x81be2421 // fmopa za1.s, p1/M, p1/M, z1.h, z30.h\n"
+ ".inst 0x81be2442 // fmopa za2.s, p1/M, p1/M, z2.h, z30.h\n"
+ ".inst 0x81be2463 // fmopa za3.s, p1/M, p1/M, z3.h, z30.h\n"
+ ".inst 0x81b22600 // fmopa za0.s, p1/M, p1/M, z16.h, z18.h\n"
+ ".inst 0x81b22681 // fmopa za1.s, p1/M, p1/M, z20.h, z18.h\n"
+ ".inst 0x81b22702 // fmopa za2.s, p1/M, p1/M, z24.h, z18.h\n"
+ ".inst 0x81b22783 // fmopa za3.s, p1/M, p1/M, z28.h, z18.h\n"
+ "8:" // K oddments
+ "cbz x20, 10f\n"
+ "9:" // K oddments: Loop
+ ".inst 0xa140a773 // ld1h { z19.h, z23.h, z27.h, z31.h }, pn9.b/Z, [x27]\n"
+ "subs x20, x20, #0x1\n"
+ "addvl x27, x27, #4\n"
+ "ld1h { z17.h }, p1/Z, [x23]\n"
+ "addvl x23, x23, #1\n"
+ ".inst 0x81b12660 // fmopa za0.s, p1/M, p1/M, z19.h, z17.h\n"
+ ".inst 0x81b126e1 // fmopa za1.s, p1/M, p1/M, z23.h, z17.h\n"
+ ".inst 0x81b12762 // fmopa za2.s, p1/M, p1/M, z27.h, z17.h\n"
+ ".inst 0x81b127e3 // fmopa za3.s, p1/M, p1/M, z31.h, z17.h\n"
+ "bgt 9b\n"
+ "10:" // K oddments: End
+ "tbz x16, #1, 14f\n"
+ "tbz x16, #0, 12f\n"
+ "mov x12, #0x0\n"
+ "cntw x20\n"
+ "11:" // Store to partial result buffer: Store and refill: Loop
+ ".inst 0xa040c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
+ ".inst 0xc0860434 // mova { z20.s-z23.s }, za1h.s[x12]\n"
+ ".inst 0xa041c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xc086045c // mova { z28.s-z31.s }, za2h.s[x12]\n"
+ ".inst 0xc0860478 // mova { z24.s-z27.s }, za3h.s[x12]\n"
+ ".inst 0xa042c5e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840400 // mova za0h.s[x12], { z0.s-z3.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xa060c5c4 // st1w { z4.s-z7.s }, pn9.b, [x14]\n"
+ ".inst 0xc0840502 // mova za2h.s[x12], { z8.s-z11.s }\n"
+ ".inst 0xa061c5d4 // st1w { z20.s-z23.s }, pn9.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xc0840603 // mova za3h.s[x12], { z16.s-z19.s }\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0xa062c5dc // st1w { z28.s-z31.s }, pn9.b, [x14, #0x8, MUL VL]\n"
+ "cmp x12, x20\n"
+ ".inst 0xa063c5d8 // st1w { z24.s-z27.s }, pn9.b, [x14, #0xc, MUL VL]\n"
+ "addvl x14, x14, #16\n"
+ "blt 11b\n"
+ "b 29f\n"
+ "12:" // Store to partial result buffer: Store only
+ "mov x12, #0x0\n"
+ "cntw x20\n"
+ "13:" // Store to partial result buffer: Store only: Loop
+ ".inst 0xc086041c // mova { z28.s-z31.s }, za0h.s[x12]\n"
+ ".inst 0xc0860438 // mova { z24.s-z27.s }, za1h.s[x12]\n"
+ ".inst 0xc086044c // mova { z12.s-z15.s }, za2h.s[x12]\n"
+ ".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
+ ".inst 0xa060c5dc // st1w { z28.s-z31.s }, pn9.b, [x14]\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0xa061c5d8 // st1w { z24.s-z27.s }, pn9.b, [x14, #0x4, MUL VL]\n"
+ "cmp x12, x20\n"
+ ".inst 0xa062c5cc // st1w { z12.s-z15.s }, pn9.b, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa063c5d0 // st1w { z16.s-z19.s }, pn9.b, [x14, #0xc, MUL VL]\n"
+ "addvl x14, x14, #16\n"
+ "blt 13b\n"
+ "b 29f\n"
+ "14:" // Store to output array
+ "ldr x26, [%x[args], %[offsetof_C]]\n"
+ "sub x25, x13, x11\n"
+ "cntw x24\n"
+ "ld1rh { z29.h }, p1/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
+ "ldr x23, [%x[args], %[offsetof_ldcb]]\n"
+ "whilelt p0.s, x10, x9\n"
+ "cmp x25, x24\n"
+ "ld1rh { z28.h }, p1/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
+ "csel x22, x25, x24, LT\n"
+ "mov x12, #0x0\n"
+ "add x26, x26, x10, LSL #1\n" // C += n
+ "lsr x21, x22, #0x2\n"
+ "madd x26, x11, x23, x26\n" // C += m * ldc
+ "and x20, x22, #0x3\n"
+ "cbz x21, 16f\n"
+ "15:" // Store to output array: Accumulator row 0 loop
+ ".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
+ "add x12, x12, #0x4\n"
+ "fcvt z0.h, p1/m, z0.s\n"
+ "fcvt z1.h, p1/m, z1.s\n"
+ "fcvt z2.h, p1/m, z2.s\n"
+ "fcvt z3.h, p1/m, z3.s\n"
+ "cmp x12, x21, LSL #2\n"
+ ".inst 0xc17ccba0 // fclamp { z0.h-z3.h }, z29.h, z28.h\n"
+ "st1h { z0.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "st1h { z1.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "st1h { z2.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "st1h { z3.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "blt 15b\n"
+ "16:" // Store to output array: Accumulator row 0 oddments
+ "cbz x20, 17f\n"
+ ".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
+ "subs x20, x20, #0x1\n"
+ "fcvt z16.h, p1/m, z16.s\n"
+ "fcvt z17.h, p1/m, z17.s\n"
+ "fcvt z18.h, p1/m, z18.s\n"
+ "fcvt z19.h, p1/m, z19.s\n"
+ ".inst 0xc17ccbb0 // fclamp { z16.h-z19.h }, z29.h, z28.h\n"
+ "st1h { z16.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "beq 17f\n"
+ "subs x20, x20, #0x1\n"
+ "st1h { z17.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "beq 17f\n"
+ "st1h { z18.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "17:" // Store to output array: Accumulator row 0 oddments: End
+ "subs x25, x25, x22\n"
+ "beq 27f\n"
+ "whilelt p0.s, x10, x9\n"
+ "cmp x25, x24\n"
+ "csel x22, x25, x24, LT\n"
+ "mov x12, #0x0\n"
+ "lsr x21, x22, #0x2\n"
+ "and x20, x22, #0x3\n"
+ "cbz x21, 19f\n"
+ "18:" // Store to output array: Accumulator row 1 loop
+ ".inst 0xc0860438 // mova { z24.s-z27.s }, za1h.s[x12]\n"
+ "add x12, x12, #0x4\n"
+ "fcvt z24.h, p1/m, z24.s\n"
+ "fcvt z25.h, p1/m, z25.s\n"
+ "fcvt z26.h, p1/m, z26.s\n"
+ "fcvt z27.h, p1/m, z27.s\n"
+ "cmp x12, x21, LSL #2\n"
+ ".inst 0xc17ccbb8 // fclamp { z24.h-z27.h }, z29.h, z28.h\n"
+ "st1h { z24.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "st1h { z25.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "st1h { z26.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "st1h { z27.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "blt 18b\n"
+ "19:" // Store to output array: Accumulator row 1 oddments
+ "cbz x20, 20f\n"
+ ".inst 0xc0860420 // mova { z0.s-z3.s }, za1h.s[x12]\n"
+ "subs x20, x20, #0x1\n"
+ "fcvt z0.h, p1/m, z0.s\n"
+ "fcvt z1.h, p1/m, z1.s\n"
+ "fcvt z2.h, p1/m, z2.s\n"
+ "fcvt z3.h, p1/m, z3.s\n"
+ ".inst 0xc17ccba0 // fclamp { z0.h-z3.h }, z29.h, z28.h\n"
+ "st1h { z0.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "beq 20f\n"
+ "subs x20, x20, #0x1\n"
+ "st1h { z1.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "beq 20f\n"
+ "st1h { z2.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "20:" // Store to output array: Accumulator row 1 oddments: End
+ "subs x25, x25, x22\n"
+ "beq 27f\n"
+ "whilelt p0.s, x10, x9\n"
+ "cmp x25, x24\n"
+ "csel x22, x25, x24, LT\n"
+ "mov x12, #0x0\n"
+ "lsr x21, x22, #0x2\n"
+ "and x20, x22, #0x3\n"
+ "cbz x21, 22f\n"
+ "21:" // Store to output array: Accumulator row 2 loop
+ ".inst 0xc0860454 // mova { z20.s-z23.s }, za2h.s[x12]\n"
+ "add x12, x12, #0x4\n"
+ "fcvt z20.h, p1/m, z20.s\n"
+ "fcvt z21.h, p1/m, z21.s\n"
+ "fcvt z22.h, p1/m, z22.s\n"
+ "fcvt z23.h, p1/m, z23.s\n"
+ "cmp x12, x21, LSL #2\n"
+ ".inst 0xc17ccbb4 // fclamp { z20.h-z23.h }, z29.h, z28.h\n"
+ "st1h { z20.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "st1h { z21.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "st1h { z22.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "st1h { z23.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "blt 21b\n"
+ "22:" // Store to output array: Accumulator row 2 oddments
+ "cbz x20, 23f\n"
+ ".inst 0xc086044c // mova { z12.s-z15.s }, za2h.s[x12]\n"
+ "subs x20, x20, #0x1\n"
+ "fcvt z12.h, p1/m, z12.s\n"
+ "fcvt z13.h, p1/m, z13.s\n"
+ "fcvt z14.h, p1/m, z14.s\n"
+ "fcvt z15.h, p1/m, z15.s\n"
+ ".inst 0xc17ccbac // fclamp { z12.h-z15.h }, z29.h, z28.h\n"
+ "st1h { z12.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "beq 23f\n"
+ "subs x20, x20, #0x1\n"
+ "st1h { z13.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "beq 23f\n"
+ "st1h { z14.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "23:" // Store to output array: Accumulator row 2 oddments: End
+ "subs x25, x25, x22\n"
+ "beq 27f\n"
+ "whilelt p0.s, x10, x9\n"
+ "cmp x25, x24\n"
+ "csel x20, x25, x24, LT\n"
+ "mov x12, #0x0\n"
+ "lsr x21, x20, #0x2\n"
+ "and x20, x20, #0x3\n"
+ "cbz x21, 25f\n"
+ "24:" // Store to output array: Accumulator row 3 loop
+ ".inst 0xc0860464 // mova { z4.s-z7.s }, za3h.s[x12]\n"
+ "add x12, x12, #0x4\n"
+ "fcvt z4.h, p1/m, z4.s\n"
+ "fcvt z5.h, p1/m, z5.s\n"
+ "fcvt z6.h, p1/m, z6.s\n"
+ "fcvt z7.h, p1/m, z7.s\n"
+ "cmp x12, x21, LSL #2\n"
+ ".inst 0xc17ccba4 // fclamp { z4.h-z7.h }, z29.h, z28.h\n"
+ "st1h { z4.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "st1h { z5.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "st1h { z6.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "st1h { z7.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "blt 24b\n"
+ "25:" // Store to output array: Accumulator row 3 oddments
+ "cbz x20, 26f\n"
+ ".inst 0xc0860464 // mova { z4.s-z7.s }, za3h.s[x12]\n"
+ "subs x20, x20, #0x1\n"
+ "fcvt z4.h, p1/m, z4.s\n"
+ "fcvt z5.h, p1/m, z5.s\n"
+ "fcvt z6.h, p1/m, z6.s\n"
+ "fcvt z7.h, p1/m, z7.s\n"
+ ".inst 0xc17ccba4 // fclamp { z4.h-z7.h }, z29.h, z28.h\n"
+ "st1h { z4.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "beq 26f\n"
+ "subs x20, x20, #0x1\n"
+ "st1h { z5.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "beq 26f\n"
+ "st1h { z6.s }, p0, [x26]\n"
+ "26:" // Store to output array: Accumulator row 3 oddments: End
+ "27:" // Store to output array: End
+ "tbz x16, #0, 29f\n"
+ "mov x12, #0x0\n"
+ "cntw x20\n"
+ "28:" // Store to output array: Refill accumulators: Loop
+ ".inst 0xa040c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa041c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa042c5e4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c5f4 // ld1w { z20.s-z23.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840400 // mova za0h.s[x12], { z0.s-z3.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xc0840482 // mova za2h.s[x12], { z4.s-z7.s }\n"
+ ".inst 0xc0840683 // mova za3h.s[x12], { z20.s-z23.s }\n"
+ "add x12, x12, #0x4\n"
+ "cmp x12, x20\n"
+ "blt 28b\n"
+ "29:" // End block
+ "incw x10\n"
+ "cmp x10, x9\n"
+ "blt 3b\n"
+ "incw x11, ALL, MUL #4\n"
+ "mov x10, #0x0\n"
+ "cmp x11, x13\n"
+ "mov x28, x27\n"
+ "blt 3b\n"
+ ".inst 0xd503467f // SMSTOP\n"
+ :
+ : [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_KernelArgs_max] "I" (offsetof(KernelArgs, max)), [offsetof_KernelArgs_min] "I" (offsetof(KernelArgs, min)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb))
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2.hpp
index dfa0167c00..7120d1d33e 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2.hpp
@@ -193,4 +193,17 @@ void Transform<1, 2, true, VLType::SME>(
);
}
+template<>
+void Transform<1, 2, true, VLType::SME>(
+ __fp16 *out, const __fp16 *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+ sme_transpose_interleave_1VL_2x2(
+ reinterpret_cast<uint16_t *>(out),
+ reinterpret_cast<const uint16_t *>(in + k0 * stride + x0),
+ (xmax-x0) * sizeof(__fp16) / 2,
+ stride * sizeof(__fp16),
+ (kmax-k0)
+ );
+}
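+// The __fp16 specialisation can reuse the uint16_t transform: a 2x2
+// transpose-interleave only moves 16-bit lanes, so the bit pattern is preserved.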
+
#endif
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2.hpp
index 13e0a38ebc..3fc3920500 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2.hpp
@@ -195,4 +195,17 @@ void Transform<2, 2, true, VLType::SME>(
);
}
+template<>
+void Transform<2, 2, true, VLType::SME>(
+ __fp16 *out, const __fp16 *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+ sme_transpose_interleave_2VL_2x2(
+ reinterpret_cast<uint16_t *>(out),
+ reinterpret_cast<const uint16_t *>(in + k0 * stride + x0),
+ (xmax-x0) * sizeof(__fp16) / 2,
+ stride * sizeof(__fp16),
+ (kmax-k0)
+ );
+}
+
#endif
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2.hpp
index 8badde53a9..9b28578217 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2.hpp
@@ -155,4 +155,17 @@ void Transform<4, 2, true, VLType::SME>(
);
}
+template<>
+void Transform<4, 2, true, VLType::SME>(
+ __fp16 *out, const __fp16 *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+ sme_transpose_interleave_4VL_2x2(
+ reinterpret_cast<uint16_t *>(out),
+ reinterpret_cast<const uint16_t *>(in + k0 * stride + x0),
+ (xmax-x0) * sizeof(__fp16) / 2,
+ stride * sizeof(__fp16),
+ (kmax-k0)
+ );
+}
+
#endif