author     Francesco.Petrogalli@arm.com <francesco.petrogalli@arm.com>  2022-04-05 10:31:08 +0000
committer  Francesco Petrogalli <francesco.petrogalli@arm.com>  2022-05-24 14:28:27 +0000
commit     5fcf22dadf092efd7aafb359f9229aa270eb1129 (patch)
tree       f309426ed19bd6710329da3b530167db72d1c6b2 /src/core/NEON/kernels
parent     a8caa023f0d7b71b3a250a14ceee935052fcc74a (diff)
[arm_gemm] Import fixed-format kernels from gemm_linux.
This is a No Functional Change Intended (NFCI) patch. It imports the
kernels into the code base, but the interface to select them and to
expose the format of the weight tensors to the user will be provided in
a subsequent patch.

Kernels and kernel selection code in arm_gemm have been provided by
David.Mansell <David.Mansell@arm.com>.

The kernels are not compiled into the library by default; they must be
enabled via the `scons` option `experimental_fixed_format_kernels=1`.

Resolves: ONCPUML-829
Signed-off-by: Francesco.Petrogalli@arm.com <francesco.petrogalli@arm.com>
Change-Id: If00ccb2b9b7221e01b214cf9783111226ccc8bf4
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/7380
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Reviewed-by: SiCong Li <sicong.li@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
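For reference, a build enabling these kernels might look like the following (illustrative only; the option name comes from the commit message, while the remaining scons options depend on the target platform and are not specified by this patch):

    scons os=linux arch=armv8a neon=1 experimental_fixed_format_kernels=1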
Diffstat (limited to 'src/core/NEON/kernels')
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp  54
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp  45
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp  87
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_hybrid_indirect.hpp  140
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_implementation.hpp  97
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp  143
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernel_weight_format.hpp  60
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_bf16fp32_mmla_6x16.hpp  109
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_bf16fp32_mmla_6x16/generic.cpp  3807
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp16_mla_6x32.hpp  108
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp16_mla_6x32/generic.cpp  5429
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32_mla_6x16.hpp  108
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32_mla_6x16/generic.cpp  3461
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24.hpp  109
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24/generic.cpp  2561
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_dot_8x12.hpp  101
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_dot_8x12/generic.cpp  269
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_mmla_8x12.hpp  109
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_mmla_8x12/generic.cpp  314
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp16_mla_8x24.hpp  100
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp16_mla_8x24/generic.cpp  264
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp32_mla_8x12.hpp  100
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp32_mla_8x12/generic.cpp  332
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL.hpp  109
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL/generic.cpp  2227
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL.hpp  116
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/a64fx.cpp  1530
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/generic.cpp  3318
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL.hpp  116
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/a64fx.cpp  1530
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/generic.cpp  2310
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL.hpp  109
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL/generic.cpp  1464
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL.hpp  109
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL/generic.cpp  319
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL.hpp  108
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/a64fx.cpp  297
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/generic.cpp  269
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL.hpp  108
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL/a64fx.cpp  297
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL/generic.cpp  273
-rw-r--r--  src/core/NEON/kernels/arm_gemm/misc.cpp  44
42 files changed, 32502 insertions, 58 deletions
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp b/src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp
index dd72fb5901..50fc5bdb8a 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp
@@ -33,12 +33,21 @@
#include "kernels/a32_sgemm_8x6.hpp"
+#ifdef ENABLE_FIXED_FORMAT_KERNELS
+#include "kernels/a64_ffhybrid_bf16fp32_mmla_6x16.hpp"
+#include "kernels/a64_ffinterleaved_bf16fp32_dot_8x12.hpp"
+#include "kernels/a64_ffinterleaved_bf16fp32_mmla_8x12.hpp"
+#endif // ENABLE_FIXED_FORMAT_KERNELS
#include "kernels/a64_hybrid_bf16fp32_dot_6x16.hpp"
#include "kernels/a64_hybrid_bf16fp32_mmla_6x16.hpp"
#include "kernels/a64_interleaved_bf16fp32_dot_8x12.hpp"
#include "kernels/a64_interleaved_bf16fp32_mmla_8x12.hpp"
#include "kernels/a64_sgemm_8x12.hpp"
+#ifdef ENABLE_FIXED_FORMAT_KERNELS
+#include "kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL.hpp"
+#include "kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL.hpp"
+#endif // ENABLE_FIXED_FORMAT_KERNELS
#include "kernels/sve_hybrid_bf16fp32_dot_6x4VL.hpp"
#include "kernels/sve_hybrid_bf16fp32_mmla_6x4VL.hpp"
#include "kernels/sve_interleaved_bf16fp32_dot_8x3VL.hpp"
@@ -80,6 +89,24 @@ GemmImplementation<bfloat16, float>::with_estimate(
[](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_bf16fp32_dot_8x3VL, bfloat16, float>::estimate_cycles<bfloat16>(args); },
[](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_bf16fp32_dot_8x3VL, bfloat16, float>(args); }
),
+#ifdef ENABLE_FIXED_FORMAT_KERNELS
+GemmImplementation<bfloat16, float>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "sve_ffinterleaved_bf16fp32_mmla_8x3VL",
+ KernelWeightFormat::VL2VL_BL64,
+ [](const GemmArgs &args) { return args._ci->has_svebf16(); },
+ [](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_sve_ffinterleaved_bf16fp32_mmla_8x3VL, bfloat16, float>::estimate_cycles<bfloat16>(args); },
+ [](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_sve_ffinterleaved_bf16fp32_mmla_8x3VL, bfloat16, float>(args); }
+),
+GemmImplementation<bfloat16, float>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "sve_ffhybrid_bf16fp32_mmla_6x4VL",
+ KernelWeightFormat::VL2VL_BL64,
+ [](const GemmArgs &args) { return args._ci->has_svebf16(); },
+ [](const GemmArgs &args) { return GemmHybridIndirectFixedFormat<cls_sve_ffhybrid_bf16fp32_mmla_6x4VL, bfloat16, float>::estimate_cycles<bfloat16>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirectFixedFormat<cls_sve_ffhybrid_bf16fp32_mmla_6x4VL, bfloat16, float>(args); }
+),
+#endif // ENABLE_FIXED_FORMAT_KERNELS
#endif // ARM_COMPUTE_ENABLE_SVE
GemmImplementation<bfloat16, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
@@ -109,6 +136,32 @@ GemmImplementation<bfloat16, float>::with_estimate(
[](const GemmArgs &args) { return GemmInterleaved<cls_a64_interleaved_bf16fp32_dot_8x12, bfloat16, float>::estimate_cycles<bfloat16>(args); },
[](const GemmArgs &args) { return new GemmInterleaved<cls_a64_interleaved_bf16fp32_dot_8x12, bfloat16, float>(args); }
),
+#ifdef ENABLE_FIXED_FORMAT_KERNELS
+GemmImplementation<bfloat16, float>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "a64_ffinterleaved_bf16fp32_mmla_8x12",
+ KernelWeightFormat::VL256_BL64,
+ [](const GemmArgs &args) { return args._ci->has_bf16(); },
+ [](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_a64_ffinterleaved_bf16fp32_mmla_8x12, bfloat16, float>::estimate_cycles<bfloat16>(args); },
+ [](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_a64_ffinterleaved_bf16fp32_mmla_8x12, bfloat16, float>(args); }
+),
+GemmImplementation<bfloat16, float>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "a64_ffhybrid_bf16fp32_mmla_6x16",
+ KernelWeightFormat::VL256_BL64,
+ [](const GemmArgs &args) { return args._ci->has_bf16(); },
+ [](const GemmArgs &args) { return GemmHybridIndirectFixedFormat<cls_a64_ffhybrid_bf16fp32_mmla_6x16, bfloat16, float>::estimate_cycles<bfloat16>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirectFixedFormat<cls_a64_ffhybrid_bf16fp32_mmla_6x16, bfloat16, float>(args); }
+),
+GemmImplementation<bfloat16, float>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "a64_ffinterleaved_bf16fp32_dot_8x12",
+ KernelWeightFormat::VL128_BL32,
+ [](const GemmArgs &args) { return args._ci->has_bf16(); },
+ [](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_a64_ffinterleaved_bf16fp32_dot_8x12, bfloat16, float>::estimate_cycles<bfloat16>(args); },
+ [](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_a64_ffinterleaved_bf16fp32_dot_8x12, bfloat16, float>(args); }
+),
+#endif // ENABLE_FIXED_FORMAT_KERNELS
GemmImplementation<bfloat16, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_sgemm_8x12",
@@ -145,6 +198,7 @@ const GemmImplementation<bfloat16, float> *gemm_implementation_list<bfloat16, fl
/* Explicitly instantiate the external functions for these types. */
template UniqueGemmCommon<bfloat16, float> gemm<bfloat16, float, Nothing>(const GemmArgs &args, const Nothing &);
template bool has_opt_gemm<bfloat16, float, Nothing>(const GemmArgs &args, const Nothing &);
+template KernelDescription get_gemm_method<bfloat16, float, Nothing>(const GemmArgs &args, const Nothing &);
template std::vector<KernelDescription> get_compatible_kernels<bfloat16, float, Nothing>(const GemmArgs &args, const Nothing &);
} // namespace arm_gemm
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp b/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp
index 42f4528066..2796b0d204 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp
@@ -34,9 +34,17 @@
#include "gemm_interleaved.hpp"
#include "kernels/a32_sgemm_8x6.hpp"
+#ifdef ENABLE_FIXED_FORMAT_KERNELS
+#include "kernels/a64_ffhybrid_fp16_mla_6x32.hpp"
+#include "kernels/a64_ffinterleaved_fp16_mla_8x24.hpp"
+#endif // ENABLE_FIXED_FORMAT_KERNELS
#include "kernels/a64_hgemm_8x24.hpp"
#include "kernels/a64_hybrid_fp16_mla_6x32.hpp"
#include "kernels/a64_sgemm_8x12.hpp"
+#ifdef ENABLE_FIXED_FORMAT_KERNELS
+#include "kernels/sve_ffhybrid_fp16_mla_6x4VL.hpp"
+#include "kernels/sve_ffinterleaved_fp16_mla_8x3VL.hpp"
+#endif // ENABLE_FIXED_FORMAT_KERNELS
#include "kernels/sve_hybrid_fp16_mla_6x4VL.hpp"
#include "kernels/sve_interleaved_fp16_mla_8x3VL.hpp"
@@ -58,6 +66,24 @@ GemmImplementation<__fp16, __fp16>::with_estimate(
[](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_fp16_mla_8x3VL, __fp16, __fp16>::estimate_cycles<__fp16>(args); },
[](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_fp16_mla_8x3VL, __fp16, __fp16>(args); }
),
+#ifdef ENABLE_FIXED_FORMAT_KERNELS
+GemmImplementation<__fp16, __fp16>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "sve_ffinterleaved_fp16_mla_8x3VL",
+ KernelWeightFormat::VL1VL_BL16,
+ [](const GemmArgs &args) { return args._ci->has_sve(); },
+ [](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_sve_ffinterleaved_fp16_mla_8x3VL, __fp16, __fp16>::estimate_cycles<__fp16>(args); },
+ [](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_sve_ffinterleaved_fp16_mla_8x3VL, __fp16, __fp16>(args); }
+),
+GemmImplementation<__fp16, __fp16>::with_estimate(
+ GemmMethod::GEMM_HYBRID,
+ "sve_ffhybrid_fp16_mla_6x4VL",
+ KernelWeightFormat::VL1VL_BL16,
+ [](const GemmArgs &args) { return args._ci->has_sve(); },
+ [](const GemmArgs &args) { return GemmHybridIndirectFixedFormat<cls_sve_ffhybrid_fp16_mla_6x4VL, __fp16, __fp16>::estimate_cycles<__fp16>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirectFixedFormat<cls_sve_ffhybrid_fp16_mla_6x4VL, __fp16, __fp16>(args); }
+),
+#endif // ENABLE_FIXED_FORMAT_KERNELS
#endif // ARM_COMPUTE_ENABLE_SVE
#if defined(__aarch64__)
GemmImplementation<__fp16, __fp16>::with_estimate(
@@ -74,6 +100,24 @@ GemmImplementation<__fp16, __fp16>::with_estimate(
[](const GemmArgs &args) { return GemmInterleaved<cls_a64_hgemm_8x24, __fp16, __fp16>::estimate_cycles<__fp16>(args); },
[](const GemmArgs &args) { return new GemmInterleaved<cls_a64_hgemm_8x24, __fp16, __fp16>(args); }
),
+#ifdef ENABLE_FIXED_FORMAT_KERNELS
+GemmImplementation<__fp16, __fp16>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "a64_ffinterleaved_fp16_mla_8x24",
+ KernelWeightFormat::VL128_BL16,
+ [](const GemmArgs &args) { return args._ci->has_fp16(); },
+ [](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_a64_ffinterleaved_fp16_mla_8x24, __fp16, __fp16>::estimate_cycles<__fp16>(args); },
+ [](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_a64_ffinterleaved_fp16_mla_8x24, __fp16, __fp16>(args); }
+),
+GemmImplementation<__fp16, __fp16>::with_estimate(
+ GemmMethod::GEMM_HYBRID,
+ "a64_ffhybrid_fp16_mla_6x32",
+ KernelWeightFormat::VL128_BL16,
+ [](const GemmArgs &args) { return args._ci->has_fp16(); },
+ [](const GemmArgs &args) { return GemmHybridIndirectFixedFormat<cls_a64_ffhybrid_fp16_mla_6x32, __fp16, __fp16>::estimate_cycles<__fp16>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirectFixedFormat<cls_a64_ffhybrid_fp16_mla_6x32, __fp16, __fp16>(args); }
+),
+#endif // ENABLE_FIXED_FORMAT_KERNELS
{
GemmMethod::GEMM_INTERLEAVED,
"a64_sgemm_8x12",
@@ -109,6 +153,7 @@ const GemmImplementation<__fp16, __fp16> *gemm_implementation_list<__fp16, __fp1
/* Explicitly instantiate the external functions for these types. */
template UniqueGemmCommon<__fp16, __fp16> gemm<__fp16, __fp16, Nothing>(const GemmArgs &args, const Nothing &);
template bool has_opt_gemm<__fp16, __fp16, Nothing>(const GemmArgs &args, const Nothing &);
+template KernelDescription get_gemm_method<__fp16, __fp16, Nothing>(const GemmArgs &args, const Nothing &);
template std::vector<KernelDescription> get_compatible_kernels<__fp16, __fp16, Nothing>(const GemmArgs &args, const Nothing &);
} // namespace arm_gemm
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
index 69a2803903..4f7e191fb3 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
@@ -31,6 +31,12 @@
#include "gemv_pretransposed.hpp"
#include "kernels/a32_sgemm_8x6.hpp"
+#ifdef ENABLE_FIXED_FORMAT_KERNELS
+#include "kernels/a64_ffhybrid_fp32_mla_6x16.hpp"
+#include "kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24.hpp"
+#include "kernels/a64_ffinterleaved_bf16fp32_mmla_8x12.hpp"
+#include "kernels/a64_ffinterleaved_fp32_mla_8x12.hpp"
+#endif // ENABLE_FIXED_FORMAT_KERNELS
#include "kernels/a64_hybrid_fp32bf16fp32_mmla_4x24.hpp"
#include "kernels/a64_hybrid_fp32bf16fp32_mmla_6x16.hpp"
#include "kernels/a64_hybrid_fp32_mla_4x24.hpp"
@@ -42,6 +48,12 @@
#include "kernels/a64_smallK_hybrid_fp32_mla_6x4.hpp"
#include "kernels/a64_smallK_hybrid_fp32_mla_8x4.hpp"
+#ifdef ENABLE_FIXED_FORMAT_KERNELS
+#include "kernels/sve_ffhybrid_fp32_mla_6x4VL.hpp"
+#include "kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL.hpp"
+#include "kernels/sve_ffinterleaved_fp32_mla_8x3VL.hpp"
+#include "kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL.hpp"
+#endif // ENABLE_FIXED_FORMAT_KERNELS
#include "kernels/sve_hybrid_fp32bf16fp32_mmla_4x6VL.hpp"
#include "kernels/sve_hybrid_fp32bf16fp32_mmla_6x4VL.hpp"
#include "kernels/sve_hybrid_fp32_mla_6x4VL.hpp"
@@ -73,6 +85,7 @@ GemmImplementation<float, float>::with_estimate(
[](const GemmArgs &args) { return GemmInterleaved<cls_a64_interleaved_bf16fp32_mmla_8x12, float, float>::estimate_cycles<float>(args); },
[](const GemmArgs &args) { return new GemmInterleaved<cls_a64_interleaved_bf16fp32_mmla_8x12, float, float>(args); }
),
+
GemmImplementation<float, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_hybrid_fp32bf16fp32_mmla_6x16",
@@ -152,6 +165,42 @@ GemmImplementation<float, float>::with_estimate(
[](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_fp32_mla_8x3VL, float, float>::estimate_cycles<float>(args); },
[](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_fp32_mla_8x3VL, float, float>(args); }
),
+ #ifdef ENABLE_FIXED_FORMAT_KERNELS
+#ifdef ARM_COMPUTE_ENABLE_BF16
+GemmImplementation<float, float>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "sve_ffinterleaved_bf16fp32_mmla_8x3VL",
+ KernelWeightFormat::VL2VL_BL64_BF16,
+ [](const GemmArgs &args) { return args._fast_mode && args._ci->has_svebf16(); },
+ [](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_sve_ffinterleaved_bf16fp32_mmla_8x3VL, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_sve_ffinterleaved_bf16fp32_mmla_8x3VL, float, float>(args); }
+),
+GemmImplementation<float, float>::with_estimate(
+ GemmMethod::GEMM_HYBRID,
+ "sve_ffhybrid_fp32bf16fp32_mmla_4x6VL",
+ KernelWeightFormat::VL2VL_BL64_BF16,
+ [](const GemmArgs &args) { return args._fast_mode && args._ci->has_svebf16(); },
+ [](const GemmArgs &args) { return GemmHybridIndirectFixedFormat<cls_sve_ffhybrid_fp32bf16fp32_mmla_4x6VL, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirectFixedFormat<cls_sve_ffhybrid_fp32bf16fp32_mmla_4x6VL, float, float>(args); }
+),
+#endif
+GemmImplementation<float, float>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "sve_ffinterleaved_fp32_mla_8x3VL",
+ KernelWeightFormat::VL1VL_BL32,
+ [](const GemmArgs &args) { return args._ci->has_sve(); },
+ [](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_sve_ffinterleaved_fp32_mla_8x3VL, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_sve_ffinterleaved_fp32_mla_8x3VL, float, float>(args); }
+),
+GemmImplementation<float, float>::with_estimate(
+ GemmMethod::GEMM_HYBRID,
+ "sve_ffhybrid_fp32_mla_6x4VL",
+ KernelWeightFormat::VL1VL_BL32,
+ [](const GemmArgs &args) { return args._ci->has_sve(); },
+ [](const GemmArgs &args) { return GemmHybridIndirectFixedFormat<cls_sve_ffhybrid_fp32_mla_6x4VL, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirectFixedFormat<cls_sve_ffhybrid_fp32_mla_6x4VL, float, float>(args); }
+),
+#endif // ENABLE_FIXED_FORMAT_KERNELS
#endif // ARM_COMPUTE_ENABLE_SVE
// Cortex-A35 specific kernel - use for any problem on A35, and never in any other cases.
{
@@ -204,6 +253,43 @@ GemmImplementation<float, float>::with_estimate(
[](const GemmArgs &args) { return GemmInterleaved<cls_a64_sgemm_8x12, float, float>::estimate_cycles<float>(args); },
[](const GemmArgs &args) { return new GemmInterleaved<cls_a64_sgemm_8x12, float, float>(args); }
),
+#ifdef ENABLE_FIXED_FORMAT_KERNELS
+#ifdef ARM_COMPUTE_ENABLE_BF16
+// "fast mode" (BF16) kernels
+GemmImplementation<float, float>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "a64_ffinterleaved_bf16fp32_mmla_8x12",
+ KernelWeightFormat::VL256_BL64_BF16,
+ [](const GemmArgs &args) { return args._fast_mode && args._ci->has_bf16(); },
+ [](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_a64_ffinterleaved_bf16fp32_mmla_8x12, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_a64_ffinterleaved_bf16fp32_mmla_8x12, float, float>(args); }
+),
+GemmImplementation<float, float>::with_estimate(
+ GemmMethod::GEMM_HYBRID,
+ "a64_ffhybrid_fp32bf16fp32_mmla_4x24",
+ KernelWeightFormat::VL256_BL64_BF16,
+ [](const GemmArgs &args) { return args._fast_mode && args._ci->has_bf16(); },
+ [](const GemmArgs &args) { return GemmHybridIndirectFixedFormat<cls_a64_ffhybrid_fp32bf16fp32_mmla_4x24, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirectFixedFormat<cls_a64_ffhybrid_fp32bf16fp32_mmla_4x24, float, float>(args); }
+),
+#endif // BF16
+GemmImplementation<float, float>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "a64_ffinterleaved_fp32_mla_8x12",
+ KernelWeightFormat::VL128_BL32,
+ nullptr,
+ [](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_a64_ffinterleaved_fp32_mla_8x12, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_a64_ffinterleaved_fp32_mla_8x12, float, float>(args); }
+),
+GemmImplementation<float, float>::with_estimate(
+ GemmMethod::GEMM_HYBRID,
+ "a64_ffhybrid_fp32_mla_6x16",
+ KernelWeightFormat::VL128_BL32,
+ nullptr,
+ [](const GemmArgs &args) { return GemmHybridIndirectFixedFormat<cls_a64_ffhybrid_fp32_mla_6x16, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirectFixedFormat<cls_a64_ffhybrid_fp32_mla_6x16, float, float>(args); }
+),
+#endif // ENABLE_FIXED_FORMAT_KERNELS
#endif // __aarch64__
#ifdef __arm__
@@ -233,6 +319,7 @@ const GemmImplementation<float, float> *gemm_implementation_list<float, float>()
/* Explicitly instantiate the external functions for these types. */
template UniqueGemmCommon<float, float> gemm<float, float, Nothing>(const GemmArgs &args, const Nothing &);
template bool has_opt_gemm<float, float, Nothing>(const GemmArgs &args, const Nothing &);
+template KernelDescription get_gemm_method<float, float, Nothing>(const GemmArgs &args, const Nothing &);
template std::vector<KernelDescription> get_compatible_kernels<float, float, Nothing> (const GemmArgs &args, const Nothing &);
} // namespace arm_gemm
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_hybrid_indirect.hpp b/src/core/NEON/kernels/arm_gemm/gemm_hybrid_indirect.hpp
index 5b3ef4203d..c41b0a5b3e 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_hybrid_indirect.hpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_hybrid_indirect.hpp
@@ -33,6 +33,7 @@
#include "arm_gemm.hpp"
#include "bias_adder.hpp"
#include "convolver.hpp"
+#include "kernel_weight_format.hpp"
#include "ndrange.hpp"
#include "performance_parameters.hpp"
#include "transform.hpp"
@@ -54,7 +55,7 @@ namespace {
// We need to invoke the kernel differently for quantizing and non-quantizing cases, so here is a shim class to do
// that.
-template<typename OutputStage, bool SeparateQuantize = false>
+template<typename OutputStage, bool SeparateQuantize, bool FixedFormat>
class run_hybrid_kernel {
public:
template<typename strategy, typename Tlo, typename Tro, typename Tr>
@@ -63,18 +64,18 @@ public:
profiler &prof,
#endif
const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<Tlo> A_arg, unsigned int M, unsigned int N,
- unsigned int kern_k, const Tro *b_ptr, IndirectOutputArg<Tr> output_arg, const Tr *bias_ptr, Activation act, bool accumulate,
+ unsigned int kern_k, const Tro *b_ptr, size_t b_stride, IndirectOutputArg<Tr> output_arg, const Tr *bias_ptr, Activation act, bool accumulate,
const OutputStage &os, const int32_t *col_bias, unsigned int n_0 );
};
template<>
template<typename strategy, typename Tlo, typename Tro, typename Tr>
-inline void run_hybrid_kernel<Nothing, false>::run(
+inline void run_hybrid_kernel<Nothing, false, false>::run(
#ifdef CYCLE_PROFILING
profiler &prof,
#endif
const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<Tlo> A_arg, unsigned int M, unsigned int N,
- unsigned int kern_k, const Tro *b_ptr, IndirectOutputArg<Tr> output_arg, const Tr *bias_ptr, Activation act, bool accumulate,
+ unsigned int kern_k, const Tro *b_ptr, size_t, IndirectOutputArg<Tr> output_arg, const Tr *bias_ptr, Activation act, bool accumulate,
const Nothing &, const int32_t *, unsigned int) {
#ifdef CYCLE_PROFILING
auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)M * kern_k * roundup(N, strategy::out_width()));
@@ -115,12 +116,60 @@ inline void run_hybrid_kernel<Nothing, false>::run(
template<>
template<typename strategy, typename Tlo, typename Tro, typename Tr>
-inline void run_hybrid_kernel<Requantize32, false>::run(
+inline void run_hybrid_kernel<Nothing, false, true>::run(
#ifdef CYCLE_PROFILING
profiler &prof,
#endif
const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<Tlo> A_arg, unsigned int M, unsigned int N,
- unsigned int kern_k, const Tro *b_ptr, IndirectOutputArg<Tr> output_arg, const Tr *, Activation, bool,
+ unsigned int kern_k, const Tro *b_ptr, size_t b_stride, IndirectOutputArg<Tr> output_arg, const Tr *bias_ptr, Activation act, bool accumulate,
+ const Nothing &, const int32_t *, unsigned int) {
+#ifdef CYCLE_PROFILING
+ auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)M * kern_k * roundup(N, strategy::out_width()));
+#endif
+ UNUSED(kern_k);
+
+ /* Indirect hybrid kernels read the full width of the bias. So we need to detect the case where we are writing
+ * a partial block and pad the bias for that block. */
+ if (bias_ptr && !accumulate && (N % strategy::out_width() != 0)) {
+ /* Break N into "N_bulk" (a multiple of output width) and "N_remainder" */
+ unsigned int N_remainder = N % strategy::out_width();
+ unsigned int N_bulk = N - N_remainder;
+
+ /* Output argument to be used for the tail */
+ IndirectOutputArg<Tr> offset_output = output_arg;
+
+ /* If there is a "bulk" to be processed, handle that and update "offset_output" appropriately. */
+ if (N_bulk > 0) {
+ strat.kernel(num_strings, string_ptr, A_arg, M, N_bulk, b_ptr, b_stride, output_arg, bias_ptr, act, accumulate);
+
+ if (output_arg.is_indirect) {
+ offset_output = IndirectOutputArg<Tr>(output_arg.indirect.ptr, output_arg.indirect.offset + N_bulk);
+ } else {
+ offset_output = IndirectOutputArg<Tr>(output_arg.direct.base + N_bulk, output_arg.direct.stride);
+ }
+ }
+
+ /* Pad the bias buffer for the remainder */
+ Tr *bias_pad_buffer = reinterpret_cast<Tr *>(alloca(strategy::out_width() * sizeof(Tr)));
+ memcpy(bias_pad_buffer, bias_ptr + N_bulk, N_remainder * sizeof(Tr));
+
+ /* Process the remainder, offsetting the B pointer as needed. */
+ strat.kernel(num_strings, string_ptr, A_arg, M, N_remainder,
+ b_ptr + (N_bulk / strategy::stripe_width()) * b_stride, b_stride, offset_output,
+ bias_pad_buffer, act, accumulate);
+ } else {
+ strat.kernel(num_strings, string_ptr, A_arg, M, N, b_ptr, b_stride, output_arg, bias_ptr, act, accumulate);
+ }
+}
+
+template<>
+template<typename strategy, typename Tlo, typename Tro, typename Tr>
+inline void run_hybrid_kernel<Requantize32, false, false>::run(
+#ifdef CYCLE_PROFILING
+ profiler &prof,
+#endif
+ const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<Tlo> A_arg, unsigned int M, unsigned int N,
+ unsigned int kern_k, const Tro *b_ptr, size_t, IndirectOutputArg<Tr> output_arg, const Tr *, Activation, bool,
const Requantize32 &os, const int32_t *col_bias, unsigned int n_0 ) {
#ifdef CYCLE_PROFILING
auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)M * kern_k * roundup(N, strategy::out_width()));
@@ -132,12 +181,12 @@ inline void run_hybrid_kernel<Requantize32, false>::run(
template<>
template<typename strategy, typename Tlo, typename Tro, typename Tr>
-inline void run_hybrid_kernel<Requantize32, true>::run(
+inline void run_hybrid_kernel<Requantize32, true, false>::run(
#ifdef CYCLE_PROFILING
profiler &prof,
#endif
const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<Tlo> A_arg, unsigned int M, unsigned int N,
- unsigned int kern_k, const Tro *b_ptr, IndirectOutputArg<Tr> output_arg, const Tr *, Activation, bool,
+ unsigned int kern_k, const Tro *b_ptr, size_t, IndirectOutputArg<Tr> output_arg, const Tr *, Activation, bool,
const Requantize32 &os, const int32_t *col_bias, unsigned int n_0 ) {
UNUSED(kern_k);
// On this route we will only process one kernel height at a time and will make sure this happens in the driver loop.
@@ -180,10 +229,38 @@ inline void run_hybrid_kernel<Requantize32, true>::run(
}
}
+template<typename strategy, bool FixedFormat>
+struct stripe_width {
+ static unsigned int get() {
+ return strategy::stripe_width();
+ }
+};
+
+template<typename strategy>
+struct stripe_width<strategy, false> {
+ static unsigned int get() {
+ return 0;
+ }
+};
+
+template<typename strategy, bool FixedFormat>
+struct kernel_weight_format {
+ static KernelWeightFormat get() {
+ return strategy::kernel_weight_format();
+ }
+};
+
+template<typename strategy>
+struct kernel_weight_format<strategy, false> {
+ static KernelWeightFormat get() {
+ return KernelWeightFormat::NON_FIXED;
+ }
+};
+
} // anonymous namespace
// Implementation of the GemmCommon abstract class.
-template<typename strategy, typename To, typename Tr, typename OutputStage = Nothing, bool SeparateQuantize = false>
+template<typename strategy, typename To, typename Tr, typename OutputStage=Nothing, bool SeparateQuantize=false, bool FixedFormat=false>
class GemmHybridIndirect : public GemmCommon<To, Tr> {
typedef typename strategy::lhs_operand_type Tloi;
typedef typename strategy::rhs_operand_type Troi;
@@ -425,24 +502,32 @@ public:
const unsigned int nmax = std::min(n0 + _n_block, _args._Nsize);
const unsigned int multi = p.dim(3);
- const Troi *b_panel = _B_transposed +
- (multi * roundup(_args._Nsize, strategy::out_width()) * _Ktotal) +
- (k0 * roundup(_args._Nsize, strategy::out_width())) +
- (n0 * kern_k);
+ const Troi *b_panel;
+ if (FixedFormat) {
+ b_panel = reinterpret_cast<const Troi *>(this->_Bptr) +
+ (multi * this->_B_multi_stride) +
+ ((n0 / stripe_width<strategy, FixedFormat>::get()) * this->_ldb) +
+ (k0 * stripe_width<strategy, FixedFormat>::get());
+ } else {
+ b_panel = _B_transposed +
+ (multi * roundup(_args._Nsize, strategy::out_width()) * _Ktotal) +
+ (k0 * roundup(_args._Nsize, strategy::out_width())) +
+ (n0 * kern_k);
+ }
- IndirectOutputArg<Tr> out_arg(this->_Cptr + (multi * this->_C_multi_stride) + (batch * this->_C_batch_stride) + (m_start * this->_ldc) + n0, this->_ldc);
+ IndirectOutputArg<Tr> out_arg(this->_Cptr + (multi * this->_C_multi_stride) + (batch * this->_C_batch_stride) + (m_start * this->_ldc) + n0, this->_ldc);
#ifdef CYCLE_PROFILING
auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)(m_end - m_start) * kern_k * roundup(nmax-n0, strategy::out_width()));
#endif
if (_indirect_buf) {
- run_hybrid_kernel<OutputStage, SeparateQuantize>::run(
+ run_hybrid_kernel<OutputStage, SeparateQuantize, FixedFormat>::run(
#ifdef CYCLE_PROFILING
prof,
#endif
strat, sections, string_lengths.data(),
IndirectInputArg<To>(_indirect_buf + (multi * _args._nbatches * _args._Ksections) + (batch * _args._Ksections) + first_section, m_start, first_offset),
- (m_end - m_start), (nmax - n0), kern_k, b_panel, out_arg,
+ (m_end - m_start), (nmax - n0), kern_k, b_panel, this->_ldb, out_arg,
(this->_bias && first_pass) ? this->_bias + (multi * this->_bias_multi_stride) + n0 : nullptr,
last_pass ? _args._act : Activation(),
!first_pass,
@@ -469,13 +554,13 @@ public:
}
assert(pos == sections);
- run_hybrid_kernel<OutputStage, SeparateQuantize>::run(
+ run_hybrid_kernel<OutputStage, SeparateQuantize, FixedFormat>::run(
#ifdef CYCLE_PROFILING
prof,
#endif
strat, sections, string_lengths.data(),
IndirectInputArg<To>(in_row_strings.data(), 0, first_offset),
- (m_end - m_start), (nmax - n0), kern_k, b_panel, out_arg,
+ (m_end - m_start), (nmax - n0), kern_k, b_panel, this->_ldb, out_arg,
(this->_bias && first_pass) ? this->_bias + (multi * this->_bias_multi_stride) + n0 : nullptr,
last_pass ? _args._act : Activation(),
!first_pass,
@@ -485,13 +570,13 @@ public:
// Length to process. This needs to exclude padding, but 'kmax' potentially includes it.
const unsigned int len = (std::min(_args._Ksize, kmax) - k0);
- run_hybrid_kernel<OutputStage, SeparateQuantize>::run(
+ run_hybrid_kernel<OutputStage, SeparateQuantize, FixedFormat>::run(
#ifdef CYCLE_PROFILING
prof,
#endif
strat, 1, &len,
IndirectInputArg<To>(this->_Aptr + (multi * this->_A_multi_stride) + (batch * this->_A_batch_stride) + m_start * this->_lda + k0, this->_lda),
- (m_end - m_start), (nmax - n0), kern_k, b_panel, out_arg,
+ (m_end - m_start), (nmax - n0), kern_k, b_panel, this->_ldb, out_arg,
(this->_bias && first_pass) ? this->_bias + (multi * this->_bias_multi_stride) + n0 : nullptr,
last_pass ? _args._act : Activation(),
!first_pass,
@@ -504,14 +589,18 @@ public:
// Interface implementation - pretransposed
bool B_is_pretransposed() const override {
- return true;
+ return (FixedFormat == false);
}
bool B_pretranspose_required() const override {
- return (_B_transposed==nullptr);
+ return (FixedFormat == false) && (_B_transposed==nullptr);
}
size_t get_B_pretransposed_array_size() const override {
+ if (FixedFormat) {
+ return 0;
+ }
+
// Start with actual pretransposed buffer...
size_t size = roundup(_args._Nsize, strategy::out_width()) * _Ktotal * _args._nmulti * sizeof(Troi);
@@ -599,8 +688,7 @@ public:
}
}
} else {
- // In the single K section case, can process the whole lot in one go.
- // Caution: 'blockwalker::kmax()' rounds up, so clamp to valid _Ksize.
+ // In the single K section case, can process the whole lot in one go.
strat.transforms.PrepareB(buffer, B + (multi * B_multi_stride), ldb,
0, _args._Nsize, k0, std::min(kmax, _args._Ksize));
buffer += roundup(_args._Nsize, strategy::out_width()) * roundup(kmax-k0, strategy::k_unroll());
@@ -694,11 +782,15 @@ public:
c.inner_block_size = _k_block;
c.outer_block_size = _n_block;
c.filter = get_type_name<strategy>();
+ c.weight_format = get_weight_format(kernel_weight_format<strategy, FixedFormat>::get(), sizeof(To));
return c;
}
};
+template<typename strategy, typename To, typename Tr, typename OutputStage=Nothing>
+using GemmHybridIndirectFixedFormat = GemmHybridIndirect<strategy, To, Tr, OutputStage, false, true>;
+
} // namespace arm_gemm
#ifdef __I_DEFINED_UNUSED
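Note on the fixed-format path added above: instead of reading B from a pretransposed buffer, the kernel addresses the caller-supplied tensor directly. Weights are laid out as stripes of stripe_width() columns, with _ldb giving the element stride between consecutive stripes and _B_multi_stride separating the GEMM "multis". A minimal standalone sketch of that address arithmetic (hypothetical helper, not part of the patch; the real computation is inline in the execute path above):

#include <cstddef>

// Mirrors the fixed-format b_panel computation in GemmHybridIndirect.
template <typename T>
const T *fixed_format_b_panel(const T *Bptr, size_t B_multi_stride, size_t ldb,
                              unsigned int multi, unsigned int n0, unsigned int k0,
                              unsigned int stripe_width) {
    return Bptr + (multi * B_multi_stride)     // select the GEMM "multi"
                + ((n0 / stripe_width) * ldb)  // skip whole column stripes
                + (k0 * stripe_width);         // advance along K inside the stripe
}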
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_implementation.hpp b/src/core/NEON/kernels/arm_gemm/gemm_implementation.hpp
index cb3ff7aa29..75fb1cb306 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_implementation.hpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_implementation.hpp
@@ -24,6 +24,8 @@
#include "arm_gemm.hpp"
+#include "kernel_weight_format.hpp"
+
#include <cstdint>
#include <functional>
@@ -37,15 +39,36 @@ template<typename Top, typename Tret, class OutputStage = Nothing>
struct GemmImplementation {
const GemmMethod method;
const char * name;
+ const KernelWeightFormat kernel_weight_format = KernelWeightFormat::NON_FIXED;
std::function<bool(const GemmArgs &, const OutputStage &)> is_supported = {};
std::function<uint64_t(const GemmArgs &, const OutputStage &)> cycle_estimate = {};
std::function<GemmCommon<Top, Tret> *(const GemmArgs &, const OutputStage &)> instantiate = {};
bool do_is_supported(const GemmArgs &args, const OutputStage &os) const {
- if (is_supported != nullptr) {
- return is_supported(args, os);
+ // Check supplied is_supported() function first.
+ if (is_supported != nullptr && !is_supported(args, os)) {
+ return false;
+ }
+
+ // Check weight format is appropriate.
+ if (args._fixed_format == false) {
+ // Can't return a fixed format kernel if we weren't asked for one.
+ return (kernel_weight_format == KernelWeightFormat::NON_FIXED);
} else {
- return true;
+ // Fixed format kernel requested: if this is a non-fixed format kernel we can't use it.
+ if (kernel_weight_format == KernelWeightFormat::NON_FIXED) {
+ return false;
+ }
+
+ // If there's no config, or the config says ANY then this one is OK.
+ if (!args._cfg || args._cfg->weight_format == WeightFormat::ANY) {
+ return true;
+ }
+
+ // If we get here it means there is a config and it specifies a format. Check it matches this kernel.
+ // NOTE: this will execute SVE instructions if it's an SVE kernel, so it's important that is_supported()
+ // was called above first.
+ return (args._cfg->weight_format == get_weight_format(kernel_weight_format, sizeof(Top)));
}
}
@@ -84,6 +107,13 @@ struct GemmImplementation {
method(m), name(n), is_supported(is_supported),
cycle_estimate( [is_recommended](const GemmArgs &args, const OutputStage &os) { return (is_recommended == nullptr) ? 0 : (is_recommended(args, os) ? 0 : UINT64_MAX); } ),
instantiate(instantiate) { }
+
+ GemmImplementation(GemmMethod m, const char *n, KernelWeightFormat kwf,
+ std::function<bool(const GemmArgs &, const OutputStage &)> is_supported, std::function<bool(const GemmArgs &, const OutputStage &)> is_recommended,
+ std::function<GemmCommon<Top, Tret> *(const GemmArgs &, const OutputStage &)> instantiate) :
+ method(m), name(n), kernel_weight_format(kwf), is_supported(is_supported),
+ cycle_estimate( [is_recommended](const GemmArgs &args, const OutputStage &os) { return (is_recommended == nullptr) ? 0 : (is_recommended(args, os) ? 0 : UINT64_MAX); } ),
+ instantiate(instantiate) { }
};
/* Slightly different version of above for straightforward GEMMs with no
@@ -93,15 +123,36 @@ template<typename Top, typename Tret>
struct GemmImplementation<Top, Tret, Nothing> {
const GemmMethod method;
const char * name;
+ const KernelWeightFormat kernel_weight_format = KernelWeightFormat::NON_FIXED;
std::function<bool(const GemmArgs &)> is_supported = {};
std::function<uint64_t(const GemmArgs &)> cycle_estimate = {};
std::function<GemmCommon<Top, Tret> *(const GemmArgs &)> instantiate = {};
bool do_is_supported(const GemmArgs &args, const Nothing &) const {
- if (is_supported != nullptr) {
- return is_supported(args);
+ // Check supplied is_supported() function first.
+ if (is_supported != nullptr && !is_supported(args)) {
+ return false;
+ }
+
+ // Check weight format is appropriate.
+ if (args._fixed_format == false) {
+ // Can't return a fixed format kernel if we weren't asked for one.
+ return (kernel_weight_format == KernelWeightFormat::NON_FIXED);
} else {
- return true;
+ // Fixed format kernel requested: if this is a non-fixed format kernel we can't use it.
+ if (kernel_weight_format == KernelWeightFormat::NON_FIXED) {
+ return false;
+ }
+
+ // If there's no config, or the config says ANY then this one is OK.
+ if (!args._cfg || args._cfg->weight_format == WeightFormat::ANY) {
+ return true;
+ }
+
+ // If we get here it means there is a config and it specifies a format. Check it matches this kernel.
+ // NOTE: this will execute SVE instructions if it's an SVE kernel, so it's important that is_supported()
+ // was called above first.
+ return (args._cfg->weight_format == get_weight_format(kernel_weight_format, sizeof(Top)));
}
}
@@ -129,10 +180,22 @@ struct GemmImplementation<Top, Tret, Nothing> {
return impl;
}
+ static GemmImplementation with_estimate(GemmMethod m, const char *n, KernelWeightFormat f,
+ std::function<bool(const GemmArgs &)> is_supported, std::function<uint64_t(const GemmArgs &)> cycle_estimate,
+ std::function<GemmCommon<Top, Tret> *(const GemmArgs &)> instantiate) {
+ GemmImplementation impl(m,n,f);
+
+ impl.is_supported=is_supported;
+ impl.cycle_estimate=cycle_estimate;
+ impl.instantiate=instantiate;
+
+ return impl;
+ }
+
GemmImplementation(const GemmImplementation &) = default;
GemmImplementation & operator= (const GemmImplementation &) = default;
- GemmImplementation(GemmMethod m, const char * n) : method(m), name(n) {}
+ GemmImplementation(GemmMethod m, const char *n, KernelWeightFormat f=KernelWeightFormat::NON_FIXED) : method(m), name(n), kernel_weight_format(f) {}
GemmImplementation(GemmMethod m, const char *n,
std::function<bool(const GemmArgs &)> is_supported, std::function<bool(const GemmArgs &)> is_recommended,
@@ -140,6 +203,13 @@ struct GemmImplementation<Top, Tret, Nothing> {
method(m), name(n), is_supported(is_supported),
cycle_estimate( [is_recommended](const GemmArgs &args) -> uint64_t { return (is_recommended == nullptr) ? 0 : (is_recommended(args) ? 0 : UINT64_MAX); } ),
instantiate(instantiate) { }
+
+ GemmImplementation(GemmMethod m, const char *n, KernelWeightFormat kwf,
+ std::function<bool(const GemmArgs &)> is_supported, std::function<bool(const GemmArgs &)> is_recommended,
+ std::function<GemmCommon<Top, Tret> *(const GemmArgs &)> instantiate) :
+ method(m), name(n), kernel_weight_format(kwf), is_supported(is_supported),
+ cycle_estimate( [is_recommended](const GemmArgs &args) -> uint64_t { return (is_recommended == nullptr) ? 0 : (is_recommended(args) ? 0 : UINT64_MAX); } ),
+ instantiate(instantiate) { }
};
/* "Main" function implemented for each valid combination of types.
@@ -252,4 +322,17 @@ UniqueGemmCommon<Top, Tret> gemm(const GemmArgs &args, const OutputStage &os) {
return UniqueGemmCommon<Top, Tret>(nullptr);
}
+template<typename Top, typename Tret, class OutputStage>
+KernelDescription get_gemm_method(const GemmArgs &args, const OutputStage &os) {
+ const GemmImplementation<Top, Tret, OutputStage> *impl;
+
+ if (find_implementation<Top, Tret>(args, os, impl)) {
+ return KernelDescription(impl->method, impl->name);
+ }
+
+ /* This shouldn't happen - there should always be at least one valid implementation. */
+ return KernelDescription();
+}
+
+
} // namespace arm_gemm
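As a usage sketch for the newly added get_gemm_method<>() (hypothetical caller, not part of the patch; GemmArgs construction is assumed to have happened elsewhere, and the KernelDescription fields are as declared in arm_gemm.hpp):

#include <cstdio>
// assumes "arm_gemm.hpp" and the implementation headers are on the include path

void report_gemm_choice(const arm_gemm::GemmArgs &args) {
    // Query which kernel the selection logic would pick, without instantiating it.
    arm_gemm::KernelDescription kd =
        arm_gemm::get_gemm_method<float, float, arm_gemm::Nothing>(args, arm_gemm::Nothing());
    std::printf("selected kernel: %s\n", kd.name.c_str());
}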
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp b/src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp
index c75c320a6b..4ad54426e9 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -27,7 +27,9 @@
#include <cassert>
#include "arm_gemm.hpp"
+#include "bfloat.hpp"
#include "convolver.hpp"
+#include "kernel_weight_format.hpp"
#include "mergeresults.hpp"
#include "performance_parameters.hpp"
#include "quantized.hpp"
@@ -56,7 +58,7 @@ namespace {
// Others output directly to the matrix result. This helper class calls the
// appropriate functions, using templating to avoid calling non-existent
// functions.
-template<bool MergeStep, typename OutputStage>
+template<bool MergeStep, bool FixedFormat, typename OutputStage>
class kernel_and_merge {
public:
template<typename strategy, typename To, typename Tr, typename Tri, typename Tab>
@@ -64,7 +66,7 @@ public:
#ifdef CYCLE_PROFILING
profiler &prof,
#endif
- strategy &strat, const To *a_ptr, const To *b_panel, Tri *c_panel,
+ strategy &strat, const To *a_ptr, const To *b_panel, size_t b_stride, Tri *c_panel,
Tr *c_ptr, int ldc, int kern_k, unsigned int m_0,
unsigned int m_max, unsigned int n_0, unsigned int n_max, const Tr *biasptr,
const Activation &act, bool accumulate, const OutputStage &os, const int32_t *col_bias,
@@ -74,11 +76,11 @@ public:
// Run a kernel and call the separate merge step
template<>
template<typename strategy, typename To, typename Tr, typename Tri, typename Tab>
-void kernel_and_merge<true, Nothing>::run(
+void kernel_and_merge<true, false, Nothing>::run(
#ifdef CYCLE_PROFILING
profiler &prof,
#endif
- strategy &strat, const To *a_ptr, const To *b_panel, Tri *c_panel,
+ strategy &strat, const To *a_ptr, const To *b_panel, size_t, Tri *c_panel,
Tr *c_ptr, int ldc, int kern_k, unsigned int m_0,
unsigned int m_max, unsigned int n_0, unsigned int n_max, const Tr *biasptr,
const Activation &act, bool accumulate, const Nothing &, const int32_t *, Tab *)
@@ -101,14 +103,44 @@ void kernel_and_merge<true, Nothing>::run(
}
}
+// Run a fixed-format kernel and call the separate merge step
+template<>
+template<typename strategy, typename To, typename Tr, typename Tri, typename Tab>
+void kernel_and_merge<true, true, Nothing>::run(
+#ifdef CYCLE_PROFILING
+ profiler &prof,
+#endif
+ strategy &strat, const To *a_ptr, const To *b_panel, size_t b_stride, Tri *c_panel,
+ Tr *c_ptr, int ldc, int kern_k, unsigned int m_0,
+ unsigned int m_max, unsigned int n_0, unsigned int n_max, const Tr *biasptr,
+ const Activation &act, bool accumulate, const Nothing &, const int32_t *, Tab *)
+{
+ {
+#ifdef CYCLE_PROFILING
+ const int bblocks = iceildiv(n_max - n_0, strategy::out_width());
+ auto p=prof.ScopedProfiler(PROFILE_KERNEL, (strategy::out_height() * bblocks * strategy::out_width() * kern_k));
+#endif
+
+ strat.kernel(a_ptr, b_panel, b_stride, c_panel, 1, (n_max - n_0), kern_k);
+ }
+
+ {
+#ifdef CYCLE_PROFILING
+ const int bblocks = iceildiv(n_max - n_0, strategy::out_width());
+ auto p=prof.ScopedProfiler(PROFILE_MERGE, (strategy::out_height() * bblocks * strategy::out_width() * sizeof(Tr)));
+#endif
+ strat.transforms.Merge(c_ptr, c_panel, ldc, m_0, m_max, n_0, n_max, biasptr, act, accumulate);
+ }
+}
+
// Run a kernel with integrated merge
template<>
template<typename strategy, typename To, typename Tr, typename Tri, typename Tab>
-void kernel_and_merge<false, Nothing>::run(
+void kernel_and_merge<false, false, Nothing>::run(
#ifdef CYCLE_PROFILING
profiler &prof,
#endif
- strategy &strat, const To *a_ptr, const To *b_panel, Tri *,
+ strategy &strat, const To *a_ptr, const To *b_panel, size_t, Tri *,
Tr *c_ptr, int ldc, int kern_k, unsigned int m_0, unsigned int m_max,
unsigned int n_0, unsigned int n_max, const Tr *biasptr,
const Activation &act, bool accumulate, const Nothing &, const int32_t *,
@@ -143,11 +175,11 @@ void kernel_and_merge<false, Nothing>::run(
// Run a kernel with integrated merge, quantizing
template<>
template<typename strategy, typename To, typename Tr, typename Tri, typename Tab>
-void kernel_and_merge<false, Requantize32>::run(
+void kernel_and_merge<false, false, Requantize32>::run(
#ifdef CYCLE_PROFILING
profiler &prof,
#endif
- strategy &strat, const To *a_ptr, const To *b_panel, Tri *,
+ strategy &strat, const To *a_ptr, const To *b_panel, size_t, Tri *,
Tr *c_ptr, int ldc, int kern_k, unsigned int m_0, unsigned int m_max,
unsigned int n_0, unsigned int n_max, const Tr *,
const Activation &, bool accumulate, const Requantize32 &qp, const int32_t *col_bias,
@@ -170,11 +202,11 @@ void kernel_and_merge<false, Requantize32>::run(
// Run a kernel and call the separate quantize step
template<>
template<typename strategy, typename To, typename Tr, typename Tri, typename Tab>
-void kernel_and_merge<true, Requantize32>::run(
+void kernel_and_merge<true, false, Requantize32>::run(
#ifdef CYCLE_PROFILING
profiler &prof,
#endif
- strategy &strat, const To *a_ptr, const To *b_panel, Tri *c_panel,
+ strategy &strat, const To *a_ptr, const To *b_panel, size_t, Tri *c_panel,
Tr *c_ptr, int ldc, int kern_k, unsigned int m_0,
unsigned int m_max, unsigned int n_0, unsigned int n_max, const Tr *,
const Activation &, bool, const Requantize32 &qp, const int32_t *col_bias,
@@ -246,9 +278,49 @@ public:
typedef int32_t type;
};
+// Stripe width is a concept only needed for FixedFormat kernels. Use an accessor to avoid issues in other scenarios.
+template<typename strategy, bool FixedFormat>
+struct get_stripe_width {
+ static unsigned int get() {
+ return 0;
+ }
+};
+
+template<typename strategy>
+struct get_stripe_width<strategy, true> {
+ static unsigned int get() {
+ return strategy::stripe_width();
+ }
+};
+
+// KernelWeightFormat is a similar story.
+template<typename strategy, bool FixedFormat, typename To>
+struct get_kernel_weight_format {
+ static KernelWeightFormat get() {
+ return KernelWeightFormat::NON_FIXED;
+ }
+};
+
+template<typename strategy, typename To>
+struct get_kernel_weight_format<strategy, true, To> {
+ static KernelWeightFormat get() {
+ KernelWeightFormat kwf = strategy::kernel_weight_format();
+
+ // If we are using a BF16 kernel to do an FP32 problem (fast mode) then we need to set the BF16 flag on the
+ // weight format.
+ if (std::is_same<To, float>::value && std::is_same<typename strategy::operand_type, bfloat16>::value) {
+ uint32_t kwf_i = static_cast<uint32_t>(kwf);
+ kwf_i |= 0x10;
+ kwf = static_cast<KernelWeightFormat>(kwf_i);
+ }
+
+ return kwf;
+ }
+};
+
} // anonymous namespace
-template<typename strategy, typename To, typename Tr, typename OutputStage=Nothing, bool MergeStep=true, bool ForceThreadColumns=false>
+template<typename strategy, typename To, typename Tr, typename OutputStage=Nothing, bool MergeStep=true, bool FixedFormat=false, bool ForceThreadColumns=false>
class GemmInterleaved : public GemmCommon<To, Tr> {
typedef typename strategy::operand_type Toi;
typedef typename strategy::result_type Tri;
@@ -310,7 +382,7 @@ class GemmInterleaved : public GemmCommon<To, Tr> {
class blockwalker {
private:
/* Size loops, etc. based on our parent's configuration */
- const GemmInterleaved<strategy, To, Tr, OutputStage, MergeStep, ForceThreadColumns> &_parent;
+ const GemmInterleaved<strategy, To, Tr, OutputStage, MergeStep, FixedFormat, ForceThreadColumns> &_parent;
/* K, X and multi parameters for current iteration. */
unsigned int _k0=0, _x0=0, _multi=0;
@@ -325,9 +397,9 @@ class GemmInterleaved : public GemmCommon<To, Tr> {
bool _newmulti=true;
public:
- blockwalker(const GemmInterleaved<strategy, To, Tr, OutputStage, MergeStep, ForceThreadColumns> &parent) : _parent(parent) { }
+ blockwalker(const GemmInterleaved<strategy, To, Tr, OutputStage, MergeStep, FixedFormat, ForceThreadColumns> &parent) : _parent(parent) { }
- blockwalker(const GemmInterleaved<strategy, To, Tr, OutputStage, MergeStep, ForceThreadColumns> &parent,
+ blockwalker(const GemmInterleaved<strategy, To, Tr, OutputStage, MergeStep, FixedFormat, ForceThreadColumns> &parent,
unsigned int x_start, unsigned int x_end) : _parent(parent), _x0 (_x_start), _x_start(x_start), _x_end(x_end) { }
unsigned int xmax() {
@@ -666,7 +738,11 @@ public:
// Figure out how many "K" the kernel will actually process.
unsigned int kern_k = roundup(kmax - k0, strategy::k_unroll());
- const Toi *b_ptr = _B_transposed + (rounded_width * _Ktotal * multi) + (k0 * rounded_width) + (start_x * kern_k);
+ const Toi *b_ptr = FixedFormat ?
+ reinterpret_cast<const Toi *>(this->_Bptr) + (multi * this->_B_multi_stride) +
+ ((start_x / get_stripe_width<strategy, FixedFormat>::get()) * this->_ldb) +
+ (k0 * get_stripe_width<strategy, FixedFormat>::get()) :
+ _B_transposed + (rounded_width * _Ktotal * multi) + (k0 * rounded_width) + (start_x * kern_k);
unsigned int batch = batch_0;
unsigned int start_row = (start - (batch_0 * window_per_batch)) * strategy::out_height();
@@ -699,12 +775,12 @@ public:
}
// Perform the kernel and merge step, either separately or together as required.
- kernel_and_merge<MergeStep, OutputStage>::run(
+ kernel_and_merge<MergeStep, FixedFormat, OutputStage>::run(
#ifdef CYCLE_PROFILING
prof,
#endif
// Strategy and panel pointers
- strat, a_panel, b_ptr, c_panel,
+ strat, a_panel, b_ptr, this->_ldb, c_panel,
// Result buffer pointers
this->_Cptr + (batch * this->_C_batch_stride) + (multi * this->_C_multi_stride), this->_ldc,
// K size, and M/N ranges
@@ -802,6 +878,13 @@ public:
}
}
+ // For FixedFormat cases, figure out the B pointer. The loop below moves through batches and vertically through the output so this will be the same throughout.
+ if (FixedFormat) {
+ b_panel = reinterpret_cast<const Toi *>(this->_Bptr) + (current.multi() * this->_B_multi_stride) +
+ ((current.x0() / get_stripe_width<strategy, FixedFormat>::get()) * this->_ldb) +
+ (current.k0() * get_stripe_width<strategy, FixedFormat>::get());
+ }
+
/* Do the actual work. */
for (unsigned int batch = batch_0; batch <= batch_end; batch++) {
unsigned int first_m = (batch == batch_0) ? m_0 : 0;
@@ -840,12 +923,12 @@ public:
}
// Perform the kernel and merge step, either separately or together as required.
- kernel_and_merge<MergeStep, OutputStage>::run(
+ kernel_and_merge<MergeStep, FixedFormat, OutputStage>::run(
#ifdef CYCLE_PROFILING
prof,
#endif
// Strategy and panel pointers
- strat, a_ptr, b_panel, c_panel,
+ strat, a_ptr, b_panel, this->_ldb, c_panel,
// Result buffer pointers
result_ptr, this->_ldc,
// K size, and M/N ranges
@@ -863,7 +946,9 @@ public:
}
}
- b_panel += (roundup(current.xmax() - current.x0(), strategy::out_width()) * kern_k);
+ if (FixedFormat == false) {
+ b_panel += (roundup(current.xmax() - current.x0(), strategy::out_width()) * kern_k);
+ }
}
}
}
@@ -910,14 +995,18 @@ public:
// Interface implementation - pretransposed
bool B_is_pretransposed() const override {
- return true;
+ return (FixedFormat == false);
}
bool B_pretranspose_required() const override {
- return (_B_transposed==nullptr);
+ return (FixedFormat == false) && (_B_transposed==nullptr);
}
size_t get_B_pretransposed_array_size() const override {
+ if (FixedFormat) {
+ return 0;
+ }
+
unsigned int x_size = roundup(_Nsize, strategy::out_width());
return (x_size * _Ktotal * _nmulti * sizeof(Toi)) + get_col_sum_size();
@@ -939,7 +1028,7 @@ public:
void pretranspose_B_array(void *in_buffer, const To *B, const int ldb, const int B_multi_stride) override {
requantize_bias(in_buffer, B, ldb, B_multi_stride);
- // Put the transposed data after the column sums - in non-transposing cases get_col_sum_size() == 0
+ // Put the transposed data after the column sums - in non-quantized cases get_col_sum_size() == 0
uintptr_t buffer_int = reinterpret_cast<uintptr_t>(in_buffer);
Toi *buffer = reinterpret_cast<Toi *>(buffer_int + get_col_sum_size());
_B_transposed = buffer;
@@ -1005,7 +1094,7 @@ public:
}
void set_pretransposed_B_data(void *in_buffer) override {
- // Put the transposed data after the column sums - in non-transposing cases get_col_sum_size() == 0
+ // Put the transposed data after the column sums - in non-quantized cases get_col_sum_size() == 0
uintptr_t buffer_int = reinterpret_cast<uintptr_t>(in_buffer);
_B_transposed = reinterpret_cast<Toi *>(buffer_int + get_col_sum_size());
col_bias = reinterpret_cast<int32_t *>(in_buffer);
@@ -1065,6 +1154,7 @@ public:
c.inner_block_size = _k_block;
c.outer_block_size = _x_block;
c.filter = get_type_name<strategy>();
+ c.weight_format = get_weight_format(get_kernel_weight_format<strategy, FixedFormat, To>::get(), sizeof(To));
return c;
}
@@ -1074,6 +1164,9 @@ public:
template<typename strategy, typename To, typename Tr, typename OutputStage=Nothing>
using GemmInterleavedNoMerge = GemmInterleaved<strategy, To, Tr, OutputStage, false>;
+template<typename strategy, typename To, typename Tr, typename OutputStage=Nothing>
+using GemmInterleavedFixedFormat = GemmInterleaved<strategy, To, Tr, OutputStage, true, true>;
+
template<typename strategy, typename To, typename Tr>
using GemmInterleavedPretransposedNoMergeQuantizedInline = GemmInterleaved<strategy, To, Tr, Requantize32, false>;
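One subtlety in the gemm_interleaved.hpp changes above: when a BF16 kernel is used for an FP32 problem ("fast mode"), get_kernel_weight_format<> promotes the kernel's weight format by setting bit 4 (the `kwf_i |= 0x10` above). A standalone sketch of that promotion (hypothetical helper, not part of the patch; enum values taken from kernel_weight_format.hpp below):

#include <cstdint>

// Subset of the real enum, for a self-contained check.
enum class KernelWeightFormat : uint32_t { VL2VL_BL64 = 0x2801, VL2VL_BL64_BF16 = 0x2811 };

// Mirrors the bit-4 OR performed when operand_type is bfloat16 but the input type is float.
constexpr KernelWeightFormat promote_to_bf16(KernelWeightFormat kwf) {
    return static_cast<KernelWeightFormat>(static_cast<uint32_t>(kwf) | 0x10);
}

static_assert(promote_to_bf16(KernelWeightFormat::VL2VL_BL64) == KernelWeightFormat::VL2VL_BL64_BF16,
              "bit 4 turns a plain format into its _BF16 twin");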
diff --git a/src/core/NEON/kernels/arm_gemm/kernel_weight_format.hpp b/src/core/NEON/kernels/arm_gemm/kernel_weight_format.hpp
new file mode 100644
index 0000000000..6b89dd0d73
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernel_weight_format.hpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#include "arm_gemm.hpp"
+
+namespace arm_gemm {
+
+/* Internal enum to define the weight format a kernel is expecting.
+ *
+ * This is distinct from the "external" WeightFormat defined in arm_gemm.hpp primarily to allow for SVE, where
+ * internally kernels are defined in terms of multiples of the SVE vector length, but externally they are converted
+ * to a fixed format (based on the VL of the machine we are running on).
+ *
+ * Encoded as a bitfield:
+ * bit 0 : SVE flag
+ * bit 4 : BF16 convert flag (fast mode)
+ * bits 11-8 : block length (bytes)
+ * bits 15-12: vector count
+ */
+enum class KernelWeightFormat {
+ NON_FIXED = 0,
+ VL128_BL16 = 0x1200,
+ VL128_BL32 = 0x1400,
+ VL128_BL32_BF16 = 0x1410,
+ VL128_BL64 = 0x1800,
+ VL256_BL64 = 0x2800,
+ VL256_BL64_BF16 = 0x2810,
+ VL1VL_BL16 = 0x1201,
+ VL1VL_BL32 = 0x1401,
+ VL1VL_BL32_BF16 = 0x1411,
+ VL1VL_BL64 = 0x1801,
+ VL2VL_BL64 = 0x2801,
+ VL2VL_BL64_BF16 = 0x2811
+};
+
+WeightFormat get_weight_format(const KernelWeightFormat, size_t);
+
+} // namespace arm_gemm
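
The bitfield documented above can be decoded mechanically; the following is a self-contained sketch of the field extraction (the helper names are hypothetical, and this is not the library's get_weight_format() implementation, whose definition is not part of this header):

    #include <cstdint>

    constexpr bool     kwf_is_sve(uint32_t f)       { return (f & 0x1u) != 0; }
    constexpr bool     kwf_bf16_convert(uint32_t f) { return (f & 0x10u) != 0; }
    constexpr unsigned kwf_block_bytes(uint32_t f)  { return (f >> 8) & 0xFu; }
    constexpr unsigned kwf_vector_count(uint32_t f) { return (f >> 12) & 0xFu; }

    // Example: VL256_BL64_BF16 == 0x2810 decodes to two vectors, 8-byte
    // blocks, BF16 fast-mode conversion, fixed (non-SVE) vector length.
    static_assert(kwf_vector_count(0x2810) == 2, "");
    static_assert(kwf_block_bytes(0x2810)  == 8, "");
    static_assert(kwf_bf16_convert(0x2810) && !kwf_is_sve(0x2810), "");
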
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_bf16fp32_mmla_6x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_bf16fp32_mmla_6x16.hpp
new file mode 100644
index 0000000000..9a871d4b88
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_bf16fp32_mmla_6x16.hpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#pragma once
+#ifdef __aarch64__
+
+#include "../std_transforms_fixed.hpp"
+#include "../bfloat.hpp"
+#include "../kernel_weight_format.hpp"
+#include "../performance_parameters.hpp"
+
+#define ARGLIST \
+ unsigned int, const unsigned int *, \
+ IndirectInputArg<bfloat16>, \
+ size_t, size_t, \
+ const bfloat16 *, \
+ size_t, \
+ IndirectOutputArg<float>, \
+ const float *, Activation, bool
+
+namespace arm_gemm
+{
+// Actual kernel implementations
+void a64_ffhybrid_bf16fp32_mmla_6x16( ARGLIST );
+
+class cls_a64_ffhybrid_bf16fp32_mmla_6x16
+{
+public:
+ typedef bfloat16 lhs_operand_type;
+ typedef bfloat16 rhs_operand_type;
+ typedef float result_type;
+
+ typedef void (*kern_type)( ARGLIST );
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 6;
+ }
+ static unsigned int stripe_width()
+ {
+ return 4;
+ }
+
+ static KernelWeightFormat kernel_weight_format()
+ {
+ return KernelWeightFormat::VL256_BL64;
+ }
+
+ static unsigned int out_width()
+ {
+ return 16;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 4;
+ }
+
+ static constexpr bool supports_accumulate()
+ {
+ return true;
+ }
+
+ StdTransformsFixed<rhs_operand_type, result_type, 6, 16, 4> transforms = {};
+ template<typename T>
+ static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
+ {
+ if (std::is_same<T, bfloat16>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 37.09 };
+ }
+ }
+
+ return { 1.0 };
+ }
+
+ // Default to the generic kernel
+ kern_type kernel=a64_ffhybrid_bf16fp32_mmla_6x16;
+ cls_a64_ffhybrid_bf16fp32_mmla_6x16(const CPUInfo *)
+ {
+ }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+#endif // __aarch64__
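
The blocking parameters above describe how this fixed-format kernel walks the weight tensor: the 16-wide output tile is covered by out_width() / stripe_width() == 4 column stripes, and the assembly in generic.cpp (below) addresses each stripe at a multiple of the caller-supplied B_stride. A hedged sketch of that addressing (function name hypothetical, derived from the kernel's B-pointer setup sequence):

    // Base pointer of column stripe s, for s < out_width() / stripe_width().
    // Mirrors the "add x10, x11, x19, LSL #1" chain in generic.cpp, where
    // LSL #1 scales the element stride to bytes for 2-byte bfloat16.
    const bfloat16 *stripe_base(const bfloat16 *cur_B_ptr, size_t B_stride, unsigned s) {
        return cur_B_ptr + s * B_stride;   // B_stride is in elements
    }
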
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_bf16fp32_mmla_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_bf16fp32_mmla_6x16/generic.cpp
new file mode 100644
index 0000000000..ec93586f57
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_bf16fp32_mmla_6x16/generic.cpp
@@ -0,0 +1,3807 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __aarch64__
+
+#include "arm_gemm.hpp"
+#include "../../utils.hpp"
+#include "../../bfloat.hpp"
+
+#include <cassert>
+#include <limits>
+
+namespace arm_gemm {
+
+void a64_ffhybrid_bf16fp32_mmla_6x16 (
+ unsigned int num_strings, const unsigned int *string_lengths, IndirectInputArg<bfloat16> A_arg,
+ size_t M, size_t N, const bfloat16 *B_ptr, size_t B_stride, IndirectOutputArg<float> output_arg,
+ const float *bias, Activation act, bool accumulate
+)
+{
+ struct KernelArgs {
+ float maxval = static_cast<float>(std::numeric_limits<float>::infinity());
+ float minval = -static_cast<float>(std::numeric_limits<float>::infinity());
+ unsigned int num_strings = {};
+ const unsigned int *string_lengths = {};
+ size_t N = {};
+ const bfloat16 *B_ptr = {};
+ const bfloat16 *cur_B_ptr = {};
+ size_t B_stride = {};
+ size_t output_offset = {};
+ size_t input_initial_col = {};
+ size_t input_offset = {};
+ } ka;
+
+ unsigned long flags=0;
+ void *output_ptr;
+ void *input_ptr;
+
+ if (output_arg.is_indirect) {
+ output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_offset=output_arg.indirect.offset;
+ flags |= 0x4;
+ } else {
+ output_ptr=(void *)(output_arg.direct.base);
+ ka.output_offset=output_arg.direct.stride;
+ }
+
+ if (A_arg.is_indirect) {
+ input_ptr=(void *)(A_arg.indirect.ptr);
+ ka.input_offset=A_arg.indirect.start_row;
+ ka.input_initial_col=A_arg.indirect.start_col;
+ flags |= 0x8;
+ } else {
+ assert(num_strings==1);
+ input_ptr=(void *)(A_arg.direct.base);
+ ka.input_offset=A_arg.direct.stride;
+ }
+ if (accumulate) {
+ flags |= 0x1;
+ }
+ ka.num_strings = num_strings;
+ ka.string_lengths = string_lengths;
+ ka.N = N;
+ ka.B_ptr = B_ptr;
+ ka.B_stride = B_stride;
+ switch(act.type) {
+ default:
+ case Activation::Type::None:
+ break;
+ case Activation::Type::BoundedReLU:
+ ka.maxval = static_cast<float>(act.param1);
+ /* fall through */
+ case Activation::Type::ReLU:
+ ka.minval = 0;
+ flags |= 0x2;
+ break;
+ }
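+ // The assembly below tests these flag bits with "tbz %x[flags], #n":
+ //   bit 0 (0x1): accumulate into existing output values
+ //   bit 1 (0x2): apply the min/max activation clamp
+ //   bit 2 (0x4): output rows are indirect (array of row pointers)
+ //   bit 3 (0x8): input rows are indirect (array of row pointers)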
+ __asm__ __volatile__(
+ "1:" // Row loop
+ "cmp %x[M], #0x6\n"
+ "bge 191f\n"
+ "cmp %x[M], #0x4\n"
+ "bgt 153f\n"
+ "beq 115f\n"
+ "cmp %x[M], #0x2\n"
+ "bgt 77f\n"
+ "beq 39f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "2:" // Height 1: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0xc\n"
+ "bgt 3f\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
+ "bgt 3f\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
+ "bgt 3f\n"
+ "mov x10, x11\n"
+ "3:" // Height 1: B setup done
+ "cbz x14, 4f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
+ "zip2 v12.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
+ "zip2 v13.2d, v9.2d, v9.2d\n"
+ "zip1 v9.2d, v9.2d, v9.2d\n"
+ "zip2 v14.2d, v10.2d, v10.2d\n"
+ "zip1 v10.2d, v10.2d, v10.2d\n"
+ "add x14, x14, #0x40\n"
+ "zip2 v15.2d, v11.2d, v11.2d\n"
+ "zip1 v11.2d, v11.2d, v11.2d\n"
+ "b 16f\n"
+ "4:" // Height 1: no bias
+ "tbz %x[flags], #0, 15f\n"
+ "cmp x13, #0x10\n"
+ "bge 13f\n"
+ "tbz x13, #3, 8f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "tbz x13, #2, 6f\n"
+ "ld1 { v11.4s }, [x12], #0x10\n"
+ "tbz x13, #1, 5f\n"
+ "ldr d16, [x12], #0x8\n"
+ "mov x19, #0x38\n"
+ "tbz x13, #0, 12f\n"
+ "ld1 { v16.s }[2], [x12]\n"
+ "b 12f\n"
+ "5:" // Height 1: Partial accumulate: partial_1_12
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 12f\n"
+ "ldr s16, [x12, #0x0]\n"
+ "b 12f\n"
+ "6:" // Height 1: Partial accumulate: partial_2_8
+ "tbz x13, #1, 7f\n"
+ "ldr d11, [x12], #0x8\n"
+ "mov x19, #0x28\n"
+ "tbz x13, #0, 12f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "b 12f\n"
+ "7:" // Height 1: Partial accumulate: partial_1_8
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 12f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "b 12f\n"
+ "8:" // Height 1: Partial accumulate: partial_4_0
+ "tbz x13, #2, 10f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "tbz x13, #1, 9f\n"
+ "ldr d10, [x12], #0x8\n"
+ "mov x19, #0x18\n"
+ "tbz x13, #0, 12f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "b 12f\n"
+ "9:" // Height 1: Partial accumulate: partial_1_4
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 12f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "b 12f\n"
+ "10:" // Height 1: Partial accumulate: partial_2_0
+ "tbz x13, #1, 11f\n"
+ "ldr d9, [x12], #0x8\n"
+ "mov x19, #0x8\n"
+ "tbz x13, #0, 12f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "b 12f\n"
+ "11:" // Height 1: Partial accumulate: partial_1_0
+ "ldr s9, [x12, #0x0]\n"
+ "mov x19, #0x0\n"
+ "12:" // Height 1: Partial accumulate: Done
+ "sub x12, x12, x19\n"
+ "b 14f\n"
+ "13:" // Height 1: full accumulate
+ "ldr q9, [x12, #0x0]\n"
+ "ldr q10, [x12, #0x10]\n"
+ "ldr q11, [x12, #0x20]\n"
+ "ldr q16, [x12, #0x30]\n"
+ "14:" // Height 1: MMLA fixup
+ "zip1 v8.2d, v9.2d, v12.2d\n"
+ "zip2 v12.2d, v9.2d, v12.2d\n"
+ "zip1 v9.2d, v10.2d, v13.2d\n"
+ "zip2 v13.2d, v10.2d, v13.2d\n"
+ "zip1 v10.2d, v11.2d, v14.2d\n"
+ "zip2 v14.2d, v11.2d, v14.2d\n"
+ "zip1 v11.2d, v16.2d, v15.2d\n"
+ "zip2 v15.2d, v16.2d, v15.2d\n"
+ "b 16f\n"
+ "15:" // Height 1: no accumulate
+ "movi v8.16b, #0x0\n"
+ "movi v9.16b, #0x0\n"
+ "movi v10.16b, #0x0\n"
+ "movi v11.16b, #0x0\n"
+ "movi v12.16b, #0x0\n"
+ "movi v13.16b, #0x0\n"
+ "movi v14.16b, #0x0\n"
+ "movi v15.16b, #0x0\n"
+ "16:" // Height 1: setup done
+ "mov x27, #0x0\n"
+ "17:" // Height 1: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 18f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 19f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "b 19f\n"
+ "18:" // Height 1: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "19:" // Height 1: input setup done
+ "cmp x26, #0x8\n"
+ "blt 22f\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q7, [x11, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q6, [x11, #0x10]\n"
+ "blt 21f\n"
+ "20:" // Height 1: Multiply loop: Main loop head
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x10, #0x0]\n"
+ ".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x10, #0x10]\n"
+ ".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x9, #0x0]\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x9, #0x10]\n"
+ ".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x28, #0x0]\n"
+ ".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x28, #0x10]\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x11, #0x20]\n"
+ ".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x11, #0x30]\n"
+ ".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
+ "ldr q7, [x10, #0x20]\n"
+ ".inst 0x6e46ec2c // bfmmla v12.4s, v1.8h, v6.8h\n"
+ "ldr q6, [x10, #0x30]\n"
+ ".inst 0x6e47ec29 // bfmmla v9.4s, v1.8h, v7.8h\n"
+ "ldr q7, [x9, #0x20]\n"
+ ".inst 0x6e46ec2d // bfmmla v13.4s, v1.8h, v6.8h\n"
+ "ldr q6, [x9, #0x30]\n"
+ ".inst 0x6e47ec2a // bfmmla v10.4s, v1.8h, v7.8h\n"
+ "ldr q7, [x28, #0x20]\n"
+ ".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
+ "ldr q6, [x28, #0x30]\n"
+ "sub x26, x26, #0x8\n"
+ "cmp x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
+ ".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
+ "ldr q1, [x25, #0x0]\n"
+ "add x11, x11, #0x40\n"
+ "ldr q7, [x11, #0x0]\n"
+ "add x10, x10, #0x40\n"
+ "ldr q6, [x11, #0x10]\n"
+ "add x9, x9, #0x40\n"
+ "add x28, x28, #0x40\n"
+ "bge 20b\n"
+ "21:" // Height 1: Multiply loop: Single iteration only
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x10, #0x0]\n"
+ ".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x10, #0x10]\n"
+ ".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x9, #0x0]\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x9, #0x10]\n"
+ ".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x28, #0x0]\n"
+ ".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x28, #0x10]\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x11, #0x20]\n"
+ ".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x11, #0x30]\n"
+ ".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
+ "ldr q7, [x10, #0x20]\n"
+ ".inst 0x6e46ec2c // bfmmla v12.4s, v1.8h, v6.8h\n"
+ "ldr q6, [x10, #0x30]\n"
+ ".inst 0x6e47ec29 // bfmmla v9.4s, v1.8h, v7.8h\n"
+ "ldr q7, [x9, #0x20]\n"
+ ".inst 0x6e46ec2d // bfmmla v13.4s, v1.8h, v6.8h\n"
+ "ldr q6, [x9, #0x30]\n"
+ ".inst 0x6e47ec2a // bfmmla v10.4s, v1.8h, v7.8h\n"
+ "ldr q7, [x28, #0x20]\n"
+ ".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
+ "ldr q6, [x28, #0x30]\n"
+ "sub x26, x26, #0x8\n"
+ ".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
+ ".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
+ "add x25, x25, #0x10\n"
+ "add x11, x11, #0x40\n"
+ "add x10, x10, #0x40\n"
+ "add x9, x9, #0x40\n"
+ "add x28, x28, #0x40\n"
+ "22:" // Height 1: Multiply loop: Main loop skip
+ "cbz x26, 27f\n"
+ "cmp x26, #0x4\n"
+ "blt 24f\n"
+ "23:" // Height 1: Multiply loop: Odd block loop
+ "ldr d1, [x25], #0x8\n"
+ "ldr q6, [x11, #0x0]\n"
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr q7, [x11, #0x10]\n"
+ ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x10, #0x0]\n"
+ ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x10, #0x10]\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x9, #0x0]\n"
+ ".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x9, #0x10]\n"
+ ".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x28, #0x0]\n"
+ ".inst 0x6e47ec0e // bfmmla v14.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x28, #0x10]\n"
+ "sub x26, x26, #0x4\n"
+ "cmp x26, #0x4\n"
+ ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
+ "add x11, x11, #0x20\n"
+ "add x10, x10, #0x20\n"
+ "add x9, x9, #0x20\n"
+ "add x28, x28, #0x20\n"
+ "bge 23b\n"
+ "24:" // Height 1: Multiply loop: Skip odd blocks
+ "cbz x26, 27f\n"
+ "tbz x26, #1, 25f\n"
+ "ldr s1, [x25], #0x4\n"
+ "tbz x26, #0, 26f\n"
+ "ld1 { v1.h }[2], [x25]\n"
+ "b 26f\n"
+ "25:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
+ "ldr h1, [x25, #0x0]\n"
+ "26:" // Height 1: Multiply loop: Ragged operand read: Done
+ "ldr q7, [x11, #0x0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x10, #0x0]\n"
+ ".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x10, #0x10]\n"
+ ".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x9, #0x0]\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x9, #0x10]\n"
+ ".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x28, #0x0]\n"
+ ".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x28, #0x10]\n"
+ ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
+ "add x11, x11, #0x20\n"
+ "add x10, x10, #0x20\n"
+ "add x9, x9, #0x20\n"
+ "add x28, x28, #0x20\n"
+ "27:" // Height 1: Multiply loop: No odd multiplies
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 17b\n"
+ "uzp1 v8.2d, v8.2d, v12.2d\n"
+ "uzp1 v9.2d, v9.2d, v13.2d\n"
+ "uzp1 v10.2d, v10.2d, v14.2d\n"
+ "uzp1 v11.2d, v11.2d, v15.2d\n"
+ "tbz %x[flags], #1, 28f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v1.4s\n"
+ "fmin v9.4s, v9.4s, v1.4s\n"
+ "fmin v10.4s, v10.4s, v1.4s\n"
+ "fmin v11.4s, v11.4s, v1.4s\n"
+ "fmax v8.4s, v8.4s, v0.4s\n"
+ "fmax v9.4s, v9.4s, v0.4s\n"
+ "fmax v10.4s, v10.4s, v0.4s\n"
+ "fmax v11.4s, v11.4s, v0.4s\n"
+ "28:" // Height 1: No activation
+ "cmp x13, #0x10\n"
+ "bge 37f\n"
+ "tbz x13, #3, 32f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "st1 { v9.4s }, [x12], #0x10\n"
+ "tbz x13, #2, 30f\n"
+ "st1 { v10.4s }, [x12], #0x10\n"
+ "tbz x13, #1, 29f\n"
+ "str d11, [x12], #0x8\n"
+ "tbz x13, #0, 36f\n"
+ "st1 { v11.s }[2], [x12]\n"
+ "b 36f\n"
+ "29:" // Height 1: Partial direct writeback: partial_1_12
+ "tbz x13, #0, 36f\n"
+ "str s11, [x12, #0x0]\n"
+ "b 36f\n"
+ "30:" // Height 1: Partial direct writeback: partial_2_8
+ "tbz x13, #1, 31f\n"
+ "str d10, [x12], #0x8\n"
+ "tbz x13, #0, 36f\n"
+ "st1 { v10.s }[2], [x12]\n"
+ "b 36f\n"
+ "31:" // Height 1: Partial direct writeback: partial_1_8
+ "tbz x13, #0, 36f\n"
+ "str s10, [x12, #0x0]\n"
+ "b 36f\n"
+ "32:" // Height 1: Partial direct writeback: partial_4_0
+ "tbz x13, #2, 34f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "tbz x13, #1, 33f\n"
+ "str d9, [x12], #0x8\n"
+ "tbz x13, #0, 36f\n"
+ "st1 { v9.s }[2], [x12]\n"
+ "b 36f\n"
+ "33:" // Height 1: Partial direct writeback: partial_1_4
+ "tbz x13, #0, 36f\n"
+ "str s9, [x12, #0x0]\n"
+ "b 36f\n"
+ "34:" // Height 1: Partial direct writeback: partial_2_0
+ "tbz x13, #1, 35f\n"
+ "str d8, [x12], #0x8\n"
+ "tbz x13, #0, 36f\n"
+ "st1 { v8.s }[2], [x12]\n"
+ "b 36f\n"
+ "35:" // Height 1: Partial direct writeback: partial_1_0
+ "str s8, [x12, #0x0]\n"
+ "36:" // Height 1: Partial direct writeback: Done
+ "b 38f\n"
+ "37:" // Height 1: Full writeback
+ "str q8, [x12, #0x0]\n"
+ "str q9, [x12, #0x10]\n"
+ "str q10, [x12, #0x20]\n"
+ "str q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "38:" // Height 1: Writeback done
+ "subs x13, x13, #0x10\n"
+ "bgt 2b\n"
+ "b 230f\n"
+ "39:" // Height 2
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "40:" // Height 2: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0xc\n"
+ "bgt 41f\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
+ "bgt 41f\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
+ "bgt 41f\n"
+ "mov x10, x11\n"
+ "41:" // Height 2: B setup done
+ "cbz x14, 42f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
+ "zip2 v12.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
+ "zip2 v13.2d, v9.2d, v9.2d\n"
+ "zip1 v9.2d, v9.2d, v9.2d\n"
+ "zip2 v14.2d, v10.2d, v10.2d\n"
+ "zip1 v10.2d, v10.2d, v10.2d\n"
+ "add x14, x14, #0x40\n"
+ "zip2 v15.2d, v11.2d, v11.2d\n"
+ "zip1 v11.2d, v11.2d, v11.2d\n"
+ "b 54f\n"
+ "42:" // Height 2: no bias
+ "tbz %x[flags], #0, 53f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x13, #0x10\n"
+ "add x24, x12, x19, LSL #2\n"
+ "bge 51f\n"
+ "tbz x13, #3, 46f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "tbz x13, #2, 44f\n"
+ "ld1 { v11.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "tbz x13, #1, 43f\n"
+ "ldr d16, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x19, #0x38\n"
+ "tbz x13, #0, 50f\n"
+ "ld1 { v16.s }[2], [x12]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "b 50f\n"
+ "43:" // Height 2: Partial accumulate: partial_1_12
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 50f\n"
+ "ldr s16, [x12, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "b 50f\n"
+ "44:" // Height 2: Partial accumulate: partial_2_8
+ "tbz x13, #1, 45f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "tbz x13, #0, 50f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "b 50f\n"
+ "45:" // Height 2: Partial accumulate: partial_1_8
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 50f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "b 50f\n"
+ "46:" // Height 2: Partial accumulate: partial_4_0
+ "tbz x13, #2, 48f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "tbz x13, #1, 47f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x19, #0x18\n"
+ "tbz x13, #0, 50f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "b 50f\n"
+ "47:" // Height 2: Partial accumulate: partial_1_4
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 50f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "b 50f\n"
+ "48:" // Height 2: Partial accumulate: partial_2_0
+ "tbz x13, #1, 49f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "tbz x13, #0, 50f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "b 50f\n"
+ "49:" // Height 2: Partial accumulate: partial_1_0
+ "ldr s9, [x12, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
+ "50:" // Height 2: Partial accumulate: Done
+ "sub x12, x12, x19\n"
+ "b 52f\n"
+ "51:" // Height 2: full accumulate
+ "ldr q9, [x12, #0x0]\n"
+ "ldr q10, [x12, #0x10]\n"
+ "ldr q11, [x12, #0x20]\n"
+ "ldr q16, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "52:" // Height 2: MMLA fixup
+ "zip1 v8.2d, v9.2d, v12.2d\n"
+ "zip2 v12.2d, v9.2d, v12.2d\n"
+ "zip1 v9.2d, v10.2d, v13.2d\n"
+ "zip2 v13.2d, v10.2d, v13.2d\n"
+ "zip1 v10.2d, v11.2d, v14.2d\n"
+ "zip2 v14.2d, v11.2d, v14.2d\n"
+ "zip1 v11.2d, v16.2d, v15.2d\n"
+ "zip2 v15.2d, v16.2d, v15.2d\n"
+ "b 54f\n"
+ "53:" // Height 2: no accumulate
+ "movi v8.16b, #0x0\n"
+ "movi v9.16b, #0x0\n"
+ "movi v10.16b, #0x0\n"
+ "movi v11.16b, #0x0\n"
+ "movi v12.16b, #0x0\n"
+ "movi v13.16b, #0x0\n"
+ "movi v14.16b, #0x0\n"
+ "movi v15.16b, #0x0\n"
+ "54:" // Height 2: setup done
+ "mov x27, #0x0\n"
+ "55:" // Height 2: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 56f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 57f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "b 57f\n"
+ "56:" // Height 2: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "57:" // Height 2: input setup done
+ "cmp x26, #0x8\n"
+ "blt 60f\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q2, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q7, [x11, #0x0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "blt 59f\n"
+ "58:" // Height 2: Multiply loop: Main loop head
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x10, #0x0]\n"
+ ".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x10, #0x10]\n"
+ ".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x9, #0x0]\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x9, #0x10]\n"
+ ".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x28, #0x0]\n"
+ ".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x28, #0x10]\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x11, #0x20]\n"
+ ".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x11, #0x30]\n"
+ ".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
+ "ldr q7, [x10, #0x20]\n"
+ ".inst 0x6e46ec2c // bfmmla v12.4s, v1.8h, v6.8h\n"
+ "ldr q6, [x10, #0x30]\n"
+ ".inst 0x6e47ec29 // bfmmla v9.4s, v1.8h, v7.8h\n"
+ "ldr q7, [x9, #0x20]\n"
+ ".inst 0x6e46ec2d // bfmmla v13.4s, v1.8h, v6.8h\n"
+ "ldr q6, [x9, #0x30]\n"
+ ".inst 0x6e47ec2a // bfmmla v10.4s, v1.8h, v7.8h\n"
+ "ldr q7, [x28, #0x20]\n"
+ ".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
+ "ldr q6, [x28, #0x30]\n"
+ "sub x26, x26, #0x8\n"
+ "cmp x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "ldr q2, [x24, #0x0]\n"
+ ".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
+ "add x11, x11, #0x40\n"
+ "ldr q7, [x11, #0x0]\n"
+ ".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "add x10, x10, #0x40\n"
+ "add x9, x9, #0x40\n"
+ "add x28, x28, #0x40\n"
+ "bge 58b\n"
+ "59:" // Height 2: Multiply loop: Single iteration only
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x10, #0x0]\n"
+ ".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x10, #0x10]\n"
+ ".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x9, #0x0]\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x9, #0x10]\n"
+ ".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x28, #0x0]\n"
+ ".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x28, #0x10]\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x11, #0x20]\n"
+ ".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x11, #0x30]\n"
+ ".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
+ "ldr q7, [x10, #0x20]\n"
+ ".inst 0x6e46ec2c // bfmmla v12.4s, v1.8h, v6.8h\n"
+ "ldr q6, [x10, #0x30]\n"
+ ".inst 0x6e47ec29 // bfmmla v9.4s, v1.8h, v7.8h\n"
+ "ldr q7, [x9, #0x20]\n"
+ ".inst 0x6e46ec2d // bfmmla v13.4s, v1.8h, v6.8h\n"
+ "ldr q6, [x9, #0x30]\n"
+ ".inst 0x6e47ec2a // bfmmla v10.4s, v1.8h, v7.8h\n"
+ "ldr q7, [x28, #0x20]\n"
+ ".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
+ "ldr q6, [x28, #0x30]\n"
+ "sub x26, x26, #0x8\n"
+ ".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
+ ".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x11, x11, #0x40\n"
+ "add x10, x10, #0x40\n"
+ "add x9, x9, #0x40\n"
+ "add x28, x28, #0x40\n"
+ "60:" // Height 2: Multiply loop: Main loop skip
+ "cbz x26, 65f\n"
+ "cmp x26, #0x4\n"
+ "blt 62f\n"
+ "61:" // Height 2: Multiply loop: Odd block loop
+ "ldr d1, [x25], #0x8\n"
+ "ldr d2, [x24], #0x8\n"
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "sub x26, x26, #0x4\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x11, #0x10]\n"
+ ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
+ "ldr q6, [x9, #0x0]\n"
+ "ldr q7, [x9, #0x10]\n"
+ ".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e47ec0e // bfmmla v14.4s, v0.8h, v7.8h\n"
+ "ldr q6, [x28, #0x0]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "cmp x26, #0x4\n"
+ ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
+ "add x11, x11, #0x20\n"
+ "add x10, x10, #0x20\n"
+ "add x9, x9, #0x20\n"
+ "add x28, x28, #0x20\n"
+ "bge 61b\n"
+ "62:" // Height 2: Multiply loop: Skip odd blocks
+ "cbz x26, 65f\n"
+ "tbz x26, #1, 63f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "tbz x26, #0, 64f\n"
+ "ld1 { v1.h }[2], [x25]\n"
+ "ld1 { v2.h }[2], [x24]\n"
+ "b 64f\n"
+ "63:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
+ "ldr h1, [x25, #0x0]\n"
+ "ldr h2, [x24, #0x0]\n"
+ "64:" // Height 2: Multiply loop: Ragged operand read: Done
+ "ldr q7, [x11, #0x0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x10, #0x0]\n"
+ ".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x10, #0x10]\n"
+ ".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x9, #0x0]\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x9, #0x10]\n"
+ ".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x28, #0x0]\n"
+ ".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x28, #0x10]\n"
+ ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
+ "add x11, x11, #0x20\n"
+ "add x10, x10, #0x20\n"
+ "add x9, x9, #0x20\n"
+ "add x28, x28, #0x20\n"
+ "65:" // Height 2: Multiply loop: No odd multiplies
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 55b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "uzp1 v7.2d, v8.2d, v12.2d\n"
+ "uzp2 v8.2d, v8.2d, v12.2d\n"
+ "add x24, x12, x19, LSL #2\n"
+ "uzp1 v12.2d, v9.2d, v13.2d\n"
+ "uzp2 v9.2d, v9.2d, v13.2d\n"
+ "uzp1 v13.2d, v10.2d, v14.2d\n"
+ "uzp2 v10.2d, v10.2d, v14.2d\n"
+ "uzp1 v14.2d, v11.2d, v15.2d\n"
+ "uzp2 v11.2d, v11.2d, v15.2d\n"
+ "tbz %x[flags], #1, 66f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v7.4s, v7.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v1.4s\n"
+ "fmin v13.4s, v13.4s, v1.4s\n"
+ "fmin v14.4s, v14.4s, v1.4s\n"
+ "fmin v8.4s, v8.4s, v1.4s\n"
+ "fmin v9.4s, v9.4s, v1.4s\n"
+ "fmin v10.4s, v10.4s, v1.4s\n"
+ "fmin v11.4s, v11.4s, v1.4s\n"
+ "fmax v7.4s, v7.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v0.4s\n"
+ "fmax v13.4s, v13.4s, v0.4s\n"
+ "fmax v14.4s, v14.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v0.4s\n"
+ "fmax v9.4s, v9.4s, v0.4s\n"
+ "fmax v10.4s, v10.4s, v0.4s\n"
+ "fmax v11.4s, v11.4s, v0.4s\n"
+ "66:" // Height 2: No activation
+ "cmp x13, #0x10\n"
+ "bge 75f\n"
+ "tbz x13, #3, 70f\n"
+ "st1 { v7.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "tbz x13, #2, 68f\n"
+ "st1 { v13.4s }, [x12], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "tbz x13, #1, 67f\n"
+ "str d14, [x12], #0x8\n"
+ "str d11, [x24], #0x8\n"
+ "tbz x13, #0, 74f\n"
+ "st1 { v14.s }[2], [x12]\n"
+ "st1 { v11.s }[2], [x24]\n"
+ "b 74f\n"
+ "67:" // Height 2: Partial direct writeback: partial_1_12
+ "tbz x13, #0, 74f\n"
+ "str s14, [x12, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
+ "b 74f\n"
+ "68:" // Height 2: Partial direct writeback: partial_2_8
+ "tbz x13, #1, 69f\n"
+ "str d13, [x12], #0x8\n"
+ "str d10, [x24], #0x8\n"
+ "tbz x13, #0, 74f\n"
+ "st1 { v13.s }[2], [x12]\n"
+ "st1 { v10.s }[2], [x24]\n"
+ "b 74f\n"
+ "69:" // Height 2: Partial direct writeback: partial_1_8
+ "tbz x13, #0, 74f\n"
+ "str s13, [x12, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
+ "b 74f\n"
+ "70:" // Height 2: Partial direct writeback: partial_4_0
+ "tbz x13, #2, 72f\n"
+ "st1 { v7.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "tbz x13, #1, 71f\n"
+ "str d12, [x12], #0x8\n"
+ "str d9, [x24], #0x8\n"
+ "tbz x13, #0, 74f\n"
+ "st1 { v12.s }[2], [x12]\n"
+ "st1 { v9.s }[2], [x24]\n"
+ "b 74f\n"
+ "71:" // Height 2: Partial direct writeback: partial_1_4
+ "tbz x13, #0, 74f\n"
+ "str s12, [x12, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
+ "b 74f\n"
+ "72:" // Height 2: Partial direct writeback: partial_2_0
+ "tbz x13, #1, 73f\n"
+ "str d7, [x12], #0x8\n"
+ "str d8, [x24], #0x8\n"
+ "tbz x13, #0, 74f\n"
+ "st1 { v7.s }[2], [x12]\n"
+ "st1 { v8.s }[2], [x24]\n"
+ "b 74f\n"
+ "73:" // Height 2: Partial direct writeback: partial_1_0
+ "str s7, [x12, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
+ "74:" // Height 2: Partial direct writeback: Done
+ "b 76f\n"
+ "75:" // Height 2: Full writeback
+ "str q7, [x12, #0x0]\n"
+ "str q12, [x12, #0x10]\n"
+ "str q13, [x12, #0x20]\n"
+ "str q14, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
+ "76:" // Height 2: Writeback done
+ "subs x13, x13, #0x10\n"
+ "bgt 40b\n"
+ "b 230f\n"
+ "77:" // Height 3
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "78:" // Height 3: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0xc\n"
+ "bgt 79f\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
+ "bgt 79f\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
+ "bgt 79f\n"
+ "mov x10, x11\n"
+ "79:" // Height 3: B setup done
+ "cbz x14, 80f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
+ "zip2 v12.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
+ "zip2 v13.2d, v9.2d, v9.2d\n"
+ "zip1 v9.2d, v9.2d, v9.2d\n"
+ "zip2 v14.2d, v10.2d, v10.2d\n"
+ "zip1 v10.2d, v10.2d, v10.2d\n"
+ "add x14, x14, #0x40\n"
+ "zip2 v15.2d, v11.2d, v11.2d\n"
+ "zip1 v11.2d, v11.2d, v11.2d\n"
+ "mov v16.16b, v8.16b\n"
+ "mov v20.16b, v12.16b\n"
+ "mov v17.16b, v9.16b\n"
+ "mov v21.16b, v13.16b\n"
+ "mov v18.16b, v10.16b\n"
+ "mov v22.16b, v14.16b\n"
+ "mov v19.16b, v11.16b\n"
+ "mov v23.16b, v15.16b\n"
+ "b 92f\n"
+ "80:" // Height 3: no bias
+ "tbz %x[flags], #0, 91f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "cmp x13, #0x10\n"
+ "add x23, x24, x19, LSL #2\n"
+ "bge 89f\n"
+ "tbz x13, #3, 84f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "tbz x13, #2, 82f\n"
+ "ld1 { v11.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v19.4s }, [x23], #0x10\n"
+ "tbz x13, #1, 81f\n"
+ "ldr d16, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d24, [x23], #0x8\n"
+ "tbz x13, #0, 88f\n"
+ "ld1 { v16.s }[2], [x12]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
+ "b 88f\n"
+ "81:" // Height 3: Partial accumulate: partial_1_12
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 88f\n"
+ "ldr s16, [x12, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
+ "b 88f\n"
+ "82:" // Height 3: Partial accumulate: partial_2_8
+ "tbz x13, #1, 83f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d19, [x23], #0x8\n"
+ "tbz x13, #0, 88f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "b 88f\n"
+ "83:" // Height 3: Partial accumulate: partial_1_8
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 88f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "b 88f\n"
+ "84:" // Height 3: Partial accumulate: partial_4_0
+ "tbz x13, #2, 86f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "tbz x13, #1, 85f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d18, [x23], #0x8\n"
+ "tbz x13, #0, 88f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "b 88f\n"
+ "85:" // Height 3: Partial accumulate: partial_1_4
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 88f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "b 88f\n"
+ "86:" // Height 3: Partial accumulate: partial_2_0
+ "tbz x13, #1, 87f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "tbz x13, #0, 88f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "b 88f\n"
+ "87:" // Height 3: Partial accumulate: partial_1_0
+ "ldr s9, [x12, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s17, [x23, #0x0]\n"
+ "88:" // Height 3: Partial accumulate: Done
+ "sub x12, x12, x19\n"
+ "b 90f\n"
+ "89:" // Height 3: full accumulate
+ "ldr q9, [x12, #0x0]\n"
+ "ldr q10, [x12, #0x10]\n"
+ "ldr q11, [x12, #0x20]\n"
+ "ldr q16, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q17, [x23, #0x0]\n"
+ "ldr q18, [x23, #0x10]\n"
+ "ldr q19, [x23, #0x20]\n"
+ "ldr q24, [x23, #0x30]\n"
+ "90:" // Height 3: MMLA fixup
+ "zip1 v8.2d, v9.2d, v12.2d\n"
+ "zip2 v12.2d, v9.2d, v12.2d\n"
+ "zip1 v9.2d, v10.2d, v13.2d\n"
+ "zip2 v13.2d, v10.2d, v13.2d\n"
+ "zip1 v10.2d, v11.2d, v14.2d\n"
+ "zip2 v14.2d, v11.2d, v14.2d\n"
+ "zip1 v11.2d, v16.2d, v15.2d\n"
+ "zip2 v15.2d, v16.2d, v15.2d\n"
+ "zip1 v16.2d, v17.2d, v20.2d\n"
+ "zip2 v20.2d, v17.2d, v20.2d\n"
+ "zip1 v17.2d, v18.2d, v21.2d\n"
+ "zip2 v21.2d, v18.2d, v21.2d\n"
+ "zip1 v18.2d, v19.2d, v22.2d\n"
+ "zip2 v22.2d, v19.2d, v22.2d\n"
+ "zip1 v19.2d, v24.2d, v23.2d\n"
+ "zip2 v23.2d, v24.2d, v23.2d\n"
+ "b 92f\n"
+ "91:" // Height 3: no accumulate
+ "movi v8.16b, #0x0\n"
+ "movi v9.16b, #0x0\n"
+ "movi v10.16b, #0x0\n"
+ "movi v11.16b, #0x0\n"
+ "movi v12.16b, #0x0\n"
+ "movi v13.16b, #0x0\n"
+ "movi v14.16b, #0x0\n"
+ "movi v15.16b, #0x0\n"
+ "movi v16.16b, #0x0\n"
+ "movi v17.16b, #0x0\n"
+ "movi v18.16b, #0x0\n"
+ "movi v19.16b, #0x0\n"
+ "movi v20.16b, #0x0\n"
+ "movi v21.16b, #0x0\n"
+ "movi v22.16b, #0x0\n"
+ "movi v23.16b, #0x0\n"
+ "92:" // Height 3: setup done
+ "mov x27, #0x0\n"
+ "93:" // Height 3: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 94f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 95f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "b 95f\n"
+ "94:" // Height 3: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "95:" // Height 3: input setup done
+ "cmp x26, #0x8\n"
+ "blt 98f\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q2, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q3, [x23, #0x0]\n"
+ "ldr q7, [x11, #0x0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "blt 97f\n"
+ "96:" // Height 3: Multiply loop: Main loop head
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "trn1 v2.2d, v3.2d, v4.2d\n"
+ ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x10, #0x0]\n"
+ ".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x10, #0x10]\n"
+ ".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ ".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x9, #0x0]\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ "sub x26, x26, #0x8\n"
+ ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x9, #0x10]\n"
+ ".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
+ "cmp x26, #0x10\n"
+ ".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x28, #0x0]\n"
+ ".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x28, #0x10]\n"
+ ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x11, #0x20]\n"
+ ".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x11, #0x30]\n"
+ "ldr q2, [x24, #0x0]\n"
+ ".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
+ ".inst 0x6e47ec70 // bfmmla v16.4s, v3.8h, v7.8h\n"
+ "ldr q7, [x10, #0x20]\n"
+ ".inst 0x6e46ec2c // bfmmla v12.4s, v1.8h, v6.8h\n"
+ "add x11, x11, #0x40\n"
+ ".inst 0x6e46ec74 // bfmmla v20.4s, v3.8h, v6.8h\n"
+ "ldr q6, [x10, #0x30]\n"
+ ".inst 0x6e47ec29 // bfmmla v9.4s, v1.8h, v7.8h\n"
+ "add x10, x10, #0x40\n"
+ ".inst 0x6e47ec71 // bfmmla v17.4s, v3.8h, v7.8h\n"
+ "ldr q7, [x9, #0x20]\n"
+ ".inst 0x6e46ec2d // bfmmla v13.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e46ec75 // bfmmla v21.4s, v3.8h, v6.8h\n"
+ "ldr q6, [x9, #0x30]\n"
+ ".inst 0x6e47ec2a // bfmmla v10.4s, v1.8h, v7.8h\n"
+ "add x9, x9, #0x40\n"
+ ".inst 0x6e47ec72 // bfmmla v18.4s, v3.8h, v7.8h\n"
+ "ldr q7, [x28, #0x20]\n"
+ ".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e46ec76 // bfmmla v22.4s, v3.8h, v6.8h\n"
+ "ldr q6, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ ".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
+ ".inst 0x6e47ec73 // bfmmla v19.4s, v3.8h, v7.8h\n"
+ "ldr q7, [x11, #0x0]\n"
+ ".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
+ "ldr q1, [x25, #0x0]\n"
+ ".inst 0x6e46ec77 // bfmmla v23.4s, v3.8h, v6.8h\n"
+ "ldr q3, [x23, #0x0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "bge 96b\n"
+ "97:" // Height 3: Multiply loop: Single iteration only
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "trn1 v2.2d, v3.2d, v4.2d\n"
+ ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x10, #0x0]\n"
+ ".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x10, #0x10]\n"
+ ".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ ".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x9, #0x0]\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ "sub x26, x26, #0x8\n"
+ ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x9, #0x10]\n"
+ ".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x28, #0x0]\n"
+ ".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x28, #0x10]\n"
+ ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x11, #0x20]\n"
+ ".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x11, #0x30]\n"
+ ".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
+ "add x11, x11, #0x40\n"
+ ".inst 0x6e47ec70 // bfmmla v16.4s, v3.8h, v7.8h\n"
+ "ldr q7, [x10, #0x20]\n"
+ ".inst 0x6e46ec2c // bfmmla v12.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e46ec74 // bfmmla v20.4s, v3.8h, v6.8h\n"
+ "ldr q6, [x10, #0x30]\n"
+ ".inst 0x6e47ec29 // bfmmla v9.4s, v1.8h, v7.8h\n"
+ "add x10, x10, #0x40\n"
+ ".inst 0x6e47ec71 // bfmmla v17.4s, v3.8h, v7.8h\n"
+ "ldr q7, [x9, #0x20]\n"
+ ".inst 0x6e46ec2d // bfmmla v13.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e46ec75 // bfmmla v21.4s, v3.8h, v6.8h\n"
+ "ldr q6, [x9, #0x30]\n"
+ ".inst 0x6e47ec2a // bfmmla v10.4s, v1.8h, v7.8h\n"
+ "add x9, x9, #0x40\n"
+ ".inst 0x6e47ec72 // bfmmla v18.4s, v3.8h, v7.8h\n"
+ "ldr q7, [x28, #0x20]\n"
+ ".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e46ec76 // bfmmla v22.4s, v3.8h, v6.8h\n"
+ "ldr q6, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ ".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
+ ".inst 0x6e47ec73 // bfmmla v19.4s, v3.8h, v7.8h\n"
+ ".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e46ec77 // bfmmla v23.4s, v3.8h, v6.8h\n"
+ "98:" // Height 3: Multiply loop: Main loop skip
+ "cbz x26, 103f\n"
+ "cmp x26, #0x4\n"
+ "blt 100f\n"
+ "99:" // Height 3: Multiply loop: Odd block loop
+ "ldr d1, [x25], #0x8\n"
+ "ldr d2, [x24], #0x8\n"
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr d3, [x23], #0x8\n"
+ "ldr q6, [x11, #0x0]\n"
+ "trn1 v2.2d, v3.2d, v4.2d\n"
+ ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
+ "ldr q7, [x11, #0x10]\n"
+ ".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x10, #0x0]\n"
+ ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x10, #0x10]\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ "sub x26, x26, #0x4\n"
+ ".inst 0x6e46ec51 // bfmmla v17.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x9, #0x0]\n"
+ ".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
+ "cmp x26, #0x4\n"
+ ".inst 0x6e47ec55 // bfmmla v21.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x9, #0x10]\n"
+ ".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
+ "add x11, x11, #0x20\n"
+ ".inst 0x6e46ec52 // bfmmla v18.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x28, #0x0]\n"
+ ".inst 0x6e47ec0e // bfmmla v14.4s, v0.8h, v7.8h\n"
+ "add x10, x10, #0x20\n"
+ ".inst 0x6e47ec56 // bfmmla v22.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x28, #0x10]\n"
+ ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
+ "add x9, x9, #0x20\n"
+ ".inst 0x6e46ec53 // bfmmla v19.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
+ "add x28, x28, #0x20\n"
+ ".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
+ "bge 99b\n"
+ "100:" // Height 3: Multiply loop: Skip odd blocks
+ "cbz x26, 103f\n"
+ "tbz x26, #1, 101f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "ldr s3, [x23], #0x4\n"
+ "tbz x26, #0, 102f\n"
+ "ld1 { v1.h }[2], [x25]\n"
+ "ld1 { v2.h }[2], [x24]\n"
+ "ld1 { v3.h }[2], [x23]\n"
+ "b 102f\n"
+ "101:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
+ "ldr h1, [x25, #0x0]\n"
+ "ldr h2, [x24, #0x0]\n"
+ "ldr h3, [x23, #0x0]\n"
+ "102:" // Height 3: Multiply loop: Ragged operand read: Done
+ "ldr q7, [x11, #0x0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn1 v2.2d, v3.2d, v4.2d\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x10, #0x0]\n"
+ ".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x10, #0x10]\n"
+ "add x11, x11, #0x20\n"
+ ".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x10, x10, #0x20\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x9, #0x10]\n"
+ "add x9, x9, #0x20\n"
+ ".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x28, #0x0]\n"
+ ".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x28, #0x10]\n"
+ "add x28, x28, #0x20\n"
+ ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
+ "103:" // Height 3: Multiply loop: No odd multiplies
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 93b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "uzp1 v7.2d, v8.2d, v12.2d\n"
+ "uzp2 v8.2d, v8.2d, v12.2d\n"
+ "uzp1 v12.2d, v9.2d, v13.2d\n"
+ "uzp2 v9.2d, v9.2d, v13.2d\n"
+ "add x23, x24, x19, LSL #2\n"
+ "uzp1 v13.2d, v10.2d, v14.2d\n"
+ "uzp2 v10.2d, v10.2d, v14.2d\n"
+ "uzp1 v14.2d, v11.2d, v15.2d\n"
+ "uzp2 v11.2d, v11.2d, v15.2d\n"
+ "uzp1 v16.2d, v16.2d, v20.2d\n"
+ "uzp1 v17.2d, v17.2d, v21.2d\n"
+ "uzp1 v18.2d, v18.2d, v22.2d\n"
+ "uzp1 v19.2d, v19.2d, v23.2d\n"
+ "tbz %x[flags], #1, 104f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v7.4s, v7.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v1.4s\n"
+ "fmin v13.4s, v13.4s, v1.4s\n"
+ "fmin v14.4s, v14.4s, v1.4s\n"
+ "fmin v8.4s, v8.4s, v1.4s\n"
+ "fmin v9.4s, v9.4s, v1.4s\n"
+ "fmin v10.4s, v10.4s, v1.4s\n"
+ "fmin v11.4s, v11.4s, v1.4s\n"
+ "fmin v16.4s, v16.4s, v1.4s\n"
+ "fmin v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v1.4s\n"
+ "fmin v19.4s, v19.4s, v1.4s\n"
+ "fmax v7.4s, v7.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v0.4s\n"
+ "fmax v13.4s, v13.4s, v0.4s\n"
+ "fmax v14.4s, v14.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v0.4s\n"
+ "fmax v9.4s, v9.4s, v0.4s\n"
+ "fmax v10.4s, v10.4s, v0.4s\n"
+ "fmax v11.4s, v11.4s, v0.4s\n"
+ "fmax v16.4s, v16.4s, v0.4s\n"
+ "fmax v17.4s, v17.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v0.4s\n"
+ "fmax v19.4s, v19.4s, v0.4s\n"
+ "104:" // Height 3: No activation
+ "cmp x13, #0x10\n"
+ "bge 113f\n"
+ "tbz x13, #3, 108f\n"
+ "st1 { v7.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v17.4s }, [x23], #0x10\n"
+ "tbz x13, #2, 106f\n"
+ "st1 { v13.4s }, [x12], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v18.4s }, [x23], #0x10\n"
+ "tbz x13, #1, 105f\n"
+ "str d14, [x12], #0x8\n"
+ "str d11, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "tbz x13, #0, 112f\n"
+ "st1 { v14.s }[2], [x12]\n"
+ "st1 { v11.s }[2], [x24]\n"
+ "st1 { v19.s }[2], [x23]\n"
+ "b 112f\n"
+ "105:" // Height 3: Partial direct writeback: partial_1_12
+ "tbz x13, #0, 112f\n"
+ "str s14, [x12, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
+ "str s19, [x23, #0x0]\n"
+ "b 112f\n"
+ "106:" // Height 3: Partial direct writeback: partial_2_8
+ "tbz x13, #1, 107f\n"
+ "str d13, [x12], #0x8\n"
+ "str d10, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "tbz x13, #0, 112f\n"
+ "st1 { v13.s }[2], [x12]\n"
+ "st1 { v10.s }[2], [x24]\n"
+ "st1 { v18.s }[2], [x23]\n"
+ "b 112f\n"
+ "107:" // Height 3: Partial direct writeback: partial_1_8
+ "tbz x13, #0, 112f\n"
+ "str s13, [x12, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
+ "str s18, [x23, #0x0]\n"
+ "b 112f\n"
+ "108:" // Height 3: Partial direct writeback: partial_4_0
+ "tbz x13, #2, 110f\n"
+ "st1 { v7.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "tbz x13, #1, 109f\n"
+ "str d12, [x12], #0x8\n"
+ "str d9, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "tbz x13, #0, 112f\n"
+ "st1 { v12.s }[2], [x12]\n"
+ "st1 { v9.s }[2], [x24]\n"
+ "st1 { v17.s }[2], [x23]\n"
+ "b 112f\n"
+ "109:" // Height 3: Partial direct writeback: partial_1_4
+ "tbz x13, #0, 112f\n"
+ "str s12, [x12, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
+ "str s17, [x23, #0x0]\n"
+ "b 112f\n"
+ "110:" // Height 3: Partial direct writeback: partial_2_0
+ "tbz x13, #1, 111f\n"
+ "str d7, [x12], #0x8\n"
+ "str d8, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "tbz x13, #0, 112f\n"
+ "st1 { v7.s }[2], [x12]\n"
+ "st1 { v8.s }[2], [x24]\n"
+ "st1 { v16.s }[2], [x23]\n"
+ "b 112f\n"
+ "111:" // Height 3: Partial direct writeback: partial_1_0
+ "str s7, [x12, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
+ "str s16, [x23, #0x0]\n"
+ "112:" // Height 3: Partial direct writeback: Done
+ "b 114f\n"
+ "113:" // Height 3: Full writeback
+ "str q7, [x12, #0x0]\n"
+ "str q12, [x12, #0x10]\n"
+ "str q13, [x12, #0x20]\n"
+ "str q14, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "114:" // Height 3: Writeback done
+ "subs x13, x13, #0x10\n"
+ "bgt 78b\n"
+ "b 230f\n"
+ "115:" // Height 4
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "116:" // Height 4: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0xc\n"
+ "bgt 117f\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
+ "bgt 117f\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
+ "bgt 117f\n"
+ "mov x10, x11\n"
+ "117:" // Height 4: B setup done
+ "cbz x14, 118f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
+ "zip2 v12.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
+ "zip2 v13.2d, v9.2d, v9.2d\n"
+ "zip1 v9.2d, v9.2d, v9.2d\n"
+ "zip2 v14.2d, v10.2d, v10.2d\n"
+ "zip1 v10.2d, v10.2d, v10.2d\n"
+ "add x14, x14, #0x40\n"
+ "zip2 v15.2d, v11.2d, v11.2d\n"
+ "zip1 v11.2d, v11.2d, v11.2d\n"
+ "mov v16.16b, v8.16b\n"
+ "mov v20.16b, v12.16b\n"
+ "mov v17.16b, v9.16b\n"
+ "mov v21.16b, v13.16b\n"
+ "mov v18.16b, v10.16b\n"
+ "mov v22.16b, v14.16b\n"
+ "mov v19.16b, v11.16b\n"
+ "mov v23.16b, v15.16b\n"
+ "b 130f\n"
+ "118:" // Height 4: no bias
+ "tbz %x[flags], #0, 129f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "cmp x13, #0x10\n"
+ "add x22, x23, x19, LSL #2\n"
+ "bge 127f\n"
+ "tbz x13, #3, 122f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "tbz x13, #2, 120f\n"
+ "ld1 { v11.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v19.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
+ "tbz x13, #1, 119f\n"
+ "ldr d16, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "tbz x13, #0, 126f\n"
+ "ld1 { v16.s }[2], [x12]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
+ "b 126f\n"
+ "119:" // Height 4: Partial accumulate: partial_1_12
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 126f\n"
+ "ldr s16, [x12, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
+ "b 126f\n"
+ "120:" // Height 4: Partial accumulate: partial_2_8
+ "tbz x13, #1, 121f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "tbz x13, #0, 126f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
+ "b 126f\n"
+ "121:" // Height 4: Partial accumulate: partial_1_8
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 126f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
+ "b 126f\n"
+ "122:" // Height 4: Partial accumulate: partial_4_0
+ "tbz x13, #2, 124f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "tbz x13, #1, 123f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "tbz x13, #0, 126f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
+ "b 126f\n"
+ "123:" // Height 4: Partial accumulate: partial_1_4
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 126f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
+ "b 126f\n"
+ "124:" // Height 4: Partial accumulate: partial_2_0
+ "tbz x13, #1, 125f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "tbz x13, #0, 126f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
+ "b 126f\n"
+ "125:" // Height 4: Partial accumulate: partial_1_0
+ "ldr s9, [x12, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
+ "126:" // Height 4: Partial accumulate: Done
+ "sub x12, x12, x19\n"
+ "b 128f\n"
+ "127:" // Height 4: full accumulate
+ "ldr q9, [x12, #0x0]\n"
+ "ldr q10, [x12, #0x10]\n"
+ "ldr q11, [x12, #0x20]\n"
+ "ldr q16, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q17, [x23, #0x0]\n"
+ "ldr q18, [x23, #0x10]\n"
+ "ldr q19, [x23, #0x20]\n"
+ "ldr q24, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "128:" // Height 4: MMLA fixup
+ "zip1 v8.2d, v9.2d, v12.2d\n"
+ "zip2 v12.2d, v9.2d, v12.2d\n"
+ "zip1 v9.2d, v10.2d, v13.2d\n"
+ "zip2 v13.2d, v10.2d, v13.2d\n"
+ "zip1 v10.2d, v11.2d, v14.2d\n"
+ "zip2 v14.2d, v11.2d, v14.2d\n"
+ "zip1 v11.2d, v16.2d, v15.2d\n"
+ "zip2 v15.2d, v16.2d, v15.2d\n"
+ "zip1 v16.2d, v17.2d, v20.2d\n"
+ "zip2 v20.2d, v17.2d, v20.2d\n"
+ "zip1 v17.2d, v18.2d, v21.2d\n"
+ "zip2 v21.2d, v18.2d, v21.2d\n"
+ "zip1 v18.2d, v19.2d, v22.2d\n"
+ "zip2 v22.2d, v19.2d, v22.2d\n"
+ "zip1 v19.2d, v24.2d, v23.2d\n"
+ "zip2 v23.2d, v24.2d, v23.2d\n"
+ "b 130f\n"
+ "129:" // Height 4: no accumulate
+ "movi v8.16b, #0x0\n"
+ "movi v9.16b, #0x0\n"
+ "movi v10.16b, #0x0\n"
+ "movi v11.16b, #0x0\n"
+ "movi v12.16b, #0x0\n"
+ "movi v13.16b, #0x0\n"
+ "movi v14.16b, #0x0\n"
+ "movi v15.16b, #0x0\n"
+ "movi v16.16b, #0x0\n"
+ "movi v17.16b, #0x0\n"
+ "movi v18.16b, #0x0\n"
+ "movi v19.16b, #0x0\n"
+ "movi v20.16b, #0x0\n"
+ "movi v21.16b, #0x0\n"
+ "movi v22.16b, #0x0\n"
+ "movi v23.16b, #0x0\n"
+ "130:" // Height 4: setup done
+ "mov x27, #0x0\n"
+ "131:" // Height 4: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 132f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 133f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "b 133f\n"
+ "132:" // Height 4: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "133:" // Height 4: input setup done
+ "cmp x26, #0x8\n"
+ "blt 136f\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q2, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q3, [x23, #0x0]\n"
+ "ldr q4, [x22, #0x0]\n"
+ "ldr q7, [x11, #0x0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "blt 135f\n"
+ "134:" // Height 4: Multiply loop: Main loop head
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "sub x26, x26, #0x8\n"
+ "trn1 v2.2d, v3.2d, v4.2d\n"
+ ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x10, #0x0]\n"
+ ".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x10, #0x10]\n"
+ ".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ ".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x9, #0x0]\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ "cmp x26, #0x10\n"
+ ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x9, #0x10]\n"
+ ".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x28, #0x0]\n"
+ ".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x28, #0x10]\n"
+ ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x11, #0x20]\n"
+ ".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
+ "add x22, x22, #0x10\n"
+ "ldr q4, [x22, #0x0]\n"
+ ".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x11, #0x30]\n"
+ ".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
+ "ldr q2, [x24, #0x0]\n"
+ ".inst 0x6e47ec70 // bfmmla v16.4s, v3.8h, v7.8h\n"
+ "ldr q7, [x10, #0x20]\n"
+ ".inst 0x6e46ec2c // bfmmla v12.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e46ec74 // bfmmla v20.4s, v3.8h, v6.8h\n"
+ "ldr q6, [x10, #0x30]\n"
+ ".inst 0x6e47ec29 // bfmmla v9.4s, v1.8h, v7.8h\n"
+ "add x11, x11, #0x40\n"
+ ".inst 0x6e47ec71 // bfmmla v17.4s, v3.8h, v7.8h\n"
+ "ldr q7, [x9, #0x20]\n"
+ ".inst 0x6e46ec2d // bfmmla v13.4s, v1.8h, v6.8h\n"
+ "add x10, x10, #0x40\n"
+ ".inst 0x6e46ec75 // bfmmla v21.4s, v3.8h, v6.8h\n"
+ "ldr q6, [x9, #0x30]\n"
+ ".inst 0x6e47ec2a // bfmmla v10.4s, v1.8h, v7.8h\n"
+ "add x9, x9, #0x40\n"
+ ".inst 0x6e47ec72 // bfmmla v18.4s, v3.8h, v7.8h\n"
+ "ldr q7, [x28, #0x20]\n"
+ ".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e46ec76 // bfmmla v22.4s, v3.8h, v6.8h\n"
+ "ldr q6, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ ".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
+ ".inst 0x6e47ec73 // bfmmla v19.4s, v3.8h, v7.8h\n"
+ "ldr q7, [x11, #0x0]\n"
+ ".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
+ "ldr q1, [x25, #0x0]\n"
+ ".inst 0x6e46ec77 // bfmmla v23.4s, v3.8h, v6.8h\n"
+ "ldr q3, [x23, #0x0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "bge 134b\n"
+ "135:" // Height 4: Multiply loop: Single iteration only
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "sub x26, x26, #0x8\n"
+ "trn1 v2.2d, v3.2d, v4.2d\n"
+ ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x10, #0x0]\n"
+ ".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x10, #0x10]\n"
+ ".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ ".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x9, #0x0]\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x9, #0x10]\n"
+ ".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x28, #0x0]\n"
+ ".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x28, #0x10]\n"
+ ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x11, #0x20]\n"
+ ".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x11, #0x30]\n"
+ ".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
+ "add x11, x11, #0x40\n"
+ ".inst 0x6e47ec70 // bfmmla v16.4s, v3.8h, v7.8h\n"
+ "ldr q7, [x10, #0x20]\n"
+ ".inst 0x6e46ec2c // bfmmla v12.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e46ec74 // bfmmla v20.4s, v3.8h, v6.8h\n"
+ "ldr q6, [x10, #0x30]\n"
+ ".inst 0x6e47ec29 // bfmmla v9.4s, v1.8h, v7.8h\n"
+ "add x10, x10, #0x40\n"
+ ".inst 0x6e47ec71 // bfmmla v17.4s, v3.8h, v7.8h\n"
+ "ldr q7, [x9, #0x20]\n"
+ ".inst 0x6e46ec2d // bfmmla v13.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e46ec75 // bfmmla v21.4s, v3.8h, v6.8h\n"
+ "ldr q6, [x9, #0x30]\n"
+ ".inst 0x6e47ec2a // bfmmla v10.4s, v1.8h, v7.8h\n"
+ "add x9, x9, #0x40\n"
+ ".inst 0x6e47ec72 // bfmmla v18.4s, v3.8h, v7.8h\n"
+ "ldr q7, [x28, #0x20]\n"
+ ".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e46ec76 // bfmmla v22.4s, v3.8h, v6.8h\n"
+ "ldr q6, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ ".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
+ ".inst 0x6e47ec73 // bfmmla v19.4s, v3.8h, v7.8h\n"
+ ".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e46ec77 // bfmmla v23.4s, v3.8h, v6.8h\n"
+ "136:" // Height 4: Multiply loop: Main loop skip
+ "cbz x26, 141f\n"
+ "cmp x26, #0x4\n"
+ "blt 138f\n"
+ "137:" // Height 4: Multiply loop: Odd block loop
+ "ldr d1, [x25], #0x8\n"
+ "ldr d2, [x24], #0x8\n"
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "sub x26, x26, #0x4\n"
+ "ldr d3, [x23], #0x8\n"
+ "ldr d4, [x22], #0x8\n"
+ "trn1 v2.2d, v3.2d, v4.2d\n"
+ "cmp x26, #0x4\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x11, #0x10]\n"
+ ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x10, #0x0]\n"
+ ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x10, #0x10]\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec51 // bfmmla v17.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x9, #0x0]\n"
+ "add x11, x11, #0x20\n"
+ ".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec55 // bfmmla v21.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x9, #0x10]\n"
+ "add x10, x10, #0x20\n"
+ ".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec52 // bfmmla v18.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x28, #0x0]\n"
+ "add x9, x9, #0x20\n"
+ ".inst 0x6e47ec0e // bfmmla v14.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec56 // bfmmla v22.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x28, #0x10]\n"
+ "add x28, x28, #0x20\n"
+ ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec53 // bfmmla v19.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
+ "bge 137b\n"
+ "138:" // Height 4: Multiply loop: Skip odd blocks
+ "cbz x26, 141f\n"
+ "tbz x26, #1, 139f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "ldr s3, [x23], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "tbz x26, #0, 140f\n"
+ "ld1 { v1.h }[2], [x25]\n"
+ "ld1 { v2.h }[2], [x24]\n"
+ "ld1 { v3.h }[2], [x23]\n"
+ "ld1 { v4.h }[2], [x22]\n"
+ "b 140f\n"
+ "139:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
+ "ldr h1, [x25, #0x0]\n"
+ "ldr h2, [x24, #0x0]\n"
+ "ldr h3, [x23, #0x0]\n"
+ "ldr h4, [x22, #0x0]\n"
+ "140:" // Height 4: Multiply loop: Ragged operand read: Done
+ "ldr q7, [x11, #0x0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn1 v2.2d, v3.2d, v4.2d\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x10, #0x0]\n"
+ "add x11, x11, #0x20\n"
+ ".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x10, #0x10]\n"
+ "add x10, x10, #0x20\n"
+ ".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x9, #0x0]\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x9, #0x10]\n"
+ "add x9, x9, #0x20\n"
+ ".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x28, #0x0]\n"
+ ".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x28, #0x10]\n"
+ "add x28, x28, #0x20\n"
+ ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
+ "141:" // Height 4: Multiply loop: No odd multiplies
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 131b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "uzp1 v7.2d, v8.2d, v12.2d\n"
+ "uzp2 v8.2d, v8.2d, v12.2d\n"
+ "uzp1 v12.2d, v9.2d, v13.2d\n"
+ "add x22, x23, x19, LSL #2\n"
+ "uzp2 v9.2d, v9.2d, v13.2d\n"
+ "uzp1 v13.2d, v10.2d, v14.2d\n"
+ "uzp2 v10.2d, v10.2d, v14.2d\n"
+ "uzp1 v14.2d, v11.2d, v15.2d\n"
+ "uzp2 v11.2d, v11.2d, v15.2d\n"
+ "uzp1 v15.2d, v16.2d, v20.2d\n"
+ "uzp2 v16.2d, v16.2d, v20.2d\n"
+ "uzp1 v20.2d, v17.2d, v21.2d\n"
+ "uzp2 v17.2d, v17.2d, v21.2d\n"
+ "uzp1 v21.2d, v18.2d, v22.2d\n"
+ "uzp2 v18.2d, v18.2d, v22.2d\n"
+ "uzp1 v22.2d, v19.2d, v23.2d\n"
+ "uzp2 v19.2d, v19.2d, v23.2d\n"
+ "tbz %x[flags], #1, 142f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v7.4s, v7.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v1.4s\n"
+ "fmin v13.4s, v13.4s, v1.4s\n"
+ "fmin v14.4s, v14.4s, v1.4s\n"
+ "fmin v8.4s, v8.4s, v1.4s\n"
+ "fmin v9.4s, v9.4s, v1.4s\n"
+ "fmin v10.4s, v10.4s, v1.4s\n"
+ "fmin v11.4s, v11.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v1.4s\n"
+ "fmin v20.4s, v20.4s, v1.4s\n"
+ "fmin v21.4s, v21.4s, v1.4s\n"
+ "fmin v22.4s, v22.4s, v1.4s\n"
+ "fmin v16.4s, v16.4s, v1.4s\n"
+ "fmin v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v1.4s\n"
+ "fmin v19.4s, v19.4s, v1.4s\n"
+ "fmax v7.4s, v7.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v0.4s\n"
+ "fmax v13.4s, v13.4s, v0.4s\n"
+ "fmax v14.4s, v14.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v0.4s\n"
+ "fmax v9.4s, v9.4s, v0.4s\n"
+ "fmax v10.4s, v10.4s, v0.4s\n"
+ "fmax v11.4s, v11.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v0.4s\n"
+ "fmax v20.4s, v20.4s, v0.4s\n"
+ "fmax v21.4s, v21.4s, v0.4s\n"
+ "fmax v22.4s, v22.4s, v0.4s\n"
+ "fmax v16.4s, v16.4s, v0.4s\n"
+ "fmax v17.4s, v17.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v0.4s\n"
+ "fmax v19.4s, v19.4s, v0.4s\n"
+ "142:" // Height 4: No activation
+ "cmp x13, #0x10\n"
+ "bge 151f\n"
+ "tbz x13, #3, 146f\n"
+ "st1 { v7.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "tbz x13, #2, 144f\n"
+ "st1 { v13.4s }, [x12], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "tbz x13, #1, 143f\n"
+ "str d14, [x12], #0x8\n"
+ "str d11, [x24], #0x8\n"
+ "str d22, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "tbz x13, #0, 150f\n"
+ "st1 { v14.s }[2], [x12]\n"
+ "st1 { v11.s }[2], [x24]\n"
+ "st1 { v22.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
+ "b 150f\n"
+ "143:" // Height 4: Partial direct writeback: partial_1_12
+ "tbz x13, #0, 150f\n"
+ "str s14, [x12, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
+ "str s22, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
+ "b 150f\n"
+ "144:" // Height 4: Partial direct writeback: partial_2_8
+ "tbz x13, #1, 145f\n"
+ "str d13, [x12], #0x8\n"
+ "str d10, [x24], #0x8\n"
+ "str d21, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "tbz x13, #0, 150f\n"
+ "st1 { v13.s }[2], [x12]\n"
+ "st1 { v10.s }[2], [x24]\n"
+ "st1 { v21.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
+ "b 150f\n"
+ "145:" // Height 4: Partial direct writeback: partial_1_8
+ "tbz x13, #0, 150f\n"
+ "str s13, [x12, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
+ "str s21, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
+ "b 150f\n"
+ "146:" // Height 4: Partial direct writeback: partial_4_0
+ "tbz x13, #2, 148f\n"
+ "st1 { v7.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "tbz x13, #1, 147f\n"
+ "str d12, [x12], #0x8\n"
+ "str d9, [x24], #0x8\n"
+ "str d20, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "tbz x13, #0, 150f\n"
+ "st1 { v12.s }[2], [x12]\n"
+ "st1 { v9.s }[2], [x24]\n"
+ "st1 { v20.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
+ "b 150f\n"
+ "147:" // Height 4: Partial direct writeback: partial_1_4
+ "tbz x13, #0, 150f\n"
+ "str s12, [x12, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
+ "str s20, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
+ "b 150f\n"
+ "148:" // Height 4: Partial direct writeback: partial_2_0
+ "tbz x13, #1, 149f\n"
+ "str d7, [x12], #0x8\n"
+ "str d8, [x24], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "tbz x13, #0, 150f\n"
+ "st1 { v7.s }[2], [x12]\n"
+ "st1 { v8.s }[2], [x24]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
+ "b 150f\n"
+ "149:" // Height 4: Partial direct writeback: partial_1_0
+ "str s7, [x12, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
+ "150:" // Height 4: Partial direct writeback: Done
+ "b 152f\n"
+ "151:" // Height 4: Full writeback
+ "str q7, [x12, #0x0]\n"
+ "str q12, [x12, #0x10]\n"
+ "str q13, [x12, #0x20]\n"
+ "str q14, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
+ "str q15, [x23, #0x0]\n"
+ "str q20, [x23, #0x10]\n"
+ "str q21, [x23, #0x20]\n"
+ "str q22, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
+ "152:" // Height 4: Writeback done
+ "subs x13, x13, #0x10\n"
+ "bgt 116b\n"
+ "b 230f\n"
+ "153:" // Height 5
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "154:" // Height 5: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0xc\n"
+ "bgt 155f\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
+ "bgt 155f\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
+ "bgt 155f\n"
+ "mov x10, x11\n"
+ "155:" // Height 5: B setup done
+ "cbz x14, 156f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
+ "zip2 v12.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
+ "zip2 v13.2d, v9.2d, v9.2d\n"
+ "zip1 v9.2d, v9.2d, v9.2d\n"
+ "zip2 v14.2d, v10.2d, v10.2d\n"
+ "zip1 v10.2d, v10.2d, v10.2d\n"
+ "add x14, x14, #0x40\n"
+ "zip2 v15.2d, v11.2d, v11.2d\n"
+ "zip1 v11.2d, v11.2d, v11.2d\n"
+ "mov v16.16b, v8.16b\n"
+ "mov v20.16b, v12.16b\n"
+ "mov v17.16b, v9.16b\n"
+ "mov v21.16b, v13.16b\n"
+ "mov v18.16b, v10.16b\n"
+ "mov v22.16b, v14.16b\n"
+ "mov v19.16b, v11.16b\n"
+ "mov v23.16b, v15.16b\n"
+ "mov v24.16b, v8.16b\n"
+ "mov v28.16b, v12.16b\n"
+ "mov v25.16b, v9.16b\n"
+ "mov v29.16b, v13.16b\n"
+ "mov v26.16b, v10.16b\n"
+ "mov v30.16b, v14.16b\n"
+ "mov v27.16b, v11.16b\n"
+ "mov v31.16b, v15.16b\n"
+ "b 168f\n"
+ "156:" // Height 5: no bias
+ "tbz %x[flags], #0, 167f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "cmp x13, #0x10\n"
+ "add x21, x22, x19, LSL #2\n"
+ "bge 165f\n"
+ "tbz x13, #3, 160f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v26.4s }, [x21], #0x10\n"
+ "tbz x13, #2, 158f\n"
+ "ld1 { v11.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v19.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v27.4s }, [x21], #0x10\n"
+ "tbz x13, #1, 157f\n"
+ "ldr d16, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d6, [x21], #0x8\n"
+ "tbz x13, #0, 164f\n"
+ "ld1 { v16.s }[2], [x12]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
+ "ld1 { v6.s }[2], [x21]\n"
+ "b 164f\n"
+ "157:" // Height 5: Partial accumulate: partial_1_12
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 164f\n"
+ "ldr s16, [x12, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
+ "ldr s6, [x21, #0x0]\n"
+ "b 164f\n"
+ "158:" // Height 5: Partial accumulate: partial_2_8
+ "tbz x13, #1, 159f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "ldr d27, [x21], #0x8\n"
+ "tbz x13, #0, 164f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
+ "ld1 { v27.s }[2], [x21]\n"
+ "b 164f\n"
+ "159:" // Height 5: Partial accumulate: partial_1_8
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 164f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
+ "ldr s27, [x21, #0x0]\n"
+ "b 164f\n"
+ "160:" // Height 5: Partial accumulate: partial_4_0
+ "tbz x13, #2, 162f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "tbz x13, #1, 161f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
+ "tbz x13, #0, 164f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
+ "ld1 { v26.s }[2], [x21]\n"
+ "b 164f\n"
+ "161:" // Height 5: Partial accumulate: partial_1_4
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 164f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
+ "ldr s26, [x21, #0x0]\n"
+ "b 164f\n"
+ "162:" // Height 5: Partial accumulate: partial_2_0
+ "tbz x13, #1, 163f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d25, [x21], #0x8\n"
+ "tbz x13, #0, 164f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x21]\n"
+ "b 164f\n"
+ "163:" // Height 5: Partial accumulate: partial_1_0
+ "ldr s9, [x12, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
+ "ldr s25, [x21, #0x0]\n"
+ "164:" // Height 5: Partial accumulate: Done
+ "sub x12, x12, x19\n"
+ "b 166f\n"
+ "165:" // Height 5: full accumulate
+ "ldr q9, [x12, #0x0]\n"
+ "ldr q10, [x12, #0x10]\n"
+ "ldr q11, [x12, #0x20]\n"
+ "ldr q16, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q17, [x23, #0x0]\n"
+ "ldr q18, [x23, #0x10]\n"
+ "ldr q19, [x23, #0x20]\n"
+ "ldr q24, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "ldr q25, [x21, #0x0]\n"
+ "ldr q26, [x21, #0x10]\n"
+ "ldr q27, [x21, #0x20]\n"
+ "ldr q6, [x21, #0x30]\n"
+ "166:" // Height 5: MMLA fixup
+ "zip1 v8.2d, v9.2d, v12.2d\n"
+ "zip2 v12.2d, v9.2d, v12.2d\n"
+ "zip1 v9.2d, v10.2d, v13.2d\n"
+ "zip2 v13.2d, v10.2d, v13.2d\n"
+ "zip1 v10.2d, v11.2d, v14.2d\n"
+ "zip2 v14.2d, v11.2d, v14.2d\n"
+ "zip1 v11.2d, v16.2d, v15.2d\n"
+ "zip2 v15.2d, v16.2d, v15.2d\n"
+ "zip1 v16.2d, v17.2d, v20.2d\n"
+ "zip2 v20.2d, v17.2d, v20.2d\n"
+ "zip1 v17.2d, v18.2d, v21.2d\n"
+ "zip2 v21.2d, v18.2d, v21.2d\n"
+ "zip1 v18.2d, v19.2d, v22.2d\n"
+ "zip2 v22.2d, v19.2d, v22.2d\n"
+ "zip1 v19.2d, v24.2d, v23.2d\n"
+ "zip2 v23.2d, v24.2d, v23.2d\n"
+ "zip1 v24.2d, v25.2d, v28.2d\n"
+ "zip2 v28.2d, v25.2d, v28.2d\n"
+ "zip1 v25.2d, v26.2d, v29.2d\n"
+ "zip2 v29.2d, v26.2d, v29.2d\n"
+ "zip1 v26.2d, v27.2d, v30.2d\n"
+ "zip2 v30.2d, v27.2d, v30.2d\n"
+ "zip1 v27.2d, v6.2d, v31.2d\n"
+ "zip2 v31.2d, v6.2d, v31.2d\n"
+ "b 168f\n"
+ "167:" // Height 5: no accumulate
+ "movi v8.16b, #0x0\n"
+ "movi v9.16b, #0x0\n"
+ "movi v10.16b, #0x0\n"
+ "movi v11.16b, #0x0\n"
+ "movi v12.16b, #0x0\n"
+ "movi v13.16b, #0x0\n"
+ "movi v14.16b, #0x0\n"
+ "movi v15.16b, #0x0\n"
+ "movi v16.16b, #0x0\n"
+ "movi v17.16b, #0x0\n"
+ "movi v18.16b, #0x0\n"
+ "movi v19.16b, #0x0\n"
+ "movi v20.16b, #0x0\n"
+ "movi v21.16b, #0x0\n"
+ "movi v22.16b, #0x0\n"
+ "movi v23.16b, #0x0\n"
+ "movi v24.16b, #0x0\n"
+ "movi v25.16b, #0x0\n"
+ "movi v26.16b, #0x0\n"
+ "movi v27.16b, #0x0\n"
+ "movi v28.16b, #0x0\n"
+ "movi v29.16b, #0x0\n"
+ "movi v30.16b, #0x0\n"
+ "movi v31.16b, #0x0\n"
+ "168:" // Height 5: setup done
+ "mov x27, #0x0\n"
+ "169:" // Height 5: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 170f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 171f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
+ "b 171f\n"
+ "170:" // Height 5: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "171:" // Height 5: input setup done
+ "cmp x26, #0x8\n"
+ "blt 174f\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q2, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q3, [x23, #0x0]\n"
+ "ldr q4, [x22, #0x0]\n"
+ "ldr q5, [x21, #0x0]\n"
+ "ldr q7, [x11, #0x0]\n"
+ "blt 173f\n"
+ "172:" // Height 5: Multiply loop: Main loop head
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "trn1 v2.2d, v3.2d, v4.2d\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
+ "sub x26, x26, #0x8\n"
+ "trn1 v4.2d, v5.2d, v6.2d\n"
+ "trn2 v5.2d, v5.2d, v6.2d\n"
+ "ldr q6, [x11, #0x10]\n"
+ ".inst 0x6e47ec98 // bfmmla v24.4s, v4.8h, v7.8h\n"
+ "ldr q7, [x10, #0x0]\n"
+ ".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
+ "cmp x26, #0x10\n"
+ ".inst 0x6e46ec9c // bfmmla v28.4s, v4.8h, v6.8h\n"
+ "ldr q6, [x10, #0x10]\n"
+ ".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec99 // bfmmla v25.4s, v4.8h, v7.8h\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x6e46ec9d // bfmmla v29.4s, v4.8h, v6.8h\n"
+ "ldr q6, [x9, #0x10]\n"
+ ".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec9a // bfmmla v26.4s, v4.8h, v7.8h\n"
+ "ldr q7, [x28, #0x0]\n"
+ ".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e46ec9e // bfmmla v30.4s, v4.8h, v6.8h\n"
+ "ldr q6, [x28, #0x10]\n"
+ ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec9b // bfmmla v27.4s, v4.8h, v7.8h\n"
+ "ldr q7, [x11, #0x20]\n"
+ ".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
+ "ldr q2, [x24, #0x0]\n"
+ ".inst 0x6e46ec9f // bfmmla v31.4s, v4.8h, v6.8h\n"
+ "ldr q6, [x11, #0x30]\n"
+ "ldr q4, [x22, #0x0]\n"
+ ".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
+ ".inst 0x6e47ec70 // bfmmla v16.4s, v3.8h, v7.8h\n"
+ ".inst 0x6e47ecb8 // bfmmla v24.4s, v5.8h, v7.8h\n"
+ "ldr q7, [x10, #0x20]\n"
+ "add x11, x11, #0x40\n"
+ ".inst 0x6e46ec2c // bfmmla v12.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e46ec74 // bfmmla v20.4s, v3.8h, v6.8h\n"
+ ".inst 0x6e46ecbc // bfmmla v28.4s, v5.8h, v6.8h\n"
+ "ldr q6, [x10, #0x30]\n"
+ ".inst 0x6e47ec29 // bfmmla v9.4s, v1.8h, v7.8h\n"
+ "add x10, x10, #0x40\n"
+ ".inst 0x6e47ec71 // bfmmla v17.4s, v3.8h, v7.8h\n"
+ ".inst 0x6e47ecb9 // bfmmla v25.4s, v5.8h, v7.8h\n"
+ "ldr q7, [x9, #0x20]\n"
+ ".inst 0x6e46ec2d // bfmmla v13.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e46ec75 // bfmmla v21.4s, v3.8h, v6.8h\n"
+ ".inst 0x6e46ecbd // bfmmla v29.4s, v5.8h, v6.8h\n"
+ "ldr q6, [x9, #0x30]\n"
+ ".inst 0x6e47ec2a // bfmmla v10.4s, v1.8h, v7.8h\n"
+ "add x9, x9, #0x40\n"
+ ".inst 0x6e47ec72 // bfmmla v18.4s, v3.8h, v7.8h\n"
+ ".inst 0x6e47ecba // bfmmla v26.4s, v5.8h, v7.8h\n"
+ "ldr q7, [x28, #0x20]\n"
+ ".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e46ec76 // bfmmla v22.4s, v3.8h, v6.8h\n"
+ ".inst 0x6e46ecbe // bfmmla v30.4s, v5.8h, v6.8h\n"
+ "ldr q6, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ ".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
+ ".inst 0x6e47ec73 // bfmmla v19.4s, v3.8h, v7.8h\n"
+ ".inst 0x6e47ecbb // bfmmla v27.4s, v5.8h, v7.8h\n"
+ "ldr q7, [x11, #0x0]\n"
+ ".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
+ "ldr q1, [x25, #0x0]\n"
+ ".inst 0x6e46ec77 // bfmmla v23.4s, v3.8h, v6.8h\n"
+ "ldr q3, [x23, #0x0]\n"
+ ".inst 0x6e46ecbf // bfmmla v31.4s, v5.8h, v6.8h\n"
+ "ldr q5, [x21, #0x0]\n"
+ "bge 172b\n"
+ "173:" // Height 5: Multiply loop: Single iteration only
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "trn1 v2.2d, v3.2d, v4.2d\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
+ "sub x26, x26, #0x8\n"
+ "trn1 v4.2d, v5.2d, v6.2d\n"
+ "trn2 v5.2d, v5.2d, v6.2d\n"
+ "ldr q6, [x11, #0x10]\n"
+ ".inst 0x6e47ec98 // bfmmla v24.4s, v4.8h, v7.8h\n"
+ "ldr q7, [x10, #0x0]\n"
+ ".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0x6e46ec9c // bfmmla v28.4s, v4.8h, v6.8h\n"
+ "ldr q6, [x10, #0x10]\n"
+ ".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec99 // bfmmla v25.4s, v4.8h, v7.8h\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x6e46ec9d // bfmmla v29.4s, v4.8h, v6.8h\n"
+ "ldr q6, [x9, #0x10]\n"
+ ".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec9a // bfmmla v26.4s, v4.8h, v7.8h\n"
+ "ldr q7, [x28, #0x0]\n"
+ ".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e46ec9e // bfmmla v30.4s, v4.8h, v6.8h\n"
+ "ldr q6, [x28, #0x10]\n"
+ ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec9b // bfmmla v27.4s, v4.8h, v7.8h\n"
+ "ldr q7, [x11, #0x20]\n"
+ ".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e46ec9f // bfmmla v31.4s, v4.8h, v6.8h\n"
+ "ldr q6, [x11, #0x30]\n"
+ ".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
+ "add x11, x11, #0x40\n"
+ ".inst 0x6e47ec70 // bfmmla v16.4s, v3.8h, v7.8h\n"
+ ".inst 0x6e47ecb8 // bfmmla v24.4s, v5.8h, v7.8h\n"
+ "ldr q7, [x10, #0x20]\n"
+ ".inst 0x6e46ec2c // bfmmla v12.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e46ec74 // bfmmla v20.4s, v3.8h, v6.8h\n"
+ ".inst 0x6e46ecbc // bfmmla v28.4s, v5.8h, v6.8h\n"
+ "ldr q6, [x10, #0x30]\n"
+ ".inst 0x6e47ec29 // bfmmla v9.4s, v1.8h, v7.8h\n"
+ "add x10, x10, #0x40\n"
+ ".inst 0x6e47ec71 // bfmmla v17.4s, v3.8h, v7.8h\n"
+ ".inst 0x6e47ecb9 // bfmmla v25.4s, v5.8h, v7.8h\n"
+ "ldr q7, [x9, #0x20]\n"
+ ".inst 0x6e46ec2d // bfmmla v13.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e46ec75 // bfmmla v21.4s, v3.8h, v6.8h\n"
+ ".inst 0x6e46ecbd // bfmmla v29.4s, v5.8h, v6.8h\n"
+ "ldr q6, [x9, #0x30]\n"
+ ".inst 0x6e47ec2a // bfmmla v10.4s, v1.8h, v7.8h\n"
+ "add x9, x9, #0x40\n"
+ ".inst 0x6e47ec72 // bfmmla v18.4s, v3.8h, v7.8h\n"
+ ".inst 0x6e47ecba // bfmmla v26.4s, v5.8h, v7.8h\n"
+ "ldr q7, [x28, #0x20]\n"
+ ".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e46ec76 // bfmmla v22.4s, v3.8h, v6.8h\n"
+ ".inst 0x6e46ecbe // bfmmla v30.4s, v5.8h, v6.8h\n"
+ "ldr q6, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ ".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
+ ".inst 0x6e47ec73 // bfmmla v19.4s, v3.8h, v7.8h\n"
+ ".inst 0x6e47ecbb // bfmmla v27.4s, v5.8h, v7.8h\n"
+ ".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e46ec77 // bfmmla v23.4s, v3.8h, v6.8h\n"
+ ".inst 0x6e46ecbf // bfmmla v31.4s, v5.8h, v6.8h\n"
+ "174:" // Height 5: Multiply loop: Main loop skip
+ "cbz x26, 179f\n"
+ "cmp x26, #0x4\n"
+ "blt 176f\n"
+ "175:" // Height 5: Multiply loop: Odd block loop
+ "ldr d1, [x25], #0x8\n"
+ "ldr d2, [x24], #0x8\n"
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "ldr d3, [x23], #0x8\n"
+ "ldr d4, [x22], #0x8\n"
+ "trn1 v2.2d, v3.2d, v4.2d\n"
+ "sub x26, x26, #0x4\n"
+ "ldr d5, [x21], #0x8\n"
+ "ldr q6, [x11, #0x0]\n"
+ "trn1 v4.2d, v5.2d, v7.2d\n"
+ ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
+ "ldr q7, [x11, #0x10]\n"
+ ".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e46ec98 // bfmmla v24.4s, v4.8h, v6.8h\n"
+ "ldr q6, [x10, #0x0]\n"
+ ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
+ "cmp x26, #0x4\n"
+ "add x11, x11, #0x20\n"
+ ".inst 0x6e47ec9c // bfmmla v28.4s, v4.8h, v7.8h\n"
+ "ldr q7, [x10, #0x10]\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ "add x10, x10, #0x20\n"
+ ".inst 0x6e46ec51 // bfmmla v17.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e46ec99 // bfmmla v25.4s, v4.8h, v6.8h\n"
+ "ldr q6, [x9, #0x0]\n"
+ ".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec55 // bfmmla v21.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec9d // bfmmla v29.4s, v4.8h, v7.8h\n"
+ "ldr q7, [x9, #0x10]\n"
+ ".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
+ "add x9, x9, #0x20\n"
+ ".inst 0x6e46ec52 // bfmmla v18.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e46ec9a // bfmmla v26.4s, v4.8h, v6.8h\n"
+ "ldr q6, [x28, #0x0]\n"
+ ".inst 0x6e47ec0e // bfmmla v14.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec56 // bfmmla v22.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec9e // bfmmla v30.4s, v4.8h, v7.8h\n"
+ "ldr q7, [x28, #0x10]\n"
+ ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
+ "add x28, x28, #0x20\n"
+ ".inst 0x6e46ec53 // bfmmla v19.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e46ec9b // bfmmla v27.4s, v4.8h, v6.8h\n"
+ ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec9f // bfmmla v31.4s, v4.8h, v7.8h\n"
+ "bge 175b\n"
+ "176:" // Height 5: Multiply loop: Skip odd blocks
+ "cbz x26, 179f\n"
+ "tbz x26, #1, 177f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "ldr s3, [x23], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "ldr s5, [x21], #0x4\n"
+ "tbz x26, #0, 178f\n"
+ "ld1 { v1.h }[2], [x25]\n"
+ "ld1 { v2.h }[2], [x24]\n"
+ "ld1 { v3.h }[2], [x23]\n"
+ "ld1 { v4.h }[2], [x22]\n"
+ "ld1 { v5.h }[2], [x21]\n"
+ "b 178f\n"
+ "177:" // Height 5: Multiply loop: Ragged operand read: partial_1_0
+ "ldr h1, [x25, #0x0]\n"
+ "ldr h2, [x24, #0x0]\n"
+ "ldr h3, [x23, #0x0]\n"
+ "ldr h4, [x22, #0x0]\n"
+ "ldr h5, [x21, #0x0]\n"
+ "178:" // Height 5: Multiply loop: Ragged operand read: Done
+ "ldr q7, [x11, #0x0]\n"
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn1 v2.2d, v3.2d, v4.2d\n"
+ "trn1 v4.2d, v5.2d, v6.2d\n"
+ "ldr q6, [x11, #0x10]\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec98 // bfmmla v24.4s, v4.8h, v7.8h\n"
+ "ldr q7, [x10, #0x0]\n"
+ ".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
+ "add x11, x11, #0x20\n"
+ ".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e46ec9c // bfmmla v28.4s, v4.8h, v6.8h\n"
+ "ldr q6, [x10, #0x10]\n"
+ "add x10, x10, #0x20\n"
+ ".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec99 // bfmmla v25.4s, v4.8h, v7.8h\n"
+ "ldr q7, [x9, #0x0]\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e46ec9d // bfmmla v29.4s, v4.8h, v6.8h\n"
+ "ldr q6, [x9, #0x10]\n"
+ "add x9, x9, #0x20\n"
+ ".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec9a // bfmmla v26.4s, v4.8h, v7.8h\n"
+ "ldr q7, [x28, #0x0]\n"
+ ".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e46ec9e // bfmmla v30.4s, v4.8h, v6.8h\n"
+ "ldr q6, [x28, #0x10]\n"
+ "add x28, x28, #0x20\n"
+ ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec9b // bfmmla v27.4s, v4.8h, v7.8h\n"
+ ".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e46ec9f // bfmmla v31.4s, v4.8h, v6.8h\n"
+ "179:" // Height 5: Multiply loop: No odd multiplies
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 169b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "uzp1 v7.2d, v8.2d, v12.2d\n"
+ "add x22, x23, x19, LSL #2\n"
+ "uzp2 v8.2d, v8.2d, v12.2d\n"
+ "uzp1 v12.2d, v9.2d, v13.2d\n"
+ "add x21, x22, x19, LSL #2\n"
+ "uzp2 v9.2d, v9.2d, v13.2d\n"
+ "uzp1 v13.2d, v10.2d, v14.2d\n"
+ "uzp2 v10.2d, v10.2d, v14.2d\n"
+ "uzp1 v14.2d, v11.2d, v15.2d\n"
+ "uzp2 v11.2d, v11.2d, v15.2d\n"
+ "uzp1 v15.2d, v16.2d, v20.2d\n"
+ "uzp2 v16.2d, v16.2d, v20.2d\n"
+ "uzp1 v20.2d, v17.2d, v21.2d\n"
+ "uzp2 v17.2d, v17.2d, v21.2d\n"
+ "uzp1 v21.2d, v18.2d, v22.2d\n"
+ "uzp2 v18.2d, v18.2d, v22.2d\n"
+ "uzp1 v22.2d, v19.2d, v23.2d\n"
+ "uzp2 v19.2d, v19.2d, v23.2d\n"
+ "uzp1 v24.2d, v24.2d, v28.2d\n"
+ "uzp1 v25.2d, v25.2d, v29.2d\n"
+ "uzp1 v26.2d, v26.2d, v30.2d\n"
+ "uzp1 v27.2d, v27.2d, v31.2d\n"
+ "tbz %x[flags], #1, 180f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v7.4s, v7.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v1.4s\n"
+ "fmin v13.4s, v13.4s, v1.4s\n"
+ "fmin v14.4s, v14.4s, v1.4s\n"
+ "fmin v8.4s, v8.4s, v1.4s\n"
+ "fmin v9.4s, v9.4s, v1.4s\n"
+ "fmin v10.4s, v10.4s, v1.4s\n"
+ "fmin v11.4s, v11.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v1.4s\n"
+ "fmin v20.4s, v20.4s, v1.4s\n"
+ "fmin v21.4s, v21.4s, v1.4s\n"
+ "fmin v22.4s, v22.4s, v1.4s\n"
+ "fmin v16.4s, v16.4s, v1.4s\n"
+ "fmin v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v1.4s\n"
+ "fmin v19.4s, v19.4s, v1.4s\n"
+ "fmin v24.4s, v24.4s, v1.4s\n"
+ "fmin v25.4s, v25.4s, v1.4s\n"
+ "fmin v26.4s, v26.4s, v1.4s\n"
+ "fmin v27.4s, v27.4s, v1.4s\n"
+ "fmax v7.4s, v7.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v0.4s\n"
+ "fmax v13.4s, v13.4s, v0.4s\n"
+ "fmax v14.4s, v14.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v0.4s\n"
+ "fmax v9.4s, v9.4s, v0.4s\n"
+ "fmax v10.4s, v10.4s, v0.4s\n"
+ "fmax v11.4s, v11.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v0.4s\n"
+ "fmax v20.4s, v20.4s, v0.4s\n"
+ "fmax v21.4s, v21.4s, v0.4s\n"
+ "fmax v22.4s, v22.4s, v0.4s\n"
+ "fmax v16.4s, v16.4s, v0.4s\n"
+ "fmax v17.4s, v17.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v0.4s\n"
+ "fmax v19.4s, v19.4s, v0.4s\n"
+ "fmax v24.4s, v24.4s, v0.4s\n"
+ "fmax v25.4s, v25.4s, v0.4s\n"
+ "fmax v26.4s, v26.4s, v0.4s\n"
+ "fmax v27.4s, v27.4s, v0.4s\n"
+ "180:" // Height 5: No activation
+ "cmp x13, #0x10\n"
+ "bge 189f\n"
+ "tbz x13, #3, 184f\n"
+ "st1 { v7.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v25.4s }, [x21], #0x10\n"
+ "tbz x13, #2, 182f\n"
+ "st1 { v13.4s }, [x12], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "st1 { v26.4s }, [x21], #0x10\n"
+ "tbz x13, #1, 181f\n"
+ "str d14, [x12], #0x8\n"
+ "str d11, [x24], #0x8\n"
+ "str d22, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "str d27, [x21], #0x8\n"
+ "tbz x13, #0, 188f\n"
+ "st1 { v14.s }[2], [x12]\n"
+ "st1 { v11.s }[2], [x24]\n"
+ "st1 { v22.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
+ "st1 { v27.s }[2], [x21]\n"
+ "b 188f\n"
+ "181:" // Height 5: Partial direct writeback: partial_1_12
+ "tbz x13, #0, 188f\n"
+ "str s14, [x12, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
+ "str s22, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
+ "str s27, [x21, #0x0]\n"
+ "b 188f\n"
+ "182:" // Height 5: Partial direct writeback: partial_2_8
+ "tbz x13, #1, 183f\n"
+ "str d13, [x12], #0x8\n"
+ "str d10, [x24], #0x8\n"
+ "str d21, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "str d26, [x21], #0x8\n"
+ "tbz x13, #0, 188f\n"
+ "st1 { v13.s }[2], [x12]\n"
+ "st1 { v10.s }[2], [x24]\n"
+ "st1 { v21.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
+ "st1 { v26.s }[2], [x21]\n"
+ "b 188f\n"
+ "183:" // Height 5: Partial direct writeback: partial_1_8
+ "tbz x13, #0, 188f\n"
+ "str s13, [x12, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
+ "str s21, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
+ "str s26, [x21, #0x0]\n"
+ "b 188f\n"
+ "184:" // Height 5: Partial direct writeback: partial_4_0
+ "tbz x13, #2, 186f\n"
+ "st1 { v7.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "tbz x13, #1, 185f\n"
+ "str d12, [x12], #0x8\n"
+ "str d9, [x24], #0x8\n"
+ "str d20, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "str d25, [x21], #0x8\n"
+ "tbz x13, #0, 188f\n"
+ "st1 { v12.s }[2], [x12]\n"
+ "st1 { v9.s }[2], [x24]\n"
+ "st1 { v20.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
+ "st1 { v25.s }[2], [x21]\n"
+ "b 188f\n"
+ "185:" // Height 5: Partial direct writeback: partial_1_4
+ "tbz x13, #0, 188f\n"
+ "str s12, [x12, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
+ "str s20, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
+ "str s25, [x21, #0x0]\n"
+ "b 188f\n"
+ "186:" // Height 5: Partial direct writeback: partial_2_0
+ "tbz x13, #1, 187f\n"
+ "str d7, [x12], #0x8\n"
+ "str d8, [x24], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d24, [x21], #0x8\n"
+ "tbz x13, #0, 188f\n"
+ "st1 { v7.s }[2], [x12]\n"
+ "st1 { v8.s }[2], [x24]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
+ "st1 { v24.s }[2], [x21]\n"
+ "b 188f\n"
+ "187:" // Height 5: Partial direct writeback: partial_1_0
+ "str s7, [x12, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
+ "str s24, [x21, #0x0]\n"
+ "188:" // Height 5: Partial direct writeback: Done
+ "b 190f\n"
+ "189:" // Height 5: Full writeback
+ "str q7, [x12, #0x0]\n"
+ "str q12, [x12, #0x10]\n"
+ "str q13, [x12, #0x20]\n"
+ "str q14, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
+ "str q15, [x23, #0x0]\n"
+ "str q20, [x23, #0x10]\n"
+ "str q21, [x23, #0x20]\n"
+ "str q22, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
+ "str q24, [x21, #0x0]\n"
+ "str q25, [x21, #0x10]\n"
+ "str q26, [x21, #0x20]\n"
+ "str q27, [x21, #0x30]\n"
+ "190:" // Height 5: Writeback done
+ "subs x13, x13, #0x10\n"
+ "bgt 154b\n"
+ "b 230f\n"
+ "191:" // Height 6
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x20, #0x18\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x14, %x[bias]\n"
+ "mov x12, %x[output_ptr]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
+ "192:" // Height 6: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0xc\n"
+ "bgt 193f\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
+ "bgt 193f\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
+ "bgt 193f\n"
+ "mov x10, x11\n"
+ "193:" // Height 6: B setup done
+ "cbz x14, 194f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
+ "zip2 v12.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
+ "zip2 v13.2d, v9.2d, v9.2d\n"
+ "zip1 v9.2d, v9.2d, v9.2d\n"
+ "zip2 v14.2d, v10.2d, v10.2d\n"
+ "zip1 v10.2d, v10.2d, v10.2d\n"
+ "add x14, x14, #0x40\n"
+ "zip2 v15.2d, v11.2d, v11.2d\n"
+ "zip1 v11.2d, v11.2d, v11.2d\n"
+ "mov v16.16b, v8.16b\n"
+ "mov v20.16b, v12.16b\n"
+ "mov v17.16b, v9.16b\n"
+ "mov v21.16b, v13.16b\n"
+ "mov v18.16b, v10.16b\n"
+ "mov v22.16b, v14.16b\n"
+ "mov v19.16b, v11.16b\n"
+ "mov v23.16b, v15.16b\n"
+ "mov v24.16b, v8.16b\n"
+ "mov v28.16b, v12.16b\n"
+ "mov v25.16b, v9.16b\n"
+ "mov v29.16b, v13.16b\n"
+ "mov v26.16b, v10.16b\n"
+ "mov v30.16b, v14.16b\n"
+ "mov v27.16b, v11.16b\n"
+ "mov v31.16b, v15.16b\n"
+ "b 206f\n"
+ "194:" // Height 6: no bias
+ "tbz %x[flags], #0, 205f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "cmp x13, #0x10\n"
+ "add x20, x21, x19, LSL #2\n"
+ "bge 203f\n"
+ "tbz x13, #3, 198f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x20], #0x10\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v26.4s }, [x21], #0x10\n"
+ "ld1 { v29.4s }, [x20], #0x10\n"
+ "tbz x13, #2, 196f\n"
+ "ld1 { v11.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v19.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v27.4s }, [x21], #0x10\n"
+ "ld1 { v30.4s }, [x20], #0x10\n"
+ "tbz x13, #1, 195f\n"
+ "ldr d16, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d6, [x21], #0x8\n"
+ "ldr d31, [x20], #0x8\n"
+ "tbz x13, #0, 202f\n"
+ "ld1 { v16.s }[2], [x12]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
+ "ld1 { v6.s }[2], [x21]\n"
+ "ld1 { v31.s }[2], [x20]\n"
+ "b 202f\n"
+ "195:" // Height 6: Partial accumulate: partial_1_12
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 202f\n"
+ "ldr s16, [x12, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
+ "ldr s6, [x21, #0x0]\n"
+ "ldr s31, [x20, #0x0]\n"
+ "b 202f\n"
+ "196:" // Height 6: Partial accumulate: partial_2_8
+ "tbz x13, #1, 197f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "ldr d27, [x21], #0x8\n"
+ "ldr d30, [x20], #0x8\n"
+ "tbz x13, #0, 202f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
+ "ld1 { v27.s }[2], [x21]\n"
+ "ld1 { v30.s }[2], [x20]\n"
+ "b 202f\n"
+ "197:" // Height 6: Partial accumulate: partial_1_8
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 202f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
+ "ldr s27, [x21, #0x0]\n"
+ "ldr s30, [x20, #0x0]\n"
+ "b 202f\n"
+ "198:" // Height 6: Partial accumulate: partial_4_0
+ "tbz x13, #2, 200f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x20], #0x10\n"
+ "tbz x13, #1, 199f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
+ "ldr d29, [x20], #0x8\n"
+ "tbz x13, #0, 202f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
+ "ld1 { v26.s }[2], [x21]\n"
+ "ld1 { v29.s }[2], [x20]\n"
+ "b 202f\n"
+ "199:" // Height 6: Partial accumulate: partial_1_4
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 202f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
+ "ldr s26, [x21, #0x0]\n"
+ "ldr s29, [x20, #0x0]\n"
+ "b 202f\n"
+ "200:" // Height 6: Partial accumulate: partial_2_0
+ "tbz x13, #1, 201f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d25, [x21], #0x8\n"
+ "ldr d28, [x20], #0x8\n"
+ "tbz x13, #0, 202f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x21]\n"
+ "ld1 { v28.s }[2], [x20]\n"
+ "b 202f\n"
+ "201:" // Height 6: Partial accumulate: partial_1_0
+ "ldr s9, [x12, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
+ "ldr s25, [x21, #0x0]\n"
+ "ldr s28, [x20, #0x0]\n"
+ "202:" // Height 6: Partial accumulate: Done
+ "sub x12, x12, x19\n"
+ "b 204f\n"
+ "203:" // Height 6: full accumulate
+ "ldr q9, [x12, #0x0]\n"
+ "ldr q10, [x12, #0x10]\n"
+ "ldr q11, [x12, #0x20]\n"
+ "ldr q16, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q17, [x23, #0x0]\n"
+ "ldr q18, [x23, #0x10]\n"
+ "ldr q19, [x23, #0x20]\n"
+ "ldr q24, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "ldr q25, [x21, #0x0]\n"
+ "ldr q26, [x21, #0x10]\n"
+ "ldr q27, [x21, #0x20]\n"
+ "ldr q6, [x21, #0x30]\n"
+ "ldr q28, [x20, #0x0]\n"
+ "ldr q29, [x20, #0x10]\n"
+ "ldr q30, [x20, #0x20]\n"
+ "ldr q31, [x20, #0x30]\n"
+ "204:" // Height 6: MMLA fixup
+ "zip1 v8.2d, v9.2d, v12.2d\n"
+ "zip2 v12.2d, v9.2d, v12.2d\n"
+ "zip1 v9.2d, v10.2d, v13.2d\n"
+ "zip2 v13.2d, v10.2d, v13.2d\n"
+ "zip1 v10.2d, v11.2d, v14.2d\n"
+ "zip2 v14.2d, v11.2d, v14.2d\n"
+ "zip1 v11.2d, v16.2d, v15.2d\n"
+ "zip2 v15.2d, v16.2d, v15.2d\n"
+ "zip1 v16.2d, v17.2d, v20.2d\n"
+ "zip2 v20.2d, v17.2d, v20.2d\n"
+ "zip1 v17.2d, v18.2d, v21.2d\n"
+ "zip2 v21.2d, v18.2d, v21.2d\n"
+ "zip1 v18.2d, v19.2d, v22.2d\n"
+ "zip2 v22.2d, v19.2d, v22.2d\n"
+ "zip1 v19.2d, v24.2d, v23.2d\n"
+ "zip2 v23.2d, v24.2d, v23.2d\n"
+ "zip1 v24.2d, v25.2d, v28.2d\n"
+ "zip2 v28.2d, v25.2d, v28.2d\n"
+ "zip1 v25.2d, v26.2d, v29.2d\n"
+ "zip2 v29.2d, v26.2d, v29.2d\n"
+ "zip1 v26.2d, v27.2d, v30.2d\n"
+ "zip2 v30.2d, v27.2d, v30.2d\n"
+ "zip1 v27.2d, v6.2d, v31.2d\n"
+ "zip2 v31.2d, v6.2d, v31.2d\n"
+ "b 206f\n"
+ "205:" // Height 6: no accumulate
+ "movi v8.16b, #0x0\n"
+ "movi v9.16b, #0x0\n"
+ "movi v10.16b, #0x0\n"
+ "movi v11.16b, #0x0\n"
+ "movi v12.16b, #0x0\n"
+ "movi v13.16b, #0x0\n"
+ "movi v14.16b, #0x0\n"
+ "movi v15.16b, #0x0\n"
+ "movi v16.16b, #0x0\n"
+ "movi v17.16b, #0x0\n"
+ "movi v18.16b, #0x0\n"
+ "movi v19.16b, #0x0\n"
+ "movi v20.16b, #0x0\n"
+ "movi v21.16b, #0x0\n"
+ "movi v22.16b, #0x0\n"
+ "movi v23.16b, #0x0\n"
+ "movi v24.16b, #0x0\n"
+ "movi v25.16b, #0x0\n"
+ "movi v26.16b, #0x0\n"
+ "movi v27.16b, #0x0\n"
+ "movi v28.16b, #0x0\n"
+ "movi v29.16b, #0x0\n"
+ "movi v30.16b, #0x0\n"
+ "movi v31.16b, #0x0\n"
+ "206:" // Height 6: setup done
+ "mov x27, #0x0\n"
+ "207:" // Height 6: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 208f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 209f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
+ "add x20, x20, x19, LSL #1\n"
+ "b 209f\n"
+ "208:" // Height 6: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
+ "209:" // Height 6: input setup done
+ "cmp x26, #0x8\n"
+ "blt 212f\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q2, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q3, [x23, #0x0]\n"
+ "ldr q4, [x22, #0x0]\n"
+ "ldr q5, [x21, #0x0]\n"
+ "ldr q6, [x20, #0x0]\n"
+ "ldr q7, [x11, #0x0]\n"
+ "blt 211f\n"
+ "210:" // Height 6: Multiply loop: Main loop head
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "sub x26, x26, #0x8\n"
+ "trn1 v2.2d, v3.2d, v4.2d\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
+ "cmp x26, #0x10\n"
+ "trn1 v4.2d, v5.2d, v6.2d\n"
+ "trn2 v5.2d, v5.2d, v6.2d\n"
+ "ldr q6, [x11, #0x10]\n"
+ ".inst 0x6e47ec98 // bfmmla v24.4s, v4.8h, v7.8h\n"
+ "ldr q7, [x10, #0x0]\n"
+ ".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0x6e46ec9c // bfmmla v28.4s, v4.8h, v6.8h\n"
+ "ldr q6, [x10, #0x10]\n"
+ ".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec99 // bfmmla v25.4s, v4.8h, v7.8h\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x6e46ec9d // bfmmla v29.4s, v4.8h, v6.8h\n"
+ "ldr q6, [x9, #0x10]\n"
+ ".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
+ "add x20, x20, #0x10\n"
+ ".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec9a // bfmmla v26.4s, v4.8h, v7.8h\n"
+ "ldr q7, [x28, #0x0]\n"
+ ".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e46ec9e // bfmmla v30.4s, v4.8h, v6.8h\n"
+ "ldr q6, [x28, #0x10]\n"
+ ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec9b // bfmmla v27.4s, v4.8h, v7.8h\n"
+ "ldr q7, [x11, #0x20]\n"
+ ".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
+ "ldr q2, [x24, #0x0]\n"
+ ".inst 0x6e46ec9f // bfmmla v31.4s, v4.8h, v6.8h\n"
+ "ldr q6, [x11, #0x30]\n"
+ "ldr q4, [x22, #0x0]\n"
+ ".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
+ ".inst 0x6e47ec70 // bfmmla v16.4s, v3.8h, v7.8h\n"
+ ".inst 0x6e47ecb8 // bfmmla v24.4s, v5.8h, v7.8h\n"
+ "ldr q7, [x10, #0x20]\n"
+ "add x11, x11, #0x40\n"
+ ".inst 0x6e46ec2c // bfmmla v12.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e46ec74 // bfmmla v20.4s, v3.8h, v6.8h\n"
+ ".inst 0x6e46ecbc // bfmmla v28.4s, v5.8h, v6.8h\n"
+ "ldr q6, [x10, #0x30]\n"
+ ".inst 0x6e47ec29 // bfmmla v9.4s, v1.8h, v7.8h\n"
+ "add x10, x10, #0x40\n"
+ ".inst 0x6e47ec71 // bfmmla v17.4s, v3.8h, v7.8h\n"
+ ".inst 0x6e47ecb9 // bfmmla v25.4s, v5.8h, v7.8h\n"
+ "ldr q7, [x9, #0x20]\n"
+ ".inst 0x6e46ec2d // bfmmla v13.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e46ec75 // bfmmla v21.4s, v3.8h, v6.8h\n"
+ ".inst 0x6e46ecbd // bfmmla v29.4s, v5.8h, v6.8h\n"
+ "ldr q6, [x9, #0x30]\n"
+ ".inst 0x6e47ec2a // bfmmla v10.4s, v1.8h, v7.8h\n"
+ "add x9, x9, #0x40\n"
+ ".inst 0x6e47ec72 // bfmmla v18.4s, v3.8h, v7.8h\n"
+ ".inst 0x6e47ecba // bfmmla v26.4s, v5.8h, v7.8h\n"
+ "ldr q7, [x28, #0x20]\n"
+ ".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e46ec76 // bfmmla v22.4s, v3.8h, v6.8h\n"
+ ".inst 0x6e46ecbe // bfmmla v30.4s, v5.8h, v6.8h\n"
+ "ldr q6, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ ".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
+ ".inst 0x6e47ec73 // bfmmla v19.4s, v3.8h, v7.8h\n"
+ ".inst 0x6e47ecbb // bfmmla v27.4s, v5.8h, v7.8h\n"
+ "ldr q7, [x11, #0x0]\n"
+ ".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
+ "ldr q1, [x25, #0x0]\n"
+ ".inst 0x6e46ec77 // bfmmla v23.4s, v3.8h, v6.8h\n"
+ "ldr q3, [x23, #0x0]\n"
+ ".inst 0x6e46ecbf // bfmmla v31.4s, v5.8h, v6.8h\n"
+ "ldr q5, [x21, #0x0]\n"
+ "ldr q6, [x20, #0x0]\n"
+ "bge 210b\n"
+ "211:" // Height 6: Multiply loop: Single iteration only
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "sub x26, x26, #0x8\n"
+ "trn1 v2.2d, v3.2d, v4.2d\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
+ "add x25, x25, #0x10\n"
+ "trn1 v4.2d, v5.2d, v6.2d\n"
+ "trn2 v5.2d, v5.2d, v6.2d\n"
+ "ldr q6, [x11, #0x10]\n"
+ ".inst 0x6e47ec98 // bfmmla v24.4s, v4.8h, v7.8h\n"
+ "ldr q7, [x10, #0x0]\n"
+ ".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x6e46ec9c // bfmmla v28.4s, v4.8h, v6.8h\n"
+ "ldr q6, [x10, #0x10]\n"
+ ".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec99 // bfmmla v25.4s, v4.8h, v7.8h\n"
+ "ldr q7, [x9, #0x0]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
+ "add x21, x21, #0x10\n"
+ "add x20, x20, #0x10\n"
+ ".inst 0x6e46ec9d // bfmmla v29.4s, v4.8h, v6.8h\n"
+ "ldr q6, [x9, #0x10]\n"
+ ".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec9a // bfmmla v26.4s, v4.8h, v7.8h\n"
+ "ldr q7, [x28, #0x0]\n"
+ ".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e46ec9e // bfmmla v30.4s, v4.8h, v6.8h\n"
+ "ldr q6, [x28, #0x10]\n"
+ ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec9b // bfmmla v27.4s, v4.8h, v7.8h\n"
+ "ldr q7, [x11, #0x20]\n"
+ ".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e46ec9f // bfmmla v31.4s, v4.8h, v6.8h\n"
+ "ldr q6, [x11, #0x30]\n"
+ ".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
+ "add x11, x11, #0x40\n"
+ ".inst 0x6e47ec70 // bfmmla v16.4s, v3.8h, v7.8h\n"
+ ".inst 0x6e47ecb8 // bfmmla v24.4s, v5.8h, v7.8h\n"
+ "ldr q7, [x10, #0x20]\n"
+ ".inst 0x6e46ec2c // bfmmla v12.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e46ec74 // bfmmla v20.4s, v3.8h, v6.8h\n"
+ ".inst 0x6e46ecbc // bfmmla v28.4s, v5.8h, v6.8h\n"
+ "ldr q6, [x10, #0x30]\n"
+ ".inst 0x6e47ec29 // bfmmla v9.4s, v1.8h, v7.8h\n"
+ "add x10, x10, #0x40\n"
+ ".inst 0x6e47ec71 // bfmmla v17.4s, v3.8h, v7.8h\n"
+ ".inst 0x6e47ecb9 // bfmmla v25.4s, v5.8h, v7.8h\n"
+ "ldr q7, [x9, #0x20]\n"
+ ".inst 0x6e46ec2d // bfmmla v13.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e46ec75 // bfmmla v21.4s, v3.8h, v6.8h\n"
+ ".inst 0x6e46ecbd // bfmmla v29.4s, v5.8h, v6.8h\n"
+ "ldr q6, [x9, #0x30]\n"
+ ".inst 0x6e47ec2a // bfmmla v10.4s, v1.8h, v7.8h\n"
+ "add x9, x9, #0x40\n"
+ ".inst 0x6e47ec72 // bfmmla v18.4s, v3.8h, v7.8h\n"
+ ".inst 0x6e47ecba // bfmmla v26.4s, v5.8h, v7.8h\n"
+ "ldr q7, [x28, #0x20]\n"
+ ".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e46ec76 // bfmmla v22.4s, v3.8h, v6.8h\n"
+ ".inst 0x6e46ecbe // bfmmla v30.4s, v5.8h, v6.8h\n"
+ "ldr q6, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ ".inst 0x6e47ec2b // bfmmla v11.4s, v1.8h, v7.8h\n"
+ ".inst 0x6e47ec73 // bfmmla v19.4s, v3.8h, v7.8h\n"
+ ".inst 0x6e47ecbb // bfmmla v27.4s, v5.8h, v7.8h\n"
+ ".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e46ec77 // bfmmla v23.4s, v3.8h, v6.8h\n"
+ ".inst 0x6e46ecbf // bfmmla v31.4s, v5.8h, v6.8h\n"
+ "212:" // Height 6: Multiply loop: Main loop skip
+ "cbz x26, 217f\n"
+ "cmp x26, #0x4\n"
+ "blt 214f\n"
+ "213:" // Height 6: Multiply loop: Odd block loop
+ "ldr d1, [x25], #0x8\n"
+ "ldr d2, [x24], #0x8\n"
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "sub x26, x26, #0x4\n"
+ "ldr d3, [x23], #0x8\n"
+ "ldr d4, [x22], #0x8\n"
+ "trn1 v2.2d, v3.2d, v4.2d\n"
+ "cmp x26, #0x4\n"
+ "ldr d5, [x21], #0x8\n"
+ "ldr d7, [x20], #0x8\n"
+ "trn1 v4.2d, v5.2d, v7.2d\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x11, #0x10]\n"
+ ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e46ec98 // bfmmla v24.4s, v4.8h, v6.8h\n"
+ "ldr q6, [x10, #0x0]\n"
+ ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
+ "add x11, x11, #0x20\n"
+ ".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec9c // bfmmla v28.4s, v4.8h, v7.8h\n"
+ "ldr q7, [x10, #0x10]\n"
+ "add x10, x10, #0x20\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec51 // bfmmla v17.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e46ec99 // bfmmla v25.4s, v4.8h, v6.8h\n"
+ "ldr q6, [x9, #0x0]\n"
+ ".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec55 // bfmmla v21.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec9d // bfmmla v29.4s, v4.8h, v7.8h\n"
+ "ldr q7, [x9, #0x10]\n"
+ "add x9, x9, #0x20\n"
+ ".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec52 // bfmmla v18.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e46ec9a // bfmmla v26.4s, v4.8h, v6.8h\n"
+ "ldr q6, [x28, #0x0]\n"
+ ".inst 0x6e47ec0e // bfmmla v14.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec56 // bfmmla v22.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec9e // bfmmla v30.4s, v4.8h, v7.8h\n"
+ "ldr q7, [x28, #0x10]\n"
+ "add x28, x28, #0x20\n"
+ ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec53 // bfmmla v19.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e46ec9b // bfmmla v27.4s, v4.8h, v6.8h\n"
+ ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec9f // bfmmla v31.4s, v4.8h, v7.8h\n"
+ "bge 213b\n"
+ "214:" // Height 6: Multiply loop: Skip odd blocks
+ "cbz x26, 217f\n"
+ "tbz x26, #1, 215f\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s2, [x24], #0x4\n"
+ "ldr s3, [x23], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "ldr s5, [x21], #0x4\n"
+ "ldr s6, [x20], #0x4\n"
+ "tbz x26, #0, 216f\n"
+ "ld1 { v1.h }[2], [x25]\n"
+ "ld1 { v2.h }[2], [x24]\n"
+ "ld1 { v3.h }[2], [x23]\n"
+ "ld1 { v4.h }[2], [x22]\n"
+ "ld1 { v5.h }[2], [x21]\n"
+ "ld1 { v6.h }[2], [x20]\n"
+ "b 216f\n"
+ "215:" // Height 6: Multiply loop: Ragged operand read: partial_1_0
+ "ldr h1, [x25, #0x0]\n"
+ "ldr h2, [x24, #0x0]\n"
+ "ldr h3, [x23, #0x0]\n"
+ "ldr h4, [x22, #0x0]\n"
+ "ldr h5, [x21, #0x0]\n"
+ "ldr h6, [x20, #0x0]\n"
+ "216:" // Height 6: Multiply loop: Ragged operand read: Done
+ "ldr q7, [x11, #0x0]\n"
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn1 v2.2d, v3.2d, v4.2d\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "trn1 v4.2d, v5.2d, v6.2d\n"
+ "ldr q6, [x11, #0x10]\n"
+ ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec98 // bfmmla v24.4s, v4.8h, v7.8h\n"
+ "ldr q7, [x10, #0x0]\n"
+ ".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
+ "add x11, x11, #0x20\n"
+ ".inst 0x6e46ec9c // bfmmla v28.4s, v4.8h, v6.8h\n"
+ "ldr q6, [x10, #0x10]\n"
+ ".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
+ "add x10, x10, #0x20\n"
+ ".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec99 // bfmmla v25.4s, v4.8h, v7.8h\n"
+ "ldr q7, [x9, #0x0]\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e46ec9d // bfmmla v29.4s, v4.8h, v6.8h\n"
+ "ldr q6, [x9, #0x10]\n"
+ ".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
+ "add x9, x9, #0x20\n"
+ ".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec9a // bfmmla v26.4s, v4.8h, v7.8h\n"
+ "ldr q7, [x28, #0x0]\n"
+ ".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e46ec9e // bfmmla v30.4s, v4.8h, v6.8h\n"
+ "ldr q6, [x28, #0x10]\n"
+ ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
+ "add x28, x28, #0x20\n"
+ ".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec9b // bfmmla v27.4s, v4.8h, v7.8h\n"
+ ".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e46ec9f // bfmmla v31.4s, v4.8h, v6.8h\n"
+ "217:" // Height 6: Multiply loop: No odd multiplies
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 207b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "uzp1 v7.2d, v8.2d, v12.2d\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "uzp2 v8.2d, v8.2d, v12.2d\n"
+ "uzp1 v12.2d, v9.2d, v13.2d\n"
+ "uzp2 v9.2d, v9.2d, v13.2d\n"
+ "uzp1 v13.2d, v10.2d, v14.2d\n"
+ "add x20, x21, x19, LSL #2\n"
+ "uzp2 v10.2d, v10.2d, v14.2d\n"
+ "uzp1 v14.2d, v11.2d, v15.2d\n"
+ "uzp2 v11.2d, v11.2d, v15.2d\n"
+ "uzp1 v15.2d, v16.2d, v20.2d\n"
+ "uzp2 v16.2d, v16.2d, v20.2d\n"
+ "uzp1 v20.2d, v17.2d, v21.2d\n"
+ "uzp2 v17.2d, v17.2d, v21.2d\n"
+ "uzp1 v21.2d, v18.2d, v22.2d\n"
+ "uzp2 v18.2d, v18.2d, v22.2d\n"
+ "uzp1 v22.2d, v19.2d, v23.2d\n"
+ "uzp2 v19.2d, v19.2d, v23.2d\n"
+ "uzp1 v23.2d, v24.2d, v28.2d\n"
+ "uzp2 v24.2d, v24.2d, v28.2d\n"
+ "uzp1 v28.2d, v25.2d, v29.2d\n"
+ "uzp2 v25.2d, v25.2d, v29.2d\n"
+ "uzp1 v29.2d, v26.2d, v30.2d\n"
+ "uzp2 v26.2d, v26.2d, v30.2d\n"
+ "uzp1 v30.2d, v27.2d, v31.2d\n"
+ "uzp2 v27.2d, v27.2d, v31.2d\n"
+ "tbz %x[flags], #1, 218f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v7.4s, v7.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v1.4s\n"
+ "fmin v13.4s, v13.4s, v1.4s\n"
+ "fmin v14.4s, v14.4s, v1.4s\n"
+ "fmin v8.4s, v8.4s, v1.4s\n"
+ "fmin v9.4s, v9.4s, v1.4s\n"
+ "fmin v10.4s, v10.4s, v1.4s\n"
+ "fmin v11.4s, v11.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v1.4s\n"
+ "fmin v20.4s, v20.4s, v1.4s\n"
+ "fmin v21.4s, v21.4s, v1.4s\n"
+ "fmin v22.4s, v22.4s, v1.4s\n"
+ "fmin v16.4s, v16.4s, v1.4s\n"
+ "fmin v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v1.4s\n"
+ "fmin v19.4s, v19.4s, v1.4s\n"
+ "fmin v23.4s, v23.4s, v1.4s\n"
+ "fmin v28.4s, v28.4s, v1.4s\n"
+ "fmin v29.4s, v29.4s, v1.4s\n"
+ "fmin v30.4s, v30.4s, v1.4s\n"
+ "fmin v24.4s, v24.4s, v1.4s\n"
+ "fmin v25.4s, v25.4s, v1.4s\n"
+ "fmin v26.4s, v26.4s, v1.4s\n"
+ "fmin v27.4s, v27.4s, v1.4s\n"
+ "fmax v7.4s, v7.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v0.4s\n"
+ "fmax v13.4s, v13.4s, v0.4s\n"
+ "fmax v14.4s, v14.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v0.4s\n"
+ "fmax v9.4s, v9.4s, v0.4s\n"
+ "fmax v10.4s, v10.4s, v0.4s\n"
+ "fmax v11.4s, v11.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v0.4s\n"
+ "fmax v20.4s, v20.4s, v0.4s\n"
+ "fmax v21.4s, v21.4s, v0.4s\n"
+ "fmax v22.4s, v22.4s, v0.4s\n"
+ "fmax v16.4s, v16.4s, v0.4s\n"
+ "fmax v17.4s, v17.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v0.4s\n"
+ "fmax v19.4s, v19.4s, v0.4s\n"
+ "fmax v23.4s, v23.4s, v0.4s\n"
+ "fmax v28.4s, v28.4s, v0.4s\n"
+ "fmax v29.4s, v29.4s, v0.4s\n"
+ "fmax v30.4s, v30.4s, v0.4s\n"
+ "fmax v24.4s, v24.4s, v0.4s\n"
+ "fmax v25.4s, v25.4s, v0.4s\n"
+ "fmax v26.4s, v26.4s, v0.4s\n"
+ "fmax v27.4s, v27.4s, v0.4s\n"
+ "218:" // Height 6: No activation
+ "cmp x13, #0x10\n"
+ "bge 227f\n"
+ "tbz x13, #3, 222f\n"
+ "st1 { v7.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "st1 { v23.4s }, [x21], #0x10\n"
+ "st1 { v28.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "st1 { v25.4s }, [x20], #0x10\n"
+ "tbz x13, #2, 220f\n"
+ "st1 { v13.4s }, [x12], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "st1 { v29.4s }, [x21], #0x10\n"
+ "st1 { v26.4s }, [x20], #0x10\n"
+ "tbz x13, #1, 219f\n"
+ "str d14, [x12], #0x8\n"
+ "str d11, [x24], #0x8\n"
+ "str d22, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "str d30, [x21], #0x8\n"
+ "str d27, [x20], #0x8\n"
+ "tbz x13, #0, 226f\n"
+ "st1 { v14.s }[2], [x12]\n"
+ "st1 { v11.s }[2], [x24]\n"
+ "st1 { v22.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
+ "st1 { v30.s }[2], [x21]\n"
+ "st1 { v27.s }[2], [x20]\n"
+ "b 226f\n"
+ "219:" // Height 6: Partial direct writeback: partial_1_12
+ "tbz x13, #0, 226f\n"
+ "str s14, [x12, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
+ "str s22, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
+ "str s30, [x21, #0x0]\n"
+ "str s27, [x20, #0x0]\n"
+ "b 226f\n"
+ "220:" // Height 6: Partial direct writeback: partial_2_8
+ "tbz x13, #1, 221f\n"
+ "str d13, [x12], #0x8\n"
+ "str d10, [x24], #0x8\n"
+ "str d21, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "str d29, [x21], #0x8\n"
+ "str d26, [x20], #0x8\n"
+ "tbz x13, #0, 226f\n"
+ "st1 { v13.s }[2], [x12]\n"
+ "st1 { v10.s }[2], [x24]\n"
+ "st1 { v21.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
+ "st1 { v29.s }[2], [x21]\n"
+ "st1 { v26.s }[2], [x20]\n"
+ "b 226f\n"
+ "221:" // Height 6: Partial direct writeback: partial_1_8
+ "tbz x13, #0, 226f\n"
+ "str s13, [x12, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
+ "str s21, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
+ "str s29, [x21, #0x0]\n"
+ "str s26, [x20, #0x0]\n"
+ "b 226f\n"
+ "222:" // Height 6: Partial direct writeback: partial_4_0
+ "tbz x13, #2, 224f\n"
+ "st1 { v7.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v23.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "tbz x13, #1, 223f\n"
+ "str d12, [x12], #0x8\n"
+ "str d9, [x24], #0x8\n"
+ "str d20, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "str d28, [x21], #0x8\n"
+ "str d25, [x20], #0x8\n"
+ "tbz x13, #0, 226f\n"
+ "st1 { v12.s }[2], [x12]\n"
+ "st1 { v9.s }[2], [x24]\n"
+ "st1 { v20.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
+ "st1 { v28.s }[2], [x21]\n"
+ "st1 { v25.s }[2], [x20]\n"
+ "b 226f\n"
+ "223:" // Height 6: Partial direct writeback: partial_1_4
+ "tbz x13, #0, 226f\n"
+ "str s12, [x12, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
+ "str s20, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
+ "str s28, [x21, #0x0]\n"
+ "str s25, [x20, #0x0]\n"
+ "b 226f\n"
+ "224:" // Height 6: Partial direct writeback: partial_2_0
+ "tbz x13, #1, 225f\n"
+ "str d7, [x12], #0x8\n"
+ "str d8, [x24], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d23, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
+ "tbz x13, #0, 226f\n"
+ "st1 { v7.s }[2], [x12]\n"
+ "st1 { v8.s }[2], [x24]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
+ "st1 { v23.s }[2], [x21]\n"
+ "st1 { v24.s }[2], [x20]\n"
+ "b 226f\n"
+ "225:" // Height 6: Partial direct writeback: partial_1_0
+ "str s7, [x12, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
+ "str s23, [x21, #0x0]\n"
+ "str s24, [x20, #0x0]\n"
+ "226:" // Height 6: Partial direct writeback: Done
+ "b 228f\n"
+ "227:" // Height 6: Full writeback
+ "str q7, [x12, #0x0]\n"
+ "str q12, [x12, #0x10]\n"
+ "str q13, [x12, #0x20]\n"
+ "str q14, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
+ "str q15, [x23, #0x0]\n"
+ "str q20, [x23, #0x10]\n"
+ "str q21, [x23, #0x20]\n"
+ "str q22, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
+ "str q23, [x21, #0x0]\n"
+ "str q28, [x21, #0x10]\n"
+ "str q29, [x21, #0x20]\n"
+ "str q30, [x21, #0x30]\n"
+ "str q24, [x20, #0x0]\n"
+ "str q25, [x20, #0x10]\n"
+ "str q26, [x20, #0x20]\n"
+ "str q27, [x20, #0x30]\n"
+ "228:" // Height 6: Writeback done
+ "subs x13, x13, #0x10\n"
+ "bgt 192b\n"
+ "subs %x[M], %x[M], #0x6\n"
+ "beq 230f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 229f\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "b 1b\n"
+ "229:" // Update direct input
+ "mov x19, #0xc\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
+ "b 1b\n"
+ "230:" // Exit
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
+ : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ );
+}
+
+} // namespace arm_gemm
+#endif // __aarch64__
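For readers unfamiliar with the BFMMLA accumulator layout used above, here is a small standalone C++ sketch (illustration only, not part of the patch) of the de-interleave that the UZP1/UZP2 pairs perform, assuming the architectural 2x2 tile layout { r0c0, r0c1, r1c0, r1c1 } per 128-bit accumulator:

    #include <cstdio>

    int main() {
        // Two accumulators covering columns 0-1 and 2-3 of the same row pair.
        float acc0[4] = {0.f, 1.f, 10.f, 11.f};
        float acc1[4] = {2.f, 3.f, 12.f, 13.f};
        float row0[4], row1[4];
        // uzp1 (.2d) keeps the even 64-bit lanes: row 0 of both tiles.
        row0[0] = acc0[0]; row0[1] = acc0[1]; row0[2] = acc1[0]; row0[3] = acc1[1];
        // uzp2 (.2d) keeps the odd 64-bit lanes: row 1 of both tiles.
        row1[0] = acc0[2]; row1[1] = acc0[3]; row1[2] = acc1[2]; row1[3] = acc1[3];
        std::printf("row0: %g %g %g %g\n", row0[0], row0[1], row0[2], row0[3]);
        std::printf("row1: %g %g %g %g\n", row1[0], row1[1], row1[2], row1[3]);
        return 0;
    }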
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp16_mla_6x32.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp16_mla_6x32.hpp
new file mode 100644
index 0000000000..f7506e5123
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp16_mla_6x32.hpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#pragma once
+#ifdef __aarch64__
+
+#include "../std_transforms_fixed.hpp"
+#include "../kernel_weight_format.hpp"
+#include "../performance_parameters.hpp"
+
+#define ARGLIST \
+ unsigned int, const unsigned int *, \
+ IndirectInputArg<__fp16>, \
+ size_t, size_t, \
+ const __fp16 *, \
+ size_t, \
+ IndirectOutputArg<__fp16>, \
+ const __fp16 *, Activation, bool
+
+namespace arm_gemm
+{
+// Actual kernel implementations
+void a64_ffhybrid_fp16_mla_6x32( ARGLIST );
+
+class cls_a64_ffhybrid_fp16_mla_6x32
+{
+public:
+ typedef __fp16 lhs_operand_type;
+ typedef __fp16 rhs_operand_type;
+ typedef __fp16 result_type;
+
+ typedef void (*kern_type)( ARGLIST );
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 6;
+ }
+ static unsigned int stripe_width()
+ {
+ return 8;
+ }
+
+ static KernelWeightFormat kernel_weight_format()
+ {
+ return KernelWeightFormat::VL128_BL16;
+ }
+
+ static unsigned int out_width()
+ {
+ return 32;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 1;
+ }
+
+ static constexpr bool supports_accumulate()
+ {
+ return true;
+ }
+
+ StdTransformsFixed<rhs_operand_type, result_type, 6, 32, 1> transforms = {};
+ template<typename T>
+ static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
+ {
+ if (std::is_same<T, __fp16>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 29.14 };
+ }
+ }
+
+ return { 1.0 };
+ }
+
+ // Default to the generic kernel
+ kern_type kernel=a64_ffhybrid_fp16_mla_6x32;
+ cls_a64_ffhybrid_fp16_mla_6x32(const CPUInfo *)
+ {
+ }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+#endif // __aarch64__
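The blocking parameters above mean the kernel produces its output in 6-row by 32-column tiles, each tile assembled from four 8-wide column stripes (stripe_width). A minimal standalone sketch (illustration only, all names local to the sketch) of how such a tiling walks an arbitrary M x N output:

    #include <algorithm>
    #include <cstdio>

    int main() {
        const unsigned M = 13, N = 70;         // example problem size
        const unsigned out_h = 6, out_w = 32;  // out_height()/out_width() above
        for (unsigned m = 0; m < M; m += out_h) {
            for (unsigned n = 0; n < N; n += out_w) {
                // Edge tiles are ragged; the kernel covers them with the
                // partial writeback paths in generic.cpp.
                std::printf("tile rows [%u,%u) cols [%u,%u)\n",
                            m, std::min(m + out_h, M),
                            n, std::min(n + out_w, N));
            }
        }
        return 0;
    }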
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp16_mla_6x32/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp16_mla_6x32/generic.cpp
new file mode 100644
index 0000000000..e1458b39ab
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp16_mla_6x32/generic.cpp
@@ -0,0 +1,5429 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#if defined(__aarch64__) && (defined(FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC))
+
+#include "arm_gemm.hpp"
+#include "../../utils.hpp"
+
+#include <cassert>
+#include <limits>
+
+namespace arm_gemm {
+
+void a64_ffhybrid_fp16_mla_6x32 (
+ unsigned int num_strings, const unsigned int *string_lengths, IndirectInputArg<__fp16> A_arg,
+ size_t M, size_t N, const __fp16 *B_ptr, size_t B_stride, IndirectOutputArg<__fp16> output_arg,
+ const __fp16 *bias, Activation act, bool accumulate
+)
+{
+ struct KernelArgs {
+ __fp16 maxval = static_cast<__fp16>(std::numeric_limits<float>::infinity());
+ __fp16 minval = -static_cast<__fp16>(std::numeric_limits<float>::infinity());
+ unsigned int num_strings = {};
+ const unsigned int *string_lengths = {};
+ size_t N = {};
+ const __fp16 *B_ptr = {};
+ const __fp16 *cur_B_ptr = {};
+ size_t B_stride = {};
+ size_t output_offset = {};
+ size_t input_initial_col = {};
+ size_t input_offset = {};
+ } ka;
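+ // The assembly reads KernelArgs fields through the offsetof()-based
+ // %[offsetof_*] operands in its constraint list, so this struct layout
+ // must stay in sync with those operands.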
+
+ unsigned long flags=0;
+ void *output_ptr;
+ void *input_ptr;
+
+ if (output_arg.is_indirect) {
+ output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_offset=output_arg.indirect.offset;
+ flags |= 0x4;
+ } else {
+ output_ptr=(void *)(output_arg.direct.base);
+ ka.output_offset=output_arg.direct.stride;
+ }
+
+ if (A_arg.is_indirect) {
+ input_ptr=(void *)(A_arg.indirect.ptr);
+ ka.input_offset=A_arg.indirect.start_row;
+ ka.input_initial_col=A_arg.indirect.start_col;
+ flags |= 0x8;
+ } else {
+ assert(num_strings==1);
+ input_ptr=(void *)(A_arg.direct.base);
+ ka.input_offset=A_arg.direct.stride;
+ }
+ if (accumulate) {
+ flags |= 0x1;
+ }
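+ // Flag bits consumed by the assembly: bit 0 = accumulate into the
+ // existing output, bit 1 = apply the min/max clamp, bit 2 = indirect
+ // output addressing, bit 3 = indirect (string-based) input addressing.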
+ ka.num_strings = num_strings;
+ ka.string_lengths = string_lengths;
+ ka.N = N;
+ ka.B_ptr = B_ptr;
+ ka.B_stride = B_stride;
+ switch(act.type) {
+ default:
+ case Activation::Type::None:
+ break;
+ case Activation::Type::BoundedReLU:
+ ka.maxval = static_cast<__fp16>(act.param1);
+ /* fall through */
+ case Activation::Type::ReLU:
+ ka.minval = 0;
+ flags |= 0x2;
+ break;
+ }
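+ // The clamp itself happens in the assembly: when flag bit 1 is set,
+ // maxval and minval are broadcast with LD1R and applied with FMIN/FMAX
+ // just before writeback.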
+ __asm__ __volatile__(
+ "1:" // Row loop
+ "cmp %x[M], #0x6\n"
+ "bge 251f\n"
+ "cmp %x[M], #0x4\n"
+ "bgt 201f\n"
+ "beq 151f\n"
+ "cmp %x[M], #0x2\n"
+ "bgt 101f\n"
+ "beq 51f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "2:" // Height 1: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0x18\n"
+ "bgt 3f\n"
+ "cmp x13, #0x10\n"
+ "mov x28, x11\n"
+ "bgt 3f\n"
+ "cmp x13, #0x8\n"
+ "mov x9, x11\n"
+ "bgt 3f\n"
+ "mov x10, x11\n"
+ "3:" // Height 1: B setup done
+ "cbz x14, 4f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
+ "add x14, x14, #0x40\n"
+ "b 23f\n"
+ "4:" // Height 1: no bias
+ "tbz %x[flags], #0, 22f\n"
+ "cmp x13, #0x20\n"
+ "bge 21f\n"
+ "tbz x13, #4, 12f\n"
+ "ld1 { v8.8h }, [x12], #0x10\n"
+ "ld1 { v9.8h }, [x12], #0x10\n"
+ "tbz x13, #3, 8f\n"
+ "ld1 { v10.8h }, [x12], #0x10\n"
+ "tbz x13, #2, 6f\n"
+ "ldr d11, [x12], #0x8\n"
+ "tbz x13, #1, 5f\n"
+ "ld1 { v11.s }[2], [x12], #0x4\n"
+ "mov x19, #0x3c\n"
+ "tbz x13, #0, 20f\n"
+ "ld1 { v11.h }[6], [x12]\n"
+ "b 20f\n"
+ "5:" // Height 1: Partial accumulate: partial_1_28
+ "mov x19, #0x38\n"
+ "tbz x13, #0, 20f\n"
+ "ld1 { v11.h }[4], [x12]\n"
+ "b 20f\n"
+ "6:" // Height 1: Partial accumulate: partial_2_24
+ "tbz x13, #1, 7f\n"
+ "ldr s11, [x12], #0x4\n"
+ "mov x19, #0x34\n"
+ "tbz x13, #0, 20f\n"
+ "ld1 { v11.h }[2], [x12]\n"
+ "b 20f\n"
+ "7:" // Height 1: Partial accumulate: partial_1_24
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 20f\n"
+ "ldr h11, [x12, #0x0]\n"
+ "b 20f\n"
+ "8:" // Height 1: Partial accumulate: partial_4_16
+ "tbz x13, #2, 10f\n"
+ "ldr d10, [x12], #0x8\n"
+ "tbz x13, #1, 9f\n"
+ "ld1 { v10.s }[2], [x12], #0x4\n"
+ "mov x19, #0x2c\n"
+ "tbz x13, #0, 20f\n"
+ "ld1 { v10.h }[6], [x12]\n"
+ "b 20f\n"
+ "9:" // Height 1: Partial accumulate: partial_1_20
+ "mov x19, #0x28\n"
+ "tbz x13, #0, 20f\n"
+ "ld1 { v10.h }[4], [x12]\n"
+ "b 20f\n"
+ "10:" // Height 1: Partial accumulate: partial_2_16
+ "tbz x13, #1, 11f\n"
+ "ldr s10, [x12], #0x4\n"
+ "mov x19, #0x24\n"
+ "tbz x13, #0, 20f\n"
+ "ld1 { v10.h }[2], [x12]\n"
+ "b 20f\n"
+ "11:" // Height 1: Partial accumulate: partial_1_16
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 20f\n"
+ "ldr h10, [x12, #0x0]\n"
+ "b 20f\n"
+ "12:" // Height 1: Partial accumulate: partial_8_0
+ "tbz x13, #3, 16f\n"
+ "ld1 { v8.8h }, [x12], #0x10\n"
+ "tbz x13, #2, 14f\n"
+ "ldr d9, [x12], #0x8\n"
+ "tbz x13, #1, 13f\n"
+ "ld1 { v9.s }[2], [x12], #0x4\n"
+ "mov x19, #0x1c\n"
+ "tbz x13, #0, 20f\n"
+ "ld1 { v9.h }[6], [x12]\n"
+ "b 20f\n"
+ "13:" // Height 1: Partial accumulate: partial_1_12
+ "mov x19, #0x18\n"
+ "tbz x13, #0, 20f\n"
+ "ld1 { v9.h }[4], [x12]\n"
+ "b 20f\n"
+ "14:" // Height 1: Partial accumulate: partial_2_8
+ "tbz x13, #1, 15f\n"
+ "ldr s9, [x12], #0x4\n"
+ "mov x19, #0x14\n"
+ "tbz x13, #0, 20f\n"
+ "ld1 { v9.h }[2], [x12]\n"
+ "b 20f\n"
+ "15:" // Height 1: Partial accumulate: partial_1_8
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 20f\n"
+ "ldr h9, [x12, #0x0]\n"
+ "b 20f\n"
+ "16:" // Height 1: Partial accumulate: partial_4_0
+ "tbz x13, #2, 18f\n"
+ "ldr d8, [x12], #0x8\n"
+ "tbz x13, #1, 17f\n"
+ "ld1 { v8.s }[2], [x12], #0x4\n"
+ "mov x19, #0xc\n"
+ "tbz x13, #0, 20f\n"
+ "ld1 { v8.h }[6], [x12]\n"
+ "b 20f\n"
+ "17:" // Height 1: Partial accumulate: partial_1_4
+ "mov x19, #0x8\n"
+ "tbz x13, #0, 20f\n"
+ "ld1 { v8.h }[4], [x12]\n"
+ "b 20f\n"
+ "18:" // Height 1: Partial accumulate: partial_2_0
+ "tbz x13, #1, 19f\n"
+ "ldr s8, [x12], #0x4\n"
+ "mov x19, #0x4\n"
+ "tbz x13, #0, 20f\n"
+ "ld1 { v8.h }[2], [x12]\n"
+ "b 20f\n"
+ "19:" // Height 1: Partial accumulate: partial_1_0
+ "ldr h8, [x12, #0x0]\n"
+ "mov x19, #0x0\n"
+ "20:" // Height 1: Partial accumulate: Done
+ "sub x12, x12, x19\n"
+ "b 23f\n"
+ "21:" // Height 1: full accumulate
+ "ldr q8, [x12, #0x0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "ldr q10, [x12, #0x20]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "b 23f\n"
+ "22:" // Height 1: no accumulate
+ "movi v8.16b, #0x0\n"
+ "movi v9.16b, #0x0\n"
+ "movi v10.16b, #0x0\n"
+ "movi v11.16b, #0x0\n"
+ "23:" // Height 1: setup done
+ "mov x27, #0x0\n"
+ "24:" // Height 1: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 25f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 26f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "b 26f\n"
+ "25:" // Height 1: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "26:" // Height 1: input setup done
+ "cmp x26, #0x8\n"
+ "blt 29f\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q7, [x10, #0x0]\n"
+ "blt 28f\n"
+ "27:" // Height 1: Multiply loop: Main loop head
+ "fmla v8.8h, v6.8h, v0.h[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "fmla v9.8h, v7.8h, v0.h[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "fmla v10.8h, v6.8h, v0.h[0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "fmla v11.8h, v7.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "fmla v8.8h, v6.8h, v0.h[1]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "fmla v9.8h, v7.8h, v0.h[1]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "fmla v10.8h, v6.8h, v0.h[1]\n"
+ "ldr q6, [x11, #0x20]\n"
+ "fmla v11.8h, v7.8h, v0.h[1]\n"
+ "ldr q7, [x10, #0x20]\n"
+ "fmla v8.8h, v6.8h, v0.h[2]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "fmla v9.8h, v7.8h, v0.h[2]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "fmla v10.8h, v6.8h, v0.h[2]\n"
+ "ldr q6, [x11, #0x30]\n"
+ "fmla v11.8h, v7.8h, v0.h[2]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "fmla v8.8h, v6.8h, v0.h[3]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "fmla v9.8h, v7.8h, v0.h[3]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "fmla v10.8h, v6.8h, v0.h[3]\n"
+ "ldr q6, [x11, #0x40]\n"
+ "fmla v11.8h, v7.8h, v0.h[3]\n"
+ "ldr q7, [x10, #0x40]\n"
+ "fmla v8.8h, v6.8h, v0.h[4]\n"
+ "ldr q6, [x9, #0x40]\n"
+ "fmla v9.8h, v7.8h, v0.h[4]\n"
+ "ldr q7, [x28, #0x40]\n"
+ "fmla v10.8h, v6.8h, v0.h[4]\n"
+ "ldr q6, [x11, #0x50]\n"
+ "fmla v11.8h, v7.8h, v0.h[4]\n"
+ "ldr q7, [x10, #0x50]\n"
+ "fmla v8.8h, v6.8h, v0.h[5]\n"
+ "ldr q6, [x9, #0x50]\n"
+ "fmla v9.8h, v7.8h, v0.h[5]\n"
+ "ldr q7, [x28, #0x50]\n"
+ "fmla v10.8h, v6.8h, v0.h[5]\n"
+ "ldr q6, [x11, #0x60]\n"
+ "fmla v11.8h, v7.8h, v0.h[5]\n"
+ "ldr q7, [x10, #0x60]\n"
+ "fmla v8.8h, v6.8h, v0.h[6]\n"
+ "ldr q6, [x9, #0x60]\n"
+ "fmla v9.8h, v7.8h, v0.h[6]\n"
+ "ldr q7, [x28, #0x60]\n"
+ "fmla v10.8h, v6.8h, v0.h[6]\n"
+ "ldr q6, [x11, #0x70]\n"
+ "fmla v11.8h, v7.8h, v0.h[6]\n"
+ "ldr q7, [x10, #0x70]\n"
+ "fmla v8.8h, v6.8h, v0.h[7]\n"
+ "ldr q6, [x9, #0x70]\n"
+ "fmla v9.8h, v7.8h, v0.h[7]\n"
+ "ldr q7, [x28, #0x70]\n"
+ "sub x26, x26, #0x8\n"
+ "cmp x26, #0x10\n"
+ "fmla v10.8h, v6.8h, v0.h[7]\n"
+ "fmla v11.8h, v7.8h, v0.h[7]\n"
+ "add x25, x25, #0x10\n"
+ "ldr q0, [x25, #0x0]\n"
+ "add x11, x11, #0x80\n"
+ "ldr q6, [x11, #0x0]\n"
+ "add x10, x10, #0x80\n"
+ "ldr q7, [x10, #0x0]\n"
+ "add x9, x9, #0x80\n"
+ "add x28, x28, #0x80\n"
+ "bge 27b\n"
+ "28:" // Height 1: Multiply loop: Single iteration only
+ "fmla v8.8h, v6.8h, v0.h[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "fmla v9.8h, v7.8h, v0.h[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "fmla v10.8h, v6.8h, v0.h[0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "fmla v11.8h, v7.8h, v0.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "fmla v8.8h, v6.8h, v0.h[1]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "fmla v9.8h, v7.8h, v0.h[1]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "fmla v10.8h, v6.8h, v0.h[1]\n"
+ "ldr q6, [x11, #0x20]\n"
+ "fmla v11.8h, v7.8h, v0.h[1]\n"
+ "ldr q7, [x10, #0x20]\n"
+ "fmla v8.8h, v6.8h, v0.h[2]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "fmla v9.8h, v7.8h, v0.h[2]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "fmla v10.8h, v6.8h, v0.h[2]\n"
+ "ldr q6, [x11, #0x30]\n"
+ "fmla v11.8h, v7.8h, v0.h[2]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "fmla v8.8h, v6.8h, v0.h[3]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "fmla v9.8h, v7.8h, v0.h[3]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "fmla v10.8h, v6.8h, v0.h[3]\n"
+ "ldr q6, [x11, #0x40]\n"
+ "fmla v11.8h, v7.8h, v0.h[3]\n"
+ "ldr q7, [x10, #0x40]\n"
+ "fmla v8.8h, v6.8h, v0.h[4]\n"
+ "ldr q6, [x9, #0x40]\n"
+ "fmla v9.8h, v7.8h, v0.h[4]\n"
+ "ldr q7, [x28, #0x40]\n"
+ "fmla v10.8h, v6.8h, v0.h[4]\n"
+ "ldr q6, [x11, #0x50]\n"
+ "fmla v11.8h, v7.8h, v0.h[4]\n"
+ "ldr q7, [x10, #0x50]\n"
+ "fmla v8.8h, v6.8h, v0.h[5]\n"
+ "ldr q6, [x9, #0x50]\n"
+ "fmla v9.8h, v7.8h, v0.h[5]\n"
+ "ldr q7, [x28, #0x50]\n"
+ "fmla v10.8h, v6.8h, v0.h[5]\n"
+ "ldr q6, [x11, #0x60]\n"
+ "fmla v11.8h, v7.8h, v0.h[5]\n"
+ "ldr q7, [x10, #0x60]\n"
+ "fmla v8.8h, v6.8h, v0.h[6]\n"
+ "ldr q6, [x9, #0x60]\n"
+ "fmla v9.8h, v7.8h, v0.h[6]\n"
+ "ldr q7, [x28, #0x60]\n"
+ "fmla v10.8h, v6.8h, v0.h[6]\n"
+ "ldr q6, [x11, #0x70]\n"
+ "fmla v11.8h, v7.8h, v0.h[6]\n"
+ "ldr q7, [x10, #0x70]\n"
+ "fmla v8.8h, v6.8h, v0.h[7]\n"
+ "ldr q6, [x9, #0x70]\n"
+ "fmla v9.8h, v7.8h, v0.h[7]\n"
+ "ldr q7, [x28, #0x70]\n"
+ "sub x26, x26, #0x8\n"
+ "fmla v10.8h, v6.8h, v0.h[7]\n"
+ "fmla v11.8h, v7.8h, v0.h[7]\n"
+ "add x25, x25, #0x10\n"
+ "add x11, x11, #0x80\n"
+ "add x10, x10, #0x80\n"
+ "add x9, x9, #0x80\n"
+ "add x28, x28, #0x80\n"
+ "29:" // Height 1: Multiply loop: Main loop skip
+ "cbz x26, 31f\n"
+ "30:" // Height 1: Multiply loop: Odd block loop
+ "ldr h0, [x25], #0x2\n"
+ "ldr q6, [x11, #0x0]\n"
+ "fmla v8.8h, v6.8h, v0.h[0]\n"
+ "sub x26, x26, #0x1\n"
+ "ldr q7, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "fmla v9.8h, v7.8h, v0.h[0]\n"
+ "fmla v10.8h, v6.8h, v0.h[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "fmla v11.8h, v7.8h, v0.h[0]\n"
+ "add x11, x11, #0x10\n"
+ "add x10, x10, #0x10\n"
+ "add x9, x9, #0x10\n"
+ "add x28, x28, #0x10\n"
+ "cbnz x26, 30b\n"
+ "31:" // Height 1: Multiply loop: No odd multiplies
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 24b\n"
+ "tbz %x[flags], #1, 32f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.8h }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.8h }, [x19]\n"
+ "fmin v8.8h, v8.8h, v1.8h\n"
+ "fmin v9.8h, v9.8h, v1.8h\n"
+ "fmin v10.8h, v10.8h, v1.8h\n"
+ "fmin v11.8h, v11.8h, v1.8h\n"
+ "fmax v8.8h, v8.8h, v0.8h\n"
+ "fmax v9.8h, v9.8h, v0.8h\n"
+ "fmax v10.8h, v10.8h, v0.8h\n"
+ "fmax v11.8h, v11.8h, v0.8h\n"
+ "32:" // Height 1: No activation
+ "cmp x13, #0x20\n"
+ "bge 49f\n"
+ "tbz x13, #4, 40f\n"
+ "st1 { v8.8h }, [x12], #0x10\n"
+ "st1 { v9.8h }, [x12], #0x10\n"
+ "tbz x13, #3, 36f\n"
+ "st1 { v10.8h }, [x12], #0x10\n"
+ "tbz x13, #2, 34f\n"
+ "str d11, [x12], #0x8\n"
+ "tbz x13, #1, 33f\n"
+ "st1 { v11.s }[2], [x12], #0x4\n"
+ "tbz x13, #0, 48f\n"
+ "st1 { v11.h }[6], [x12]\n"
+ "b 48f\n"
+ "33:" // Height 1: Partial direct writeback: partial_1_28
+ "tbz x13, #0, 48f\n"
+ "st1 { v11.h }[4], [x12]\n"
+ "b 48f\n"
+ "34:" // Height 1: Partial direct writeback: partial_2_24
+ "tbz x13, #1, 35f\n"
+ "str s11, [x12], #0x4\n"
+ "tbz x13, #0, 48f\n"
+ "st1 { v11.h }[2], [x12]\n"
+ "b 48f\n"
+ "35:" // Height 1: Partial direct writeback: partial_1_24
+ "tbz x13, #0, 48f\n"
+ "str h11, [x12, #0x0]\n"
+ "b 48f\n"
+ "36:" // Height 1: Partial direct writeback: partial_4_16
+ "tbz x13, #2, 38f\n"
+ "str d10, [x12], #0x8\n"
+ "tbz x13, #1, 37f\n"
+ "st1 { v10.s }[2], [x12], #0x4\n"
+ "tbz x13, #0, 48f\n"
+ "st1 { v10.h }[6], [x12]\n"
+ "b 48f\n"
+ "37:" // Height 1: Partial direct writeback: partial_1_20
+ "tbz x13, #0, 48f\n"
+ "st1 { v10.h }[4], [x12]\n"
+ "b 48f\n"
+ "38:" // Height 1: Partial direct writeback: partial_2_16
+ "tbz x13, #1, 39f\n"
+ "str s10, [x12], #0x4\n"
+ "tbz x13, #0, 48f\n"
+ "st1 { v10.h }[2], [x12]\n"
+ "b 48f\n"
+ "39:" // Height 1: Partial direct writeback: partial_1_16
+ "tbz x13, #0, 48f\n"
+ "str h10, [x12, #0x0]\n"
+ "b 48f\n"
+ "40:" // Height 1: Partial direct writeback: partial_8_0
+ "tbz x13, #3, 44f\n"
+ "st1 { v8.8h }, [x12], #0x10\n"
+ "tbz x13, #2, 42f\n"
+ "str d9, [x12], #0x8\n"
+ "tbz x13, #1, 41f\n"
+ "st1 { v9.s }[2], [x12], #0x4\n"
+ "tbz x13, #0, 48f\n"
+ "st1 { v9.h }[6], [x12]\n"
+ "b 48f\n"
+ "41:" // Height 1: Partial direct writeback: partial_1_12
+ "tbz x13, #0, 48f\n"
+ "st1 { v9.h }[4], [x12]\n"
+ "b 48f\n"
+ "42:" // Height 1: Partial direct writeback: partial_2_8
+ "tbz x13, #1, 43f\n"
+ "str s9, [x12], #0x4\n"
+ "tbz x13, #0, 48f\n"
+ "st1 { v9.h }[2], [x12]\n"
+ "b 48f\n"
+ "43:" // Height 1: Partial direct writeback: partial_1_8
+ "tbz x13, #0, 48f\n"
+ "str h9, [x12, #0x0]\n"
+ "b 48f\n"
+ "44:" // Height 1: Partial direct writeback: partial_4_0
+ "tbz x13, #2, 46f\n"
+ "str d8, [x12], #0x8\n"
+ "tbz x13, #1, 45f\n"
+ "st1 { v8.s }[2], [x12], #0x4\n"
+ "tbz x13, #0, 48f\n"
+ "st1 { v8.h }[6], [x12]\n"
+ "b 48f\n"
+ "45:" // Height 1: Partial direct writeback: partial_1_4
+ "tbz x13, #0, 48f\n"
+ "st1 { v8.h }[4], [x12]\n"
+ "b 48f\n"
+ "46:" // Height 1: Partial direct writeback: partial_2_0
+ "tbz x13, #1, 47f\n"
+ "str s8, [x12], #0x4\n"
+ "tbz x13, #0, 48f\n"
+ "st1 { v8.h }[2], [x12]\n"
+ "b 48f\n"
+ "47:" // Height 1: Partial direct writeback: partial_1_0
+ "str h8, [x12, #0x0]\n"
+ "48:" // Height 1: Partial direct writeback: Done
+ "b 50f\n"
+ "49:" // Height 1: Full writeback
+ "str q8, [x12, #0x0]\n"
+ "str q9, [x12, #0x10]\n"
+ "str q10, [x12, #0x20]\n"
+ "str q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "50:" // Height 1: Writeback done
+ "subs x13, x13, #0x20\n"
+ "bgt 2b\n"
+ "b 302f\n"
+ "51:" // Height 2
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "52:" // Height 2: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0x18\n"
+ "bgt 53f\n"
+ "cmp x13, #0x10\n"
+ "mov x28, x11\n"
+ "bgt 53f\n"
+ "cmp x13, #0x8\n"
+ "mov x9, x11\n"
+ "bgt 53f\n"
+ "mov x10, x11\n"
+ "53:" // Height 2: B setup done
+ "cbz x14, 54f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
+ "add x14, x14, #0x40\n"
+ "b 73f\n"
+ "54:" // Height 2: no bias
+ "tbz %x[flags], #0, 72f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x13, #0x20\n"
+ "add x24, x12, x19, LSL #1\n"
+ "bge 71f\n"
+ "tbz x13, #4, 62f\n"
+ "ld1 { v8.8h }, [x12], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "ld1 { v9.8h }, [x12], #0x10\n"
+ "ld1 { v13.8h }, [x24], #0x10\n"
+ "tbz x13, #3, 58f\n"
+ "ld1 { v10.8h }, [x12], #0x10\n"
+ "ld1 { v14.8h }, [x24], #0x10\n"
+ "tbz x13, #2, 56f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "tbz x13, #1, 55f\n"
+ "ld1 { v11.s }[2], [x12], #0x4\n"
+ "ld1 { v15.s }[2], [x24], #0x4\n"
+ "mov x19, #0x3c\n"
+ "tbz x13, #0, 70f\n"
+ "ld1 { v11.h }[6], [x12]\n"
+ "ld1 { v15.h }[6], [x24]\n"
+ "b 70f\n"
+ "55:" // Height 2: Partial accumulate: partial_1_28
+ "mov x19, #0x38\n"
+ "tbz x13, #0, 70f\n"
+ "ld1 { v11.h }[4], [x12]\n"
+ "ld1 { v15.h }[4], [x24]\n"
+ "b 70f\n"
+ "56:" // Height 2: Partial accumulate: partial_2_24
+ "tbz x13, #1, 57f\n"
+ "ldr s11, [x12], #0x4\n"
+ "ldr s15, [x24], #0x4\n"
+ "mov x19, #0x34\n"
+ "tbz x13, #0, 70f\n"
+ "ld1 { v11.h }[2], [x12]\n"
+ "ld1 { v15.h }[2], [x24]\n"
+ "b 70f\n"
+ "57:" // Height 2: Partial accumulate: partial_1_24
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 70f\n"
+ "ldr h11, [x12, #0x0]\n"
+ "ldr h15, [x24, #0x0]\n"
+ "b 70f\n"
+ "58:" // Height 2: Partial accumulate: partial_4_16
+ "tbz x13, #2, 60f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "tbz x13, #1, 59f\n"
+ "ld1 { v10.s }[2], [x12], #0x4\n"
+ "ld1 { v14.s }[2], [x24], #0x4\n"
+ "mov x19, #0x2c\n"
+ "tbz x13, #0, 70f\n"
+ "ld1 { v10.h }[6], [x12]\n"
+ "ld1 { v14.h }[6], [x24]\n"
+ "b 70f\n"
+ "59:" // Height 2: Partial accumulate: partial_1_20
+ "mov x19, #0x28\n"
+ "tbz x13, #0, 70f\n"
+ "ld1 { v10.h }[4], [x12]\n"
+ "ld1 { v14.h }[4], [x24]\n"
+ "b 70f\n"
+ "60:" // Height 2: Partial accumulate: partial_2_16
+ "tbz x13, #1, 61f\n"
+ "ldr s10, [x12], #0x4\n"
+ "ldr s14, [x24], #0x4\n"
+ "mov x19, #0x24\n"
+ "tbz x13, #0, 70f\n"
+ "ld1 { v10.h }[2], [x12]\n"
+ "ld1 { v14.h }[2], [x24]\n"
+ "b 70f\n"
+ "61:" // Height 2: Partial accumulate: partial_1_16
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 70f\n"
+ "ldr h10, [x12, #0x0]\n"
+ "ldr h14, [x24, #0x0]\n"
+ "b 70f\n"
+ "62:" // Height 2: Partial accumulate: partial_8_0
+ "tbz x13, #3, 66f\n"
+ "ld1 { v8.8h }, [x12], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "tbz x13, #2, 64f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "tbz x13, #1, 63f\n"
+ "ld1 { v9.s }[2], [x12], #0x4\n"
+ "ld1 { v13.s }[2], [x24], #0x4\n"
+ "mov x19, #0x1c\n"
+ "tbz x13, #0, 70f\n"
+ "ld1 { v9.h }[6], [x12]\n"
+ "ld1 { v13.h }[6], [x24]\n"
+ "b 70f\n"
+ "63:" // Height 2: Partial accumulate: partial_1_12
+ "mov x19, #0x18\n"
+ "tbz x13, #0, 70f\n"
+ "ld1 { v9.h }[4], [x12]\n"
+ "ld1 { v13.h }[4], [x24]\n"
+ "b 70f\n"
+ "64:" // Height 2: Partial accumulate: partial_2_8
+ "tbz x13, #1, 65f\n"
+ "ldr s9, [x12], #0x4\n"
+ "ldr s13, [x24], #0x4\n"
+ "mov x19, #0x14\n"
+ "tbz x13, #0, 70f\n"
+ "ld1 { v9.h }[2], [x12]\n"
+ "ld1 { v13.h }[2], [x24]\n"
+ "b 70f\n"
+ "65:" // Height 2: Partial accumulate: partial_1_8
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 70f\n"
+ "ldr h9, [x12, #0x0]\n"
+ "ldr h13, [x24, #0x0]\n"
+ "b 70f\n"
+ "66:" // Height 2: Partial accumulate: partial_4_0
+ "tbz x13, #2, 68f\n"
+ "ldr d8, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "tbz x13, #1, 67f\n"
+ "ld1 { v8.s }[2], [x12], #0x4\n"
+ "ld1 { v12.s }[2], [x24], #0x4\n"
+ "mov x19, #0xc\n"
+ "tbz x13, #0, 70f\n"
+ "ld1 { v8.h }[6], [x12]\n"
+ "ld1 { v12.h }[6], [x24]\n"
+ "b 70f\n"
+ "67:" // Height 2: Partial accumulate: partial_1_4
+ "mov x19, #0x8\n"
+ "tbz x13, #0, 70f\n"
+ "ld1 { v8.h }[4], [x12]\n"
+ "ld1 { v12.h }[4], [x24]\n"
+ "b 70f\n"
+ "68:" // Height 2: Partial accumulate: partial_2_0
+ "tbz x13, #1, 69f\n"
+ "ldr s8, [x12], #0x4\n"
+ "ldr s12, [x24], #0x4\n"
+ "mov x19, #0x4\n"
+ "tbz x13, #0, 70f\n"
+ "ld1 { v8.h }[2], [x12]\n"
+ "ld1 { v12.h }[2], [x24]\n"
+ "b 70f\n"
+ "69:" // Height 2: Partial accumulate: partial_1_0
+ "ldr h8, [x12, #0x0]\n"
+ "ldr h12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
+ "70:" // Height 2: Partial accumulate: Done
+ "sub x12, x12, x19\n"
+ "b 73f\n"
+ "71:" // Height 2: full accumulate
+ "ldr q8, [x12, #0x0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "ldr q10, [x12, #0x20]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "b 73f\n"
+ "72:" // Height 2: no accumulate
+ "movi v8.16b, #0x0\n"
+ "movi v9.16b, #0x0\n"
+ "movi v10.16b, #0x0\n"
+ "movi v11.16b, #0x0\n"
+ "movi v12.16b, #0x0\n"
+ "movi v13.16b, #0x0\n"
+ "movi v14.16b, #0x0\n"
+ "movi v15.16b, #0x0\n"
+ "73:" // Height 2: setup done
+ "mov x27, #0x0\n"
+ "74:" // Height 2: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 75f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 76f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "b 76f\n"
+ "75:" // Height 2: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "76:" // Height 2: input setup done
+ "cmp x26, #0x8\n"
+ "blt 79f\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "blt 78f\n"
+ "77:" // Height 2: Multiply loop: Main loop head
+ "fmla v8.8h, v6.8h, v0.h[0]\n"
+ "fmla v12.8h, v6.8h, v1.h[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "sub x26, x26, #0x8\n"
+ "fmla v9.8h, v7.8h, v0.h[0]\n"
+ "fmla v13.8h, v7.8h, v1.h[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "fmla v10.8h, v6.8h, v0.h[0]\n"
+ "fmla v14.8h, v6.8h, v1.h[0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "add x25, x25, #0x10\n"
+ "fmla v11.8h, v7.8h, v0.h[0]\n"
+ "fmla v15.8h, v7.8h, v1.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "add x24, x24, #0x10\n"
+ "fmla v8.8h, v6.8h, v0.h[1]\n"
+ "fmla v12.8h, v6.8h, v1.h[1]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "fmla v9.8h, v7.8h, v0.h[1]\n"
+ "fmla v13.8h, v7.8h, v1.h[1]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "fmla v10.8h, v6.8h, v0.h[1]\n"
+ "fmla v14.8h, v6.8h, v1.h[1]\n"
+ "ldr q6, [x11, #0x20]\n"
+ "fmla v11.8h, v7.8h, v0.h[1]\n"
+ "fmla v15.8h, v7.8h, v1.h[1]\n"
+ "ldr q7, [x10, #0x20]\n"
+ "fmla v8.8h, v6.8h, v0.h[2]\n"
+ "fmla v12.8h, v6.8h, v1.h[2]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "fmla v9.8h, v7.8h, v0.h[2]\n"
+ "fmla v13.8h, v7.8h, v1.h[2]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "fmla v10.8h, v6.8h, v0.h[2]\n"
+ "fmla v14.8h, v6.8h, v1.h[2]\n"
+ "ldr q6, [x11, #0x30]\n"
+ "fmla v11.8h, v7.8h, v0.h[2]\n"
+ "fmla v15.8h, v7.8h, v1.h[2]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "fmla v8.8h, v6.8h, v0.h[3]\n"
+ "fmla v12.8h, v6.8h, v1.h[3]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "fmla v9.8h, v7.8h, v0.h[3]\n"
+ "fmla v13.8h, v7.8h, v1.h[3]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "fmla v10.8h, v6.8h, v0.h[3]\n"
+ "fmla v14.8h, v6.8h, v1.h[3]\n"
+ "ldr q6, [x11, #0x40]\n"
+ "fmla v11.8h, v7.8h, v0.h[3]\n"
+ "fmla v15.8h, v7.8h, v1.h[3]\n"
+ "ldr q7, [x10, #0x40]\n"
+ "fmla v8.8h, v6.8h, v0.h[4]\n"
+ "fmla v12.8h, v6.8h, v1.h[4]\n"
+ "ldr q6, [x9, #0x40]\n"
+ "fmla v9.8h, v7.8h, v0.h[4]\n"
+ "fmla v13.8h, v7.8h, v1.h[4]\n"
+ "ldr q7, [x28, #0x40]\n"
+ "fmla v10.8h, v6.8h, v0.h[4]\n"
+ "fmla v14.8h, v6.8h, v1.h[4]\n"
+ "ldr q6, [x11, #0x50]\n"
+ "fmla v11.8h, v7.8h, v0.h[4]\n"
+ "fmla v15.8h, v7.8h, v1.h[4]\n"
+ "ldr q7, [x10, #0x50]\n"
+ "fmla v8.8h, v6.8h, v0.h[5]\n"
+ "fmla v12.8h, v6.8h, v1.h[5]\n"
+ "ldr q6, [x9, #0x50]\n"
+ "fmla v9.8h, v7.8h, v0.h[5]\n"
+ "fmla v13.8h, v7.8h, v1.h[5]\n"
+ "ldr q7, [x28, #0x50]\n"
+ "fmla v10.8h, v6.8h, v0.h[5]\n"
+ "fmla v14.8h, v6.8h, v1.h[5]\n"
+ "ldr q6, [x11, #0x60]\n"
+ "fmla v11.8h, v7.8h, v0.h[5]\n"
+ "fmla v15.8h, v7.8h, v1.h[5]\n"
+ "ldr q7, [x10, #0x60]\n"
+ "fmla v8.8h, v6.8h, v0.h[6]\n"
+ "fmla v12.8h, v6.8h, v1.h[6]\n"
+ "ldr q6, [x9, #0x60]\n"
+ "fmla v9.8h, v7.8h, v0.h[6]\n"
+ "fmla v13.8h, v7.8h, v1.h[6]\n"
+ "ldr q7, [x28, #0x60]\n"
+ "fmla v10.8h, v6.8h, v0.h[6]\n"
+ "fmla v14.8h, v6.8h, v1.h[6]\n"
+ "ldr q6, [x11, #0x70]\n"
+ "add x11, x11, #0x80\n"
+ "fmla v11.8h, v7.8h, v0.h[6]\n"
+ "fmla v15.8h, v7.8h, v1.h[6]\n"
+ "ldr q7, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
+ "fmla v8.8h, v6.8h, v0.h[7]\n"
+ "fmla v12.8h, v6.8h, v1.h[7]\n"
+ "ldr q6, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
+ "fmla v9.8h, v7.8h, v0.h[7]\n"
+ "fmla v13.8h, v7.8h, v1.h[7]\n"
+ "ldr q7, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
+ "fmla v10.8h, v6.8h, v0.h[7]\n"
+ "fmla v14.8h, v6.8h, v1.h[7]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "fmla v11.8h, v7.8h, v0.h[7]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "fmla v15.8h, v7.8h, v1.h[7]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "bge 77b\n"
+ "78:" // Height 2: Multiply loop: Single iteration only
+ "fmla v8.8h, v6.8h, v0.h[0]\n"
+ "fmla v12.8h, v6.8h, v1.h[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "sub x26, x26, #0x8\n"
+ "fmla v9.8h, v7.8h, v0.h[0]\n"
+ "fmla v13.8h, v7.8h, v1.h[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x25, x25, #0x10\n"
+ "fmla v10.8h, v6.8h, v0.h[0]\n"
+ "fmla v14.8h, v6.8h, v1.h[0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "add x24, x24, #0x10\n"
+ "fmla v11.8h, v7.8h, v0.h[0]\n"
+ "fmla v15.8h, v7.8h, v1.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "fmla v8.8h, v6.8h, v0.h[1]\n"
+ "fmla v12.8h, v6.8h, v1.h[1]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "fmla v9.8h, v7.8h, v0.h[1]\n"
+ "fmla v13.8h, v7.8h, v1.h[1]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "fmla v10.8h, v6.8h, v0.h[1]\n"
+ "fmla v14.8h, v6.8h, v1.h[1]\n"
+ "ldr q6, [x11, #0x20]\n"
+ "fmla v11.8h, v7.8h, v0.h[1]\n"
+ "fmla v15.8h, v7.8h, v1.h[1]\n"
+ "ldr q7, [x10, #0x20]\n"
+ "fmla v8.8h, v6.8h, v0.h[2]\n"
+ "fmla v12.8h, v6.8h, v1.h[2]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "fmla v9.8h, v7.8h, v0.h[2]\n"
+ "fmla v13.8h, v7.8h, v1.h[2]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "fmla v10.8h, v6.8h, v0.h[2]\n"
+ "fmla v14.8h, v6.8h, v1.h[2]\n"
+ "ldr q6, [x11, #0x30]\n"
+ "fmla v11.8h, v7.8h, v0.h[2]\n"
+ "fmla v15.8h, v7.8h, v1.h[2]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "fmla v8.8h, v6.8h, v0.h[3]\n"
+ "fmla v12.8h, v6.8h, v1.h[3]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "fmla v9.8h, v7.8h, v0.h[3]\n"
+ "fmla v13.8h, v7.8h, v1.h[3]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "fmla v10.8h, v6.8h, v0.h[3]\n"
+ "fmla v14.8h, v6.8h, v1.h[3]\n"
+ "ldr q6, [x11, #0x40]\n"
+ "fmla v11.8h, v7.8h, v0.h[3]\n"
+ "fmla v15.8h, v7.8h, v1.h[3]\n"
+ "ldr q7, [x10, #0x40]\n"
+ "fmla v8.8h, v6.8h, v0.h[4]\n"
+ "fmla v12.8h, v6.8h, v1.h[4]\n"
+ "ldr q6, [x9, #0x40]\n"
+ "fmla v9.8h, v7.8h, v0.h[4]\n"
+ "fmla v13.8h, v7.8h, v1.h[4]\n"
+ "ldr q7, [x28, #0x40]\n"
+ "fmla v10.8h, v6.8h, v0.h[4]\n"
+ "fmla v14.8h, v6.8h, v1.h[4]\n"
+ "ldr q6, [x11, #0x50]\n"
+ "fmla v11.8h, v7.8h, v0.h[4]\n"
+ "fmla v15.8h, v7.8h, v1.h[4]\n"
+ "ldr q7, [x10, #0x50]\n"
+ "fmla v8.8h, v6.8h, v0.h[5]\n"
+ "fmla v12.8h, v6.8h, v1.h[5]\n"
+ "ldr q6, [x9, #0x50]\n"
+ "fmla v9.8h, v7.8h, v0.h[5]\n"
+ "fmla v13.8h, v7.8h, v1.h[5]\n"
+ "ldr q7, [x28, #0x50]\n"
+ "fmla v10.8h, v6.8h, v0.h[5]\n"
+ "fmla v14.8h, v6.8h, v1.h[5]\n"
+ "ldr q6, [x11, #0x60]\n"
+ "fmla v11.8h, v7.8h, v0.h[5]\n"
+ "fmla v15.8h, v7.8h, v1.h[5]\n"
+ "ldr q7, [x10, #0x60]\n"
+ "fmla v8.8h, v6.8h, v0.h[6]\n"
+ "fmla v12.8h, v6.8h, v1.h[6]\n"
+ "ldr q6, [x9, #0x60]\n"
+ "fmla v9.8h, v7.8h, v0.h[6]\n"
+ "fmla v13.8h, v7.8h, v1.h[6]\n"
+ "ldr q7, [x28, #0x60]\n"
+ "fmla v10.8h, v6.8h, v0.h[6]\n"
+ "fmla v14.8h, v6.8h, v1.h[6]\n"
+ "ldr q6, [x11, #0x70]\n"
+ "add x11, x11, #0x80\n"
+ "fmla v11.8h, v7.8h, v0.h[6]\n"
+ "fmla v15.8h, v7.8h, v1.h[6]\n"
+ "ldr q7, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
+ "fmla v8.8h, v6.8h, v0.h[7]\n"
+ "fmla v12.8h, v6.8h, v1.h[7]\n"
+ "ldr q6, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
+ "fmla v9.8h, v7.8h, v0.h[7]\n"
+ "fmla v13.8h, v7.8h, v1.h[7]\n"
+ "ldr q7, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
+ "fmla v10.8h, v6.8h, v0.h[7]\n"
+ "fmla v14.8h, v6.8h, v1.h[7]\n"
+ "fmla v11.8h, v7.8h, v0.h[7]\n"
+ "fmla v15.8h, v7.8h, v1.h[7]\n"
+ "79:" // Height 2: Multiply loop: Main loop skip
+ "cbz x26, 81f\n"
+ "80:" // Height 2: Multiply loop: Odd block loop
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x24], #0x2\n"
+ "sub x26, x26, #0x1\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "fmla v8.8h, v6.8h, v0.h[0]\n"
+ "fmla v12.8h, v6.8h, v1.h[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "fmla v9.8h, v7.8h, v0.h[0]\n"
+ "fmla v13.8h, v7.8h, v1.h[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "fmla v10.8h, v6.8h, v0.h[0]\n"
+ "fmla v14.8h, v6.8h, v1.h[0]\n"
+ "add x11, x11, #0x10\n"
+ "add x10, x10, #0x10\n"
+ "fmla v11.8h, v7.8h, v0.h[0]\n"
+ "fmla v15.8h, v7.8h, v1.h[0]\n"
+ "add x9, x9, #0x10\n"
+ "add x28, x28, #0x10\n"
+ "cbnz x26, 80b\n"
+ "81:" // Height 2: Multiply loop: No odd multiplies
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 74b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "tbz %x[flags], #1, 82f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.8h }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.8h }, [x19]\n"
+ "fmin v8.8h, v8.8h, v1.8h\n"
+ "fmin v9.8h, v9.8h, v1.8h\n"
+ "fmin v10.8h, v10.8h, v1.8h\n"
+ "fmin v11.8h, v11.8h, v1.8h\n"
+ "fmin v12.8h, v12.8h, v1.8h\n"
+ "fmin v13.8h, v13.8h, v1.8h\n"
+ "fmin v14.8h, v14.8h, v1.8h\n"
+ "fmin v15.8h, v15.8h, v1.8h\n"
+ "fmax v8.8h, v8.8h, v0.8h\n"
+ "fmax v9.8h, v9.8h, v0.8h\n"
+ "fmax v10.8h, v10.8h, v0.8h\n"
+ "fmax v11.8h, v11.8h, v0.8h\n"
+ "fmax v12.8h, v12.8h, v0.8h\n"
+ "fmax v13.8h, v13.8h, v0.8h\n"
+ "fmax v14.8h, v14.8h, v0.8h\n"
+ "fmax v15.8h, v15.8h, v0.8h\n"
+ "82:" // Height 2: No activation
+ "cmp x13, #0x20\n"
+ "bge 99f\n"
+ "tbz x13, #4, 90f\n"
+ "st1 { v8.8h }, [x12], #0x10\n"
+ "st1 { v9.8h }, [x12], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "st1 { v13.8h }, [x24], #0x10\n"
+ "tbz x13, #3, 86f\n"
+ "st1 { v10.8h }, [x12], #0x10\n"
+ "st1 { v14.8h }, [x24], #0x10\n"
+ "tbz x13, #2, 84f\n"
+ "str d11, [x12], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "tbz x13, #1, 83f\n"
+ "st1 { v11.s }[2], [x12], #0x4\n"
+ "st1 { v15.s }[2], [x24], #0x4\n"
+ "tbz x13, #0, 98f\n"
+ "st1 { v11.h }[6], [x12]\n"
+ "st1 { v15.h }[6], [x24]\n"
+ "b 98f\n"
+ "83:" // Height 2: Partial direct writeback: partial_1_28
+ "tbz x13, #0, 98f\n"
+ "st1 { v11.h }[4], [x12]\n"
+ "st1 { v15.h }[4], [x24]\n"
+ "b 98f\n"
+ "84:" // Height 2: Partial direct writeback: partial_2_24
+ "tbz x13, #1, 85f\n"
+ "str s11, [x12], #0x4\n"
+ "str s15, [x24], #0x4\n"
+ "tbz x13, #0, 98f\n"
+ "st1 { v11.h }[2], [x12]\n"
+ "st1 { v15.h }[2], [x24]\n"
+ "b 98f\n"
+ "85:" // Height 2: Partial direct writeback: partial_1_24
+ "tbz x13, #0, 98f\n"
+ "str h11, [x12, #0x0]\n"
+ "str h15, [x24, #0x0]\n"
+ "b 98f\n"
+ "86:" // Height 2: Partial direct writeback: partial_4_16
+ "tbz x13, #2, 88f\n"
+ "str d10, [x12], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "tbz x13, #1, 87f\n"
+ "st1 { v10.s }[2], [x12], #0x4\n"
+ "st1 { v14.s }[2], [x24], #0x4\n"
+ "tbz x13, #0, 98f\n"
+ "st1 { v10.h }[6], [x12]\n"
+ "st1 { v14.h }[6], [x24]\n"
+ "b 98f\n"
+ "87:" // Height 2: Partial direct writeback: partial_1_20
+ "tbz x13, #0, 98f\n"
+ "st1 { v10.h }[4], [x12]\n"
+ "st1 { v14.h }[4], [x24]\n"
+ "b 98f\n"
+ "88:" // Height 2: Partial direct writeback: partial_2_16
+ "tbz x13, #1, 89f\n"
+ "str s10, [x12], #0x4\n"
+ "str s14, [x24], #0x4\n"
+ "tbz x13, #0, 98f\n"
+ "st1 { v10.h }[2], [x12]\n"
+ "st1 { v14.h }[2], [x24]\n"
+ "b 98f\n"
+ "89:" // Height 2: Partial direct writeback: partial_1_16
+ "tbz x13, #0, 98f\n"
+ "str h10, [x12, #0x0]\n"
+ "str h14, [x24, #0x0]\n"
+ "b 98f\n"
+ "90:" // Height 2: Partial direct writeback: partial_8_0
+ "tbz x13, #3, 94f\n"
+ "st1 { v8.8h }, [x12], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "tbz x13, #2, 92f\n"
+ "str d9, [x12], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "tbz x13, #1, 91f\n"
+ "st1 { v9.s }[2], [x12], #0x4\n"
+ "st1 { v13.s }[2], [x24], #0x4\n"
+ "tbz x13, #0, 98f\n"
+ "st1 { v9.h }[6], [x12]\n"
+ "st1 { v13.h }[6], [x24]\n"
+ "b 98f\n"
+ "91:" // Height 2: Partial direct writeback: partial_1_12
+ "tbz x13, #0, 98f\n"
+ "st1 { v9.h }[4], [x12]\n"
+ "st1 { v13.h }[4], [x24]\n"
+ "b 98f\n"
+ "92:" // Height 2: Partial direct writeback: partial_2_8
+ "tbz x13, #1, 93f\n"
+ "str s9, [x12], #0x4\n"
+ "str s13, [x24], #0x4\n"
+ "tbz x13, #0, 98f\n"
+ "st1 { v9.h }[2], [x12]\n"
+ "st1 { v13.h }[2], [x24]\n"
+ "b 98f\n"
+ "93:" // Height 2: Partial direct writeback: partial_1_8
+ "tbz x13, #0, 98f\n"
+ "str h9, [x12, #0x0]\n"
+ "str h13, [x24, #0x0]\n"
+ "b 98f\n"
+ "94:" // Height 2: Partial direct writeback: partial_4_0
+ "tbz x13, #2, 96f\n"
+ "str d8, [x12], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "tbz x13, #1, 95f\n"
+ "st1 { v8.s }[2], [x12], #0x4\n"
+ "st1 { v12.s }[2], [x24], #0x4\n"
+ "tbz x13, #0, 98f\n"
+ "st1 { v8.h }[6], [x12]\n"
+ "st1 { v12.h }[6], [x24]\n"
+ "b 98f\n"
+ "95:" // Height 2: Partial direct writeback: partial_1_4
+ "tbz x13, #0, 98f\n"
+ "st1 { v8.h }[4], [x12]\n"
+ "st1 { v12.h }[4], [x24]\n"
+ "b 98f\n"
+ "96:" // Height 2: Partial direct writeback: partial_2_0
+ "tbz x13, #1, 97f\n"
+ "str s8, [x12], #0x4\n"
+ "str s12, [x24], #0x4\n"
+ "tbz x13, #0, 98f\n"
+ "st1 { v8.h }[2], [x12]\n"
+ "st1 { v12.h }[2], [x24]\n"
+ "b 98f\n"
+ "97:" // Height 2: Partial direct writeback: partial_1_0
+ "str h8, [x12, #0x0]\n"
+ "str h12, [x24, #0x0]\n"
+ "98:" // Height 2: Partial direct writeback: Done
+ "b 100f\n"
+ "99:" // Height 2: Full writeback
+ "str q8, [x12, #0x0]\n"
+ "str q9, [x12, #0x10]\n"
+ "str q10, [x12, #0x20]\n"
+ "str q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "100:" // Height 2: Writeback done
+ "subs x13, x13, #0x20\n"
+ "bgt 52b\n"
+ "b 302f\n"
+ "101:" // Height 3
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "102:" // Height 3: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0x18\n"
+ "bgt 103f\n"
+ "cmp x13, #0x10\n"
+ "mov x28, x11\n"
+ "bgt 103f\n"
+ "cmp x13, #0x8\n"
+ "mov x9, x11\n"
+ "bgt 103f\n"
+ "mov x10, x11\n"
+ "103:" // Height 3: B setup done
+ "cbz x14, 104f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
+ "mov v16.16b, v8.16b\n"
+ "mov v17.16b, v9.16b\n"
+ "add x14, x14, #0x40\n"
+ "mov v18.16b, v10.16b\n"
+ "mov v19.16b, v11.16b\n"
+ "b 123f\n"
+ "104:" // Height 3: no bias
+ "tbz %x[flags], #0, 122f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "cmp x13, #0x20\n"
+ "add x23, x24, x19, LSL #1\n"
+ "bge 121f\n"
+ "tbz x13, #4, 112f\n"
+ "ld1 { v8.8h }, [x12], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "ld1 { v16.8h }, [x23], #0x10\n"
+ "ld1 { v9.8h }, [x12], #0x10\n"
+ "ld1 { v13.8h }, [x24], #0x10\n"
+ "ld1 { v17.8h }, [x23], #0x10\n"
+ "tbz x13, #3, 108f\n"
+ "ld1 { v10.8h }, [x12], #0x10\n"
+ "ld1 { v14.8h }, [x24], #0x10\n"
+ "ld1 { v18.8h }, [x23], #0x10\n"
+ "tbz x13, #2, 106f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d19, [x23], #0x8\n"
+ "tbz x13, #1, 105f\n"
+ "ld1 { v11.s }[2], [x12], #0x4\n"
+ "ld1 { v15.s }[2], [x24], #0x4\n"
+ "mov x19, #0x3c\n"
+ "ld1 { v19.s }[2], [x23], #0x4\n"
+ "tbz x13, #0, 120f\n"
+ "ld1 { v11.h }[6], [x12]\n"
+ "ld1 { v15.h }[6], [x24]\n"
+ "ld1 { v19.h }[6], [x23]\n"
+ "b 120f\n"
+ "105:" // Height 3: Partial accumulate: partial_1_28
+ "mov x19, #0x38\n"
+ "tbz x13, #0, 120f\n"
+ "ld1 { v11.h }[4], [x12]\n"
+ "ld1 { v15.h }[4], [x24]\n"
+ "ld1 { v19.h }[4], [x23]\n"
+ "b 120f\n"
+ "106:" // Height 3: Partial accumulate: partial_2_24
+ "tbz x13, #1, 107f\n"
+ "ldr s11, [x12], #0x4\n"
+ "ldr s15, [x24], #0x4\n"
+ "mov x19, #0x34\n"
+ "ldr s19, [x23], #0x4\n"
+ "tbz x13, #0, 120f\n"
+ "ld1 { v11.h }[2], [x12]\n"
+ "ld1 { v15.h }[2], [x24]\n"
+ "ld1 { v19.h }[2], [x23]\n"
+ "b 120f\n"
+ "107:" // Height 3: Partial accumulate: partial_1_24
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 120f\n"
+ "ldr h11, [x12, #0x0]\n"
+ "ldr h15, [x24, #0x0]\n"
+ "ldr h19, [x23, #0x0]\n"
+ "b 120f\n"
+ "108:" // Height 3: Partial accumulate: partial_4_16
+ "tbz x13, #2, 110f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "ldr d18, [x23], #0x8\n"
+ "tbz x13, #1, 109f\n"
+ "ld1 { v10.s }[2], [x12], #0x4\n"
+ "ld1 { v14.s }[2], [x24], #0x4\n"
+ "mov x19, #0x2c\n"
+ "ld1 { v18.s }[2], [x23], #0x4\n"
+ "tbz x13, #0, 120f\n"
+ "ld1 { v10.h }[6], [x12]\n"
+ "ld1 { v14.h }[6], [x24]\n"
+ "ld1 { v18.h }[6], [x23]\n"
+ "b 120f\n"
+ "109:" // Height 3: Partial accumulate: partial_1_20
+ "mov x19, #0x28\n"
+ "tbz x13, #0, 120f\n"
+ "ld1 { v10.h }[4], [x12]\n"
+ "ld1 { v14.h }[4], [x24]\n"
+ "ld1 { v18.h }[4], [x23]\n"
+ "b 120f\n"
+ "110:" // Height 3: Partial accumulate: partial_2_16
+ "tbz x13, #1, 111f\n"
+ "ldr s10, [x12], #0x4\n"
+ "ldr s14, [x24], #0x4\n"
+ "mov x19, #0x24\n"
+ "ldr s18, [x23], #0x4\n"
+ "tbz x13, #0, 120f\n"
+ "ld1 { v10.h }[2], [x12]\n"
+ "ld1 { v14.h }[2], [x24]\n"
+ "ld1 { v18.h }[2], [x23]\n"
+ "b 120f\n"
+ "111:" // Height 3: Partial accumulate: partial_1_16
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 120f\n"
+ "ldr h10, [x12, #0x0]\n"
+ "ldr h14, [x24, #0x0]\n"
+ "ldr h18, [x23, #0x0]\n"
+ "b 120f\n"
+ "112:" // Height 3: Partial accumulate: partial_8_0
+ "tbz x13, #3, 116f\n"
+ "ld1 { v8.8h }, [x12], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "ld1 { v16.8h }, [x23], #0x10\n"
+ "tbz x13, #2, 114f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "tbz x13, #1, 113f\n"
+ "ld1 { v9.s }[2], [x12], #0x4\n"
+ "ld1 { v13.s }[2], [x24], #0x4\n"
+ "mov x19, #0x1c\n"
+ "ld1 { v17.s }[2], [x23], #0x4\n"
+ "tbz x13, #0, 120f\n"
+ "ld1 { v9.h }[6], [x12]\n"
+ "ld1 { v13.h }[6], [x24]\n"
+ "ld1 { v17.h }[6], [x23]\n"
+ "b 120f\n"
+ "113:" // Height 3: Partial accumulate: partial_1_12
+ "mov x19, #0x18\n"
+ "tbz x13, #0, 120f\n"
+ "ld1 { v9.h }[4], [x12]\n"
+ "ld1 { v13.h }[4], [x24]\n"
+ "ld1 { v17.h }[4], [x23]\n"
+ "b 120f\n"
+ "114:" // Height 3: Partial accumulate: partial_2_8
+ "tbz x13, #1, 115f\n"
+ "ldr s9, [x12], #0x4\n"
+ "ldr s13, [x24], #0x4\n"
+ "mov x19, #0x14\n"
+ "ldr s17, [x23], #0x4\n"
+ "tbz x13, #0, 120f\n"
+ "ld1 { v9.h }[2], [x12]\n"
+ "ld1 { v13.h }[2], [x24]\n"
+ "ld1 { v17.h }[2], [x23]\n"
+ "b 120f\n"
+ "115:" // Height 3: Partial accumulate: partial_1_8
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 120f\n"
+ "ldr h9, [x12, #0x0]\n"
+ "ldr h13, [x24, #0x0]\n"
+ "ldr h17, [x23, #0x0]\n"
+ "b 120f\n"
+ "116:" // Height 3: Partial accumulate: partial_4_0
+ "tbz x13, #2, 118f\n"
+ "ldr d8, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "tbz x13, #1, 117f\n"
+ "ld1 { v8.s }[2], [x12], #0x4\n"
+ "ld1 { v12.s }[2], [x24], #0x4\n"
+ "mov x19, #0xc\n"
+ "ld1 { v16.s }[2], [x23], #0x4\n"
+ "tbz x13, #0, 120f\n"
+ "ld1 { v8.h }[6], [x12]\n"
+ "ld1 { v12.h }[6], [x24]\n"
+ "ld1 { v16.h }[6], [x23]\n"
+ "b 120f\n"
+ "117:" // Height 3: Partial accumulate: partial_1_4
+ "mov x19, #0x8\n"
+ "tbz x13, #0, 120f\n"
+ "ld1 { v8.h }[4], [x12]\n"
+ "ld1 { v12.h }[4], [x24]\n"
+ "ld1 { v16.h }[4], [x23]\n"
+ "b 120f\n"
+ "118:" // Height 3: Partial accumulate: partial_2_0
+ "tbz x13, #1, 119f\n"
+ "ldr s8, [x12], #0x4\n"
+ "ldr s12, [x24], #0x4\n"
+ "mov x19, #0x4\n"
+ "ldr s16, [x23], #0x4\n"
+ "tbz x13, #0, 120f\n"
+ "ld1 { v8.h }[2], [x12]\n"
+ "ld1 { v12.h }[2], [x24]\n"
+ "ld1 { v16.h }[2], [x23]\n"
+ "b 120f\n"
+ "119:" // Height 3: Partial accumulate: partial_1_0
+ "ldr h8, [x12, #0x0]\n"
+ "ldr h12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr h16, [x23, #0x0]\n"
+ "120:" // Height 3: Partial accumulate: Done
+ "sub x12, x12, x19\n"
+ "b 123f\n"
+ "121:" // Height 3: full accumulate
+ "ldr q8, [x12, #0x0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "ldr q10, [x12, #0x20]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
+ "b 123f\n"
+ "122:" // Height 3: no accumulate
+ "movi v8.16b, #0x0\n"
+ "movi v9.16b, #0x0\n"
+ "movi v10.16b, #0x0\n"
+ "movi v11.16b, #0x0\n"
+ "movi v12.16b, #0x0\n"
+ "movi v13.16b, #0x0\n"
+ "movi v14.16b, #0x0\n"
+ "movi v15.16b, #0x0\n"
+ "movi v16.16b, #0x0\n"
+ "movi v17.16b, #0x0\n"
+ "movi v18.16b, #0x0\n"
+ "movi v19.16b, #0x0\n"
+ "123:" // Height 3: setup done
+ "mov x27, #0x0\n"
+ "124:" // Height 3: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 125f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 126f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "b 126f\n"
+ "125:" // Height 3: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "126:" // Height 3: input setup done
+ "cmp x26, #0x8\n"
+ "blt 129f\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "blt 128f\n"
+ "127:" // Height 3: Multiply loop: Main loop head
+ "fmla v8.8h, v6.8h, v0.h[0]\n"
+ "fmla v12.8h, v6.8h, v1.h[0]\n"
+ "sub x26, x26, #0x8\n"
+ "cmp x26, #0x10\n"
+ "fmla v16.8h, v6.8h, v2.h[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "fmla v9.8h, v7.8h, v0.h[0]\n"
+ "add x25, x25, #0x10\n"
+ "fmla v13.8h, v7.8h, v1.h[0]\n"
+ "fmla v17.8h, v7.8h, v2.h[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x24, x24, #0x10\n"
+ "fmla v10.8h, v6.8h, v0.h[0]\n"
+ "fmla v14.8h, v6.8h, v1.h[0]\n"
+ "add x23, x23, #0x10\n"
+ "fmla v18.8h, v6.8h, v2.h[0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "fmla v11.8h, v7.8h, v0.h[0]\n"
+ "fmla v15.8h, v7.8h, v1.h[0]\n"
+ "fmla v19.8h, v7.8h, v2.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "fmla v8.8h, v6.8h, v0.h[1]\n"
+ "fmla v12.8h, v6.8h, v1.h[1]\n"
+ "fmla v16.8h, v6.8h, v2.h[1]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "fmla v9.8h, v7.8h, v0.h[1]\n"
+ "fmla v13.8h, v7.8h, v1.h[1]\n"
+ "fmla v17.8h, v7.8h, v2.h[1]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "fmla v10.8h, v6.8h, v0.h[1]\n"
+ "fmla v14.8h, v6.8h, v1.h[1]\n"
+ "fmla v18.8h, v6.8h, v2.h[1]\n"
+ "ldr q6, [x11, #0x20]\n"
+ "fmla v11.8h, v7.8h, v0.h[1]\n"
+ "fmla v15.8h, v7.8h, v1.h[1]\n"
+ "fmla v19.8h, v7.8h, v2.h[1]\n"
+ "ldr q7, [x10, #0x20]\n"
+ "fmla v8.8h, v6.8h, v0.h[2]\n"
+ "fmla v12.8h, v6.8h, v1.h[2]\n"
+ "fmla v16.8h, v6.8h, v2.h[2]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "fmla v9.8h, v7.8h, v0.h[2]\n"
+ "fmla v13.8h, v7.8h, v1.h[2]\n"
+ "fmla v17.8h, v7.8h, v2.h[2]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "fmla v10.8h, v6.8h, v0.h[2]\n"
+ "fmla v14.8h, v6.8h, v1.h[2]\n"
+ "fmla v18.8h, v6.8h, v2.h[2]\n"
+ "ldr q6, [x11, #0x30]\n"
+ "fmla v11.8h, v7.8h, v0.h[2]\n"
+ "fmla v15.8h, v7.8h, v1.h[2]\n"
+ "fmla v19.8h, v7.8h, v2.h[2]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "fmla v8.8h, v6.8h, v0.h[3]\n"
+ "fmla v12.8h, v6.8h, v1.h[3]\n"
+ "fmla v16.8h, v6.8h, v2.h[3]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "fmla v9.8h, v7.8h, v0.h[3]\n"
+ "fmla v13.8h, v7.8h, v1.h[3]\n"
+ "fmla v17.8h, v7.8h, v2.h[3]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "fmla v10.8h, v6.8h, v0.h[3]\n"
+ "fmla v14.8h, v6.8h, v1.h[3]\n"
+ "fmla v18.8h, v6.8h, v2.h[3]\n"
+ "ldr q6, [x11, #0x40]\n"
+ "fmla v11.8h, v7.8h, v0.h[3]\n"
+ "fmla v15.8h, v7.8h, v1.h[3]\n"
+ "fmla v19.8h, v7.8h, v2.h[3]\n"
+ "ldr q7, [x10, #0x40]\n"
+ "fmla v8.8h, v6.8h, v0.h[4]\n"
+ "fmla v12.8h, v6.8h, v1.h[4]\n"
+ "fmla v16.8h, v6.8h, v2.h[4]\n"
+ "ldr q6, [x9, #0x40]\n"
+ "fmla v9.8h, v7.8h, v0.h[4]\n"
+ "fmla v13.8h, v7.8h, v1.h[4]\n"
+ "fmla v17.8h, v7.8h, v2.h[4]\n"
+ "ldr q7, [x28, #0x40]\n"
+ "fmla v10.8h, v6.8h, v0.h[4]\n"
+ "fmla v14.8h, v6.8h, v1.h[4]\n"
+ "fmla v18.8h, v6.8h, v2.h[4]\n"
+ "ldr q6, [x11, #0x50]\n"
+ "fmla v11.8h, v7.8h, v0.h[4]\n"
+ "fmla v15.8h, v7.8h, v1.h[4]\n"
+ "fmla v19.8h, v7.8h, v2.h[4]\n"
+ "ldr q7, [x10, #0x50]\n"
+ "fmla v8.8h, v6.8h, v0.h[5]\n"
+ "fmla v12.8h, v6.8h, v1.h[5]\n"
+ "fmla v16.8h, v6.8h, v2.h[5]\n"
+ "ldr q6, [x9, #0x50]\n"
+ "fmla v9.8h, v7.8h, v0.h[5]\n"
+ "fmla v13.8h, v7.8h, v1.h[5]\n"
+ "fmla v17.8h, v7.8h, v2.h[5]\n"
+ "ldr q7, [x28, #0x50]\n"
+ "fmla v10.8h, v6.8h, v0.h[5]\n"
+ "fmla v14.8h, v6.8h, v1.h[5]\n"
+ "fmla v18.8h, v6.8h, v2.h[5]\n"
+ "ldr q6, [x11, #0x60]\n"
+ "fmla v11.8h, v7.8h, v0.h[5]\n"
+ "fmla v15.8h, v7.8h, v1.h[5]\n"
+ "fmla v19.8h, v7.8h, v2.h[5]\n"
+ "ldr q7, [x10, #0x60]\n"
+ "fmla v8.8h, v6.8h, v0.h[6]\n"
+ "fmla v12.8h, v6.8h, v1.h[6]\n"
+ "fmla v16.8h, v6.8h, v2.h[6]\n"
+ "ldr q6, [x9, #0x60]\n"
+ "fmla v9.8h, v7.8h, v0.h[6]\n"
+ "fmla v13.8h, v7.8h, v1.h[6]\n"
+ "fmla v17.8h, v7.8h, v2.h[6]\n"
+ "ldr q7, [x28, #0x60]\n"
+ "fmla v10.8h, v6.8h, v0.h[6]\n"
+ "fmla v14.8h, v6.8h, v1.h[6]\n"
+ "fmla v18.8h, v6.8h, v2.h[6]\n"
+ "ldr q6, [x11, #0x70]\n"
+ "fmla v11.8h, v7.8h, v0.h[6]\n"
+ "add x11, x11, #0x80\n"
+ "fmla v15.8h, v7.8h, v1.h[6]\n"
+ "fmla v19.8h, v7.8h, v2.h[6]\n"
+ "ldr q7, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
+ "fmla v8.8h, v6.8h, v0.h[7]\n"
+ "fmla v12.8h, v6.8h, v1.h[7]\n"
+ "fmla v16.8h, v6.8h, v2.h[7]\n"
+ "ldr q6, [x9, #0x70]\n"
+ "fmla v9.8h, v7.8h, v0.h[7]\n"
+ "add x9, x9, #0x80\n"
+ "fmla v13.8h, v7.8h, v1.h[7]\n"
+ "fmla v17.8h, v7.8h, v2.h[7]\n"
+ "ldr q7, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
+ "fmla v10.8h, v6.8h, v0.h[7]\n"
+ "fmla v14.8h, v6.8h, v1.h[7]\n"
+ "fmla v18.8h, v6.8h, v2.h[7]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "fmla v11.8h, v7.8h, v0.h[7]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "fmla v15.8h, v7.8h, v1.h[7]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "fmla v19.8h, v7.8h, v2.h[7]\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "bge 127b\n"
+ "128:" // Height 3: Multiply loop: Single iteration only
+ "fmla v8.8h, v6.8h, v0.h[0]\n"
+ "fmla v12.8h, v6.8h, v1.h[0]\n"
+ "sub x26, x26, #0x8\n"
+ "add x25, x25, #0x10\n"
+ "fmla v16.8h, v6.8h, v2.h[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "fmla v9.8h, v7.8h, v0.h[0]\n"
+ "add x24, x24, #0x10\n"
+ "fmla v13.8h, v7.8h, v1.h[0]\n"
+ "fmla v17.8h, v7.8h, v2.h[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x23, x23, #0x10\n"
+ "fmla v10.8h, v6.8h, v0.h[0]\n"
+ "fmla v14.8h, v6.8h, v1.h[0]\n"
+ "fmla v18.8h, v6.8h, v2.h[0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "fmla v11.8h, v7.8h, v0.h[0]\n"
+ "fmla v15.8h, v7.8h, v1.h[0]\n"
+ "fmla v19.8h, v7.8h, v2.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "fmla v8.8h, v6.8h, v0.h[1]\n"
+ "fmla v12.8h, v6.8h, v1.h[1]\n"
+ "fmla v16.8h, v6.8h, v2.h[1]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "fmla v9.8h, v7.8h, v0.h[1]\n"
+ "fmla v13.8h, v7.8h, v1.h[1]\n"
+ "fmla v17.8h, v7.8h, v2.h[1]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "fmla v10.8h, v6.8h, v0.h[1]\n"
+ "fmla v14.8h, v6.8h, v1.h[1]\n"
+ "fmla v18.8h, v6.8h, v2.h[1]\n"
+ "ldr q6, [x11, #0x20]\n"
+ "fmla v11.8h, v7.8h, v0.h[1]\n"
+ "fmla v15.8h, v7.8h, v1.h[1]\n"
+ "fmla v19.8h, v7.8h, v2.h[1]\n"
+ "ldr q7, [x10, #0x20]\n"
+ "fmla v8.8h, v6.8h, v0.h[2]\n"
+ "fmla v12.8h, v6.8h, v1.h[2]\n"
+ "fmla v16.8h, v6.8h, v2.h[2]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "fmla v9.8h, v7.8h, v0.h[2]\n"
+ "fmla v13.8h, v7.8h, v1.h[2]\n"
+ "fmla v17.8h, v7.8h, v2.h[2]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "fmla v10.8h, v6.8h, v0.h[2]\n"
+ "fmla v14.8h, v6.8h, v1.h[2]\n"
+ "fmla v18.8h, v6.8h, v2.h[2]\n"
+ "ldr q6, [x11, #0x30]\n"
+ "fmla v11.8h, v7.8h, v0.h[2]\n"
+ "fmla v15.8h, v7.8h, v1.h[2]\n"
+ "fmla v19.8h, v7.8h, v2.h[2]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "fmla v8.8h, v6.8h, v0.h[3]\n"
+ "fmla v12.8h, v6.8h, v1.h[3]\n"
+ "fmla v16.8h, v6.8h, v2.h[3]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "fmla v9.8h, v7.8h, v0.h[3]\n"
+ "fmla v13.8h, v7.8h, v1.h[3]\n"
+ "fmla v17.8h, v7.8h, v2.h[3]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "fmla v10.8h, v6.8h, v0.h[3]\n"
+ "fmla v14.8h, v6.8h, v1.h[3]\n"
+ "fmla v18.8h, v6.8h, v2.h[3]\n"
+ "ldr q6, [x11, #0x40]\n"
+ "fmla v11.8h, v7.8h, v0.h[3]\n"
+ "fmla v15.8h, v7.8h, v1.h[3]\n"
+ "fmla v19.8h, v7.8h, v2.h[3]\n"
+ "ldr q7, [x10, #0x40]\n"
+ "fmla v8.8h, v6.8h, v0.h[4]\n"
+ "fmla v12.8h, v6.8h, v1.h[4]\n"
+ "fmla v16.8h, v6.8h, v2.h[4]\n"
+ "ldr q6, [x9, #0x40]\n"
+ "fmla v9.8h, v7.8h, v0.h[4]\n"
+ "fmla v13.8h, v7.8h, v1.h[4]\n"
+ "fmla v17.8h, v7.8h, v2.h[4]\n"
+ "ldr q7, [x28, #0x40]\n"
+ "fmla v10.8h, v6.8h, v0.h[4]\n"
+ "fmla v14.8h, v6.8h, v1.h[4]\n"
+ "fmla v18.8h, v6.8h, v2.h[4]\n"
+ "ldr q6, [x11, #0x50]\n"
+ "fmla v11.8h, v7.8h, v0.h[4]\n"
+ "fmla v15.8h, v7.8h, v1.h[4]\n"
+ "fmla v19.8h, v7.8h, v2.h[4]\n"
+ "ldr q7, [x10, #0x50]\n"
+ "fmla v8.8h, v6.8h, v0.h[5]\n"
+ "fmla v12.8h, v6.8h, v1.h[5]\n"
+ "fmla v16.8h, v6.8h, v2.h[5]\n"
+ "ldr q6, [x9, #0x50]\n"
+ "fmla v9.8h, v7.8h, v0.h[5]\n"
+ "fmla v13.8h, v7.8h, v1.h[5]\n"
+ "fmla v17.8h, v7.8h, v2.h[5]\n"
+ "ldr q7, [x28, #0x50]\n"
+ "fmla v10.8h, v6.8h, v0.h[5]\n"
+ "fmla v14.8h, v6.8h, v1.h[5]\n"
+ "fmla v18.8h, v6.8h, v2.h[5]\n"
+ "ldr q6, [x11, #0x60]\n"
+ "fmla v11.8h, v7.8h, v0.h[5]\n"
+ "fmla v15.8h, v7.8h, v1.h[5]\n"
+ "fmla v19.8h, v7.8h, v2.h[5]\n"
+ "ldr q7, [x10, #0x60]\n"
+ "fmla v8.8h, v6.8h, v0.h[6]\n"
+ "fmla v12.8h, v6.8h, v1.h[6]\n"
+ "fmla v16.8h, v6.8h, v2.h[6]\n"
+ "ldr q6, [x9, #0x60]\n"
+ "fmla v9.8h, v7.8h, v0.h[6]\n"
+ "fmla v13.8h, v7.8h, v1.h[6]\n"
+ "fmla v17.8h, v7.8h, v2.h[6]\n"
+ "ldr q7, [x28, #0x60]\n"
+ "fmla v10.8h, v6.8h, v0.h[6]\n"
+ "fmla v14.8h, v6.8h, v1.h[6]\n"
+ "fmla v18.8h, v6.8h, v2.h[6]\n"
+ "ldr q6, [x11, #0x70]\n"
+ "fmla v11.8h, v7.8h, v0.h[6]\n"
+ "add x11, x11, #0x80\n"
+ "fmla v15.8h, v7.8h, v1.h[6]\n"
+ "fmla v19.8h, v7.8h, v2.h[6]\n"
+ "ldr q7, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
+ "fmla v8.8h, v6.8h, v0.h[7]\n"
+ "fmla v12.8h, v6.8h, v1.h[7]\n"
+ "fmla v16.8h, v6.8h, v2.h[7]\n"
+ "ldr q6, [x9, #0x70]\n"
+ "fmla v9.8h, v7.8h, v0.h[7]\n"
+ "add x9, x9, #0x80\n"
+ "fmla v13.8h, v7.8h, v1.h[7]\n"
+ "fmla v17.8h, v7.8h, v2.h[7]\n"
+ "ldr q7, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
+ "fmla v10.8h, v6.8h, v0.h[7]\n"
+ "fmla v14.8h, v6.8h, v1.h[7]\n"
+ "fmla v18.8h, v6.8h, v2.h[7]\n"
+ "fmla v11.8h, v7.8h, v0.h[7]\n"
+ "fmla v15.8h, v7.8h, v1.h[7]\n"
+ "fmla v19.8h, v7.8h, v2.h[7]\n"
+ "129:" // Height 3: Multiply loop: Main loop skip
+ "cbz x26, 131f\n"
+ "130:" // Height 3: Multiply loop: Odd block loop
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x24], #0x2\n"
+ "sub x26, x26, #0x1\n"
+ "ldr h2, [x23], #0x2\n"
+ "ldr q6, [x11, #0x0]\n"
+ "fmla v8.8h, v6.8h, v0.h[0]\n"
+ "fmla v12.8h, v6.8h, v1.h[0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "fmla v16.8h, v6.8h, v2.h[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "fmla v9.8h, v7.8h, v0.h[0]\n"
+ "fmla v13.8h, v7.8h, v1.h[0]\n"
+ "fmla v17.8h, v7.8h, v2.h[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x11, x11, #0x10\n"
+ "fmla v10.8h, v6.8h, v0.h[0]\n"
+ "fmla v14.8h, v6.8h, v1.h[0]\n"
+ "add x10, x10, #0x10\n"
+ "add x9, x9, #0x10\n"
+ "fmla v18.8h, v6.8h, v2.h[0]\n"
+ "fmla v11.8h, v7.8h, v0.h[0]\n"
+ "add x28, x28, #0x10\n"
+ "fmla v15.8h, v7.8h, v1.h[0]\n"
+ "fmla v19.8h, v7.8h, v2.h[0]\n"
+ "cbnz x26, 130b\n"
+ "131:" // Height 3: Multiply loop: No odd multiplies
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 124b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "tbz %x[flags], #1, 132f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.8h }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.8h }, [x19]\n"
+ "fmin v8.8h, v8.8h, v1.8h\n"
+ "fmin v9.8h, v9.8h, v1.8h\n"
+ "fmin v10.8h, v10.8h, v1.8h\n"
+ "fmin v11.8h, v11.8h, v1.8h\n"
+ "fmin v12.8h, v12.8h, v1.8h\n"
+ "fmin v13.8h, v13.8h, v1.8h\n"
+ "fmin v14.8h, v14.8h, v1.8h\n"
+ "fmin v15.8h, v15.8h, v1.8h\n"
+ "fmin v16.8h, v16.8h, v1.8h\n"
+ "fmin v17.8h, v17.8h, v1.8h\n"
+ "fmin v18.8h, v18.8h, v1.8h\n"
+ "fmin v19.8h, v19.8h, v1.8h\n"
+ "fmax v8.8h, v8.8h, v0.8h\n"
+ "fmax v9.8h, v9.8h, v0.8h\n"
+ "fmax v10.8h, v10.8h, v0.8h\n"
+ "fmax v11.8h, v11.8h, v0.8h\n"
+ "fmax v12.8h, v12.8h, v0.8h\n"
+ "fmax v13.8h, v13.8h, v0.8h\n"
+ "fmax v14.8h, v14.8h, v0.8h\n"
+ "fmax v15.8h, v15.8h, v0.8h\n"
+ "fmax v16.8h, v16.8h, v0.8h\n"
+ "fmax v17.8h, v17.8h, v0.8h\n"
+ "fmax v18.8h, v18.8h, v0.8h\n"
+ "fmax v19.8h, v19.8h, v0.8h\n"
+ "132:" // Height 3: No activation
+ "cmp x13, #0x20\n"
+ "bge 149f\n"
+ "tbz x13, #4, 140f\n"
+ "st1 { v8.8h }, [x12], #0x10\n"
+ "st1 { v9.8h }, [x12], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "st1 { v13.8h }, [x24], #0x10\n"
+ "st1 { v16.8h }, [x23], #0x10\n"
+ "st1 { v17.8h }, [x23], #0x10\n"
+ "tbz x13, #3, 136f\n"
+ "st1 { v10.8h }, [x12], #0x10\n"
+ "st1 { v14.8h }, [x24], #0x10\n"
+ "st1 { v18.8h }, [x23], #0x10\n"
+ "tbz x13, #2, 134f\n"
+ "str d11, [x12], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "tbz x13, #1, 133f\n"
+ "st1 { v11.s }[2], [x12], #0x4\n"
+ "st1 { v15.s }[2], [x24], #0x4\n"
+ "st1 { v19.s }[2], [x23], #0x4\n"
+ "tbz x13, #0, 148f\n"
+ "st1 { v11.h }[6], [x12]\n"
+ "st1 { v15.h }[6], [x24]\n"
+ "st1 { v19.h }[6], [x23]\n"
+ "b 148f\n"
+ "133:" // Height 3: Partial direct writeback: partial_1_28
+ "tbz x13, #0, 148f\n"
+ "st1 { v11.h }[4], [x12]\n"
+ "st1 { v15.h }[4], [x24]\n"
+ "st1 { v19.h }[4], [x23]\n"
+ "b 148f\n"
+ "134:" // Height 3: Partial direct writeback: partial_2_24
+ "tbz x13, #1, 135f\n"
+ "str s11, [x12], #0x4\n"
+ "str s15, [x24], #0x4\n"
+ "str s19, [x23], #0x4\n"
+ "tbz x13, #0, 148f\n"
+ "st1 { v11.h }[2], [x12]\n"
+ "st1 { v15.h }[2], [x24]\n"
+ "st1 { v19.h }[2], [x23]\n"
+ "b 148f\n"
+ "135:" // Height 3: Partial direct writeback: partial_1_24
+ "tbz x13, #0, 148f\n"
+ "str h11, [x12, #0x0]\n"
+ "str h15, [x24, #0x0]\n"
+ "str h19, [x23, #0x0]\n"
+ "b 148f\n"
+ "136:" // Height 3: Partial direct writeback: partial_4_16
+ "tbz x13, #2, 138f\n"
+ "str d10, [x12], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "tbz x13, #1, 137f\n"
+ "st1 { v10.s }[2], [x12], #0x4\n"
+ "st1 { v14.s }[2], [x24], #0x4\n"
+ "st1 { v18.s }[2], [x23], #0x4\n"
+ "tbz x13, #0, 148f\n"
+ "st1 { v10.h }[6], [x12]\n"
+ "st1 { v14.h }[6], [x24]\n"
+ "st1 { v18.h }[6], [x23]\n"
+ "b 148f\n"
+ "137:" // Height 3: Partial direct writeback: partial_1_20
+ "tbz x13, #0, 148f\n"
+ "st1 { v10.h }[4], [x12]\n"
+ "st1 { v14.h }[4], [x24]\n"
+ "st1 { v18.h }[4], [x23]\n"
+ "b 148f\n"
+ "138:" // Height 3: Partial direct writeback: partial_2_16
+ "tbz x13, #1, 139f\n"
+ "str s10, [x12], #0x4\n"
+ "str s14, [x24], #0x4\n"
+ "str s18, [x23], #0x4\n"
+ "tbz x13, #0, 148f\n"
+ "st1 { v10.h }[2], [x12]\n"
+ "st1 { v14.h }[2], [x24]\n"
+ "st1 { v18.h }[2], [x23]\n"
+ "b 148f\n"
+ "139:" // Height 3: Partial direct writeback: partial_1_16
+ "tbz x13, #0, 148f\n"
+ "str h10, [x12, #0x0]\n"
+ "str h14, [x24, #0x0]\n"
+ "str h18, [x23, #0x0]\n"
+ "b 148f\n"
+ "140:" // Height 3: Partial direct writeback: partial_8_0
+ "tbz x13, #3, 144f\n"
+ "st1 { v8.8h }, [x12], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "st1 { v16.8h }, [x23], #0x10\n"
+ "tbz x13, #2, 142f\n"
+ "str d9, [x12], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "tbz x13, #1, 141f\n"
+ "st1 { v9.s }[2], [x12], #0x4\n"
+ "st1 { v13.s }[2], [x24], #0x4\n"
+ "st1 { v17.s }[2], [x23], #0x4\n"
+ "tbz x13, #0, 148f\n"
+ "st1 { v9.h }[6], [x12]\n"
+ "st1 { v13.h }[6], [x24]\n"
+ "st1 { v17.h }[6], [x23]\n"
+ "b 148f\n"
+ "141:" // Height 3: Partial direct writeback: partial_1_12
+ "tbz x13, #0, 148f\n"
+ "st1 { v9.h }[4], [x12]\n"
+ "st1 { v13.h }[4], [x24]\n"
+ "st1 { v17.h }[4], [x23]\n"
+ "b 148f\n"
+ "142:" // Height 3: Partial direct writeback: partial_2_8
+ "tbz x13, #1, 143f\n"
+ "str s9, [x12], #0x4\n"
+ "str s13, [x24], #0x4\n"
+ "str s17, [x23], #0x4\n"
+ "tbz x13, #0, 148f\n"
+ "st1 { v9.h }[2], [x12]\n"
+ "st1 { v13.h }[2], [x24]\n"
+ "st1 { v17.h }[2], [x23]\n"
+ "b 148f\n"
+ "143:" // Height 3: Partial direct writeback: partial_1_8
+ "tbz x13, #0, 148f\n"
+ "str h9, [x12, #0x0]\n"
+ "str h13, [x24, #0x0]\n"
+ "str h17, [x23, #0x0]\n"
+ "b 148f\n"
+ "144:" // Height 3: Partial direct writeback: partial_4_0
+ "tbz x13, #2, 146f\n"
+ "str d8, [x12], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "tbz x13, #1, 145f\n"
+ "st1 { v8.s }[2], [x12], #0x4\n"
+ "st1 { v12.s }[2], [x24], #0x4\n"
+ "st1 { v16.s }[2], [x23], #0x4\n"
+ "tbz x13, #0, 148f\n"
+ "st1 { v8.h }[6], [x12]\n"
+ "st1 { v12.h }[6], [x24]\n"
+ "st1 { v16.h }[6], [x23]\n"
+ "b 148f\n"
+ "145:" // Height 3: Partial direct writeback: partial_1_4
+ "tbz x13, #0, 148f\n"
+ "st1 { v8.h }[4], [x12]\n"
+ "st1 { v12.h }[4], [x24]\n"
+ "st1 { v16.h }[4], [x23]\n"
+ "b 148f\n"
+ "146:" // Height 3: Partial direct writeback: partial_2_0
+ "tbz x13, #1, 147f\n"
+ "str s8, [x12], #0x4\n"
+ "str s12, [x24], #0x4\n"
+ "str s16, [x23], #0x4\n"
+ "tbz x13, #0, 148f\n"
+ "st1 { v8.h }[2], [x12]\n"
+ "st1 { v12.h }[2], [x24]\n"
+ "st1 { v16.h }[2], [x23]\n"
+ "b 148f\n"
+ "147:" // Height 3: Partial direct writeback: partial_1_0
+ "str h8, [x12, #0x0]\n"
+ "str h12, [x24, #0x0]\n"
+ "str h16, [x23, #0x0]\n"
+ "148:" // Height 3: Partial direct writeback: Done
+ "b 150f\n"
+ "149:" // Height 3: Full writeback
+ "str q8, [x12, #0x0]\n"
+ "str q9, [x12, #0x10]\n"
+ "str q10, [x12, #0x20]\n"
+ "str q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "150:" // Height 3: Writeback done
+ "subs x13, x13, #0x20\n"
+ "bgt 102b\n"
+ "b 302f\n"
+ "151:" // Height 4
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "152:" // Height 4: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0x18\n"
+ "bgt 153f\n"
+ "cmp x13, #0x10\n"
+ "mov x28, x11\n"
+ "bgt 153f\n"
+ "cmp x13, #0x8\n"
+ "mov x9, x11\n"
+ "bgt 153f\n"
+ "mov x10, x11\n"
+ "153:" // Height 4: B setup done
+ "cbz x14, 154f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
+ "mov v16.16b, v8.16b\n"
+ "mov v17.16b, v9.16b\n"
+ "add x14, x14, #0x40\n"
+ "mov v18.16b, v10.16b\n"
+ "mov v19.16b, v11.16b\n"
+ "mov v20.16b, v8.16b\n"
+ "mov v21.16b, v9.16b\n"
+ "mov v22.16b, v10.16b\n"
+ "mov v23.16b, v11.16b\n"
+ "b 173f\n"
+ "154:" // Height 4: no bias
+ "tbz %x[flags], #0, 172f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "cmp x13, #0x20\n"
+ "add x22, x23, x19, LSL #1\n"
+ "bge 171f\n"
+ "tbz x13, #4, 162f\n"
+ "ld1 { v8.8h }, [x12], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "ld1 { v16.8h }, [x23], #0x10\n"
+ "ld1 { v20.8h }, [x22], #0x10\n"
+ "ld1 { v9.8h }, [x12], #0x10\n"
+ "ld1 { v13.8h }, [x24], #0x10\n"
+ "ld1 { v17.8h }, [x23], #0x10\n"
+ "ld1 { v21.8h }, [x22], #0x10\n"
+ "tbz x13, #3, 158f\n"
+ "ld1 { v10.8h }, [x12], #0x10\n"
+ "ld1 { v14.8h }, [x24], #0x10\n"
+ "ld1 { v18.8h }, [x23], #0x10\n"
+ "ld1 { v22.8h }, [x22], #0x10\n"
+ "tbz x13, #2, 156f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "tbz x13, #1, 155f\n"
+ "ld1 { v11.s }[2], [x12], #0x4\n"
+ "ld1 { v15.s }[2], [x24], #0x4\n"
+ "mov x19, #0x3c\n"
+ "ld1 { v19.s }[2], [x23], #0x4\n"
+ "ld1 { v23.s }[2], [x22], #0x4\n"
+ "tbz x13, #0, 170f\n"
+ "ld1 { v11.h }[6], [x12]\n"
+ "ld1 { v15.h }[6], [x24]\n"
+ "ld1 { v19.h }[6], [x23]\n"
+ "ld1 { v23.h }[6], [x22]\n"
+ "b 170f\n"
+ "155:" // Height 4: Partial accumulate: partial_1_28
+ "mov x19, #0x38\n"
+ "tbz x13, #0, 170f\n"
+ "ld1 { v11.h }[4], [x12]\n"
+ "ld1 { v15.h }[4], [x24]\n"
+ "ld1 { v19.h }[4], [x23]\n"
+ "ld1 { v23.h }[4], [x22]\n"
+ "b 170f\n"
+ "156:" // Height 4: Partial accumulate: partial_2_24
+ "tbz x13, #1, 157f\n"
+ "ldr s11, [x12], #0x4\n"
+ "ldr s15, [x24], #0x4\n"
+ "mov x19, #0x34\n"
+ "ldr s19, [x23], #0x4\n"
+ "ldr s23, [x22], #0x4\n"
+ "tbz x13, #0, 170f\n"
+ "ld1 { v11.h }[2], [x12]\n"
+ "ld1 { v15.h }[2], [x24]\n"
+ "ld1 { v19.h }[2], [x23]\n"
+ "ld1 { v23.h }[2], [x22]\n"
+ "b 170f\n"
+ "157:" // Height 4: Partial accumulate: partial_1_24
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 170f\n"
+ "ldr h11, [x12, #0x0]\n"
+ "ldr h15, [x24, #0x0]\n"
+ "ldr h19, [x23, #0x0]\n"
+ "ldr h23, [x22, #0x0]\n"
+ "b 170f\n"
+ "158:" // Height 4: Partial accumulate: partial_4_16
+ "tbz x13, #2, 160f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "tbz x13, #1, 159f\n"
+ "ld1 { v10.s }[2], [x12], #0x4\n"
+ "ld1 { v14.s }[2], [x24], #0x4\n"
+ "mov x19, #0x2c\n"
+ "ld1 { v18.s }[2], [x23], #0x4\n"
+ "ld1 { v22.s }[2], [x22], #0x4\n"
+ "tbz x13, #0, 170f\n"
+ "ld1 { v10.h }[6], [x12]\n"
+ "ld1 { v14.h }[6], [x24]\n"
+ "ld1 { v18.h }[6], [x23]\n"
+ "ld1 { v22.h }[6], [x22]\n"
+ "b 170f\n"
+ "159:" // Height 4: Partial accumulate: partial_1_20
+ "mov x19, #0x28\n"
+ "tbz x13, #0, 170f\n"
+ "ld1 { v10.h }[4], [x12]\n"
+ "ld1 { v14.h }[4], [x24]\n"
+ "ld1 { v18.h }[4], [x23]\n"
+ "ld1 { v22.h }[4], [x22]\n"
+ "b 170f\n"
+ "160:" // Height 4: Partial accumulate: partial_2_16
+ "tbz x13, #1, 161f\n"
+ "ldr s10, [x12], #0x4\n"
+ "ldr s14, [x24], #0x4\n"
+ "mov x19, #0x24\n"
+ "ldr s18, [x23], #0x4\n"
+ "ldr s22, [x22], #0x4\n"
+ "tbz x13, #0, 170f\n"
+ "ld1 { v10.h }[2], [x12]\n"
+ "ld1 { v14.h }[2], [x24]\n"
+ "ld1 { v18.h }[2], [x23]\n"
+ "ld1 { v22.h }[2], [x22]\n"
+ "b 170f\n"
+ "161:" // Height 4: Partial accumulate: partial_1_16
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 170f\n"
+ "ldr h10, [x12, #0x0]\n"
+ "ldr h14, [x24, #0x0]\n"
+ "ldr h18, [x23, #0x0]\n"
+ "ldr h22, [x22, #0x0]\n"
+ "b 170f\n"
+ "162:" // Height 4: Partial accumulate: partial_8_0
+ "tbz x13, #3, 166f\n"
+ "ld1 { v8.8h }, [x12], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "ld1 { v16.8h }, [x23], #0x10\n"
+ "ld1 { v20.8h }, [x22], #0x10\n"
+ "tbz x13, #2, 164f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "tbz x13, #1, 163f\n"
+ "ld1 { v9.s }[2], [x12], #0x4\n"
+ "ld1 { v13.s }[2], [x24], #0x4\n"
+ "mov x19, #0x1c\n"
+ "ld1 { v17.s }[2], [x23], #0x4\n"
+ "ld1 { v21.s }[2], [x22], #0x4\n"
+ "tbz x13, #0, 170f\n"
+ "ld1 { v9.h }[6], [x12]\n"
+ "ld1 { v13.h }[6], [x24]\n"
+ "ld1 { v17.h }[6], [x23]\n"
+ "ld1 { v21.h }[6], [x22]\n"
+ "b 170f\n"
+ "163:" // Height 4: Partial accumulate: partial_1_12
+ "mov x19, #0x18\n"
+ "tbz x13, #0, 170f\n"
+ "ld1 { v9.h }[4], [x12]\n"
+ "ld1 { v13.h }[4], [x24]\n"
+ "ld1 { v17.h }[4], [x23]\n"
+ "ld1 { v21.h }[4], [x22]\n"
+ "b 170f\n"
+ "164:" // Height 4: Partial accumulate: partial_2_8
+ "tbz x13, #1, 165f\n"
+ "ldr s9, [x12], #0x4\n"
+ "ldr s13, [x24], #0x4\n"
+ "mov x19, #0x14\n"
+ "ldr s17, [x23], #0x4\n"
+ "ldr s21, [x22], #0x4\n"
+ "tbz x13, #0, 170f\n"
+ "ld1 { v9.h }[2], [x12]\n"
+ "ld1 { v13.h }[2], [x24]\n"
+ "ld1 { v17.h }[2], [x23]\n"
+ "ld1 { v21.h }[2], [x22]\n"
+ "b 170f\n"
+ "165:" // Height 4: Partial accumulate: partial_1_8
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 170f\n"
+ "ldr h9, [x12, #0x0]\n"
+ "ldr h13, [x24, #0x0]\n"
+ "ldr h17, [x23, #0x0]\n"
+ "ldr h21, [x22, #0x0]\n"
+ "b 170f\n"
+ "166:" // Height 4: Partial accumulate: partial_4_0
+ "tbz x13, #2, 168f\n"
+ "ldr d8, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "tbz x13, #1, 167f\n"
+ "ld1 { v8.s }[2], [x12], #0x4\n"
+ "ld1 { v12.s }[2], [x24], #0x4\n"
+ "mov x19, #0xc\n"
+ "ld1 { v16.s }[2], [x23], #0x4\n"
+ "ld1 { v20.s }[2], [x22], #0x4\n"
+ "tbz x13, #0, 170f\n"
+ "ld1 { v8.h }[6], [x12]\n"
+ "ld1 { v12.h }[6], [x24]\n"
+ "ld1 { v16.h }[6], [x23]\n"
+ "ld1 { v20.h }[6], [x22]\n"
+ "b 170f\n"
+ "167:" // Height 4: Partial accumulate: partial_1_4
+ "mov x19, #0x8\n"
+ "tbz x13, #0, 170f\n"
+ "ld1 { v8.h }[4], [x12]\n"
+ "ld1 { v12.h }[4], [x24]\n"
+ "ld1 { v16.h }[4], [x23]\n"
+ "ld1 { v20.h }[4], [x22]\n"
+ "b 170f\n"
+ "168:" // Height 4: Partial accumulate: partial_2_0
+ "tbz x13, #1, 169f\n"
+ "ldr s8, [x12], #0x4\n"
+ "ldr s12, [x24], #0x4\n"
+ "mov x19, #0x4\n"
+ "ldr s16, [x23], #0x4\n"
+ "ldr s20, [x22], #0x4\n"
+ "tbz x13, #0, 170f\n"
+ "ld1 { v8.h }[2], [x12]\n"
+ "ld1 { v12.h }[2], [x24]\n"
+ "ld1 { v16.h }[2], [x23]\n"
+ "ld1 { v20.h }[2], [x22]\n"
+ "b 170f\n"
+ "169:" // Height 4: Partial accumulate: partial_1_0
+ "ldr h8, [x12, #0x0]\n"
+ "ldr h12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr h16, [x23, #0x0]\n"
+ "ldr h20, [x22, #0x0]\n"
+ "170:" // Height 4: Partial accumulate: Done
+ "sub x12, x12, x19\n"
+ "b 173f\n"
+ "171:" // Height 4: full accumulate
+ "ldr q8, [x12, #0x0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "ldr q10, [x12, #0x20]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "b 173f\n"
+ "172:" // Height 4: no accumulate
+ "movi v8.16b, #0x0\n"
+ "movi v9.16b, #0x0\n"
+ "movi v10.16b, #0x0\n"
+ "movi v11.16b, #0x0\n"
+ "movi v12.16b, #0x0\n"
+ "movi v13.16b, #0x0\n"
+ "movi v14.16b, #0x0\n"
+ "movi v15.16b, #0x0\n"
+ "movi v16.16b, #0x0\n"
+ "movi v17.16b, #0x0\n"
+ "movi v18.16b, #0x0\n"
+ "movi v19.16b, #0x0\n"
+ "movi v20.16b, #0x0\n"
+ "movi v21.16b, #0x0\n"
+ "movi v22.16b, #0x0\n"
+ "movi v23.16b, #0x0\n"
+ "173:" // Height 4: setup done
+ "mov x27, #0x0\n"
+ "174:" // Height 4: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 175f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 176f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "b 176f\n"
+ "175:" // Height 4: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "176:" // Height 4: input setup done
+ "cmp x26, #0x8\n"
+ "blt 179f\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "blt 178f\n"
+ "177:" // Height 4: Multiply loop: Main loop head
+ "fmla v8.8h, v6.8h, v0.h[0]\n"
+ "fmla v12.8h, v6.8h, v1.h[0]\n"
+ "sub x26, x26, #0x8\n"
+ "cmp x26, #0x10\n"
+ "fmla v16.8h, v6.8h, v2.h[0]\n"
+ "fmla v20.8h, v6.8h, v3.h[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "add x25, x25, #0x10\n"
+ "fmla v9.8h, v7.8h, v0.h[0]\n"
+ "fmla v13.8h, v7.8h, v1.h[0]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "fmla v17.8h, v7.8h, v2.h[0]\n"
+ "fmla v21.8h, v7.8h, v3.h[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x22, x22, #0x10\n"
+ "fmla v10.8h, v6.8h, v0.h[0]\n"
+ "fmla v14.8h, v6.8h, v1.h[0]\n"
+ "fmla v18.8h, v6.8h, v2.h[0]\n"
+ "fmla v22.8h, v6.8h, v3.h[0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "fmla v11.8h, v7.8h, v0.h[0]\n"
+ "fmla v15.8h, v7.8h, v1.h[0]\n"
+ "fmla v19.8h, v7.8h, v2.h[0]\n"
+ "fmla v23.8h, v7.8h, v3.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "fmla v8.8h, v6.8h, v0.h[1]\n"
+ "fmla v12.8h, v6.8h, v1.h[1]\n"
+ "fmla v16.8h, v6.8h, v2.h[1]\n"
+ "fmla v20.8h, v6.8h, v3.h[1]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "fmla v9.8h, v7.8h, v0.h[1]\n"
+ "fmla v13.8h, v7.8h, v1.h[1]\n"
+ "fmla v17.8h, v7.8h, v2.h[1]\n"
+ "fmla v21.8h, v7.8h, v3.h[1]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "fmla v10.8h, v6.8h, v0.h[1]\n"
+ "fmla v14.8h, v6.8h, v1.h[1]\n"
+ "fmla v18.8h, v6.8h, v2.h[1]\n"
+ "fmla v22.8h, v6.8h, v3.h[1]\n"
+ "ldr q6, [x11, #0x20]\n"
+ "fmla v11.8h, v7.8h, v0.h[1]\n"
+ "fmla v15.8h, v7.8h, v1.h[1]\n"
+ "fmla v19.8h, v7.8h, v2.h[1]\n"
+ "fmla v23.8h, v7.8h, v3.h[1]\n"
+ "ldr q7, [x10, #0x20]\n"
+ "fmla v8.8h, v6.8h, v0.h[2]\n"
+ "fmla v12.8h, v6.8h, v1.h[2]\n"
+ "fmla v16.8h, v6.8h, v2.h[2]\n"
+ "fmla v20.8h, v6.8h, v3.h[2]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "fmla v9.8h, v7.8h, v0.h[2]\n"
+ "fmla v13.8h, v7.8h, v1.h[2]\n"
+ "fmla v17.8h, v7.8h, v2.h[2]\n"
+ "fmla v21.8h, v7.8h, v3.h[2]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "fmla v10.8h, v6.8h, v0.h[2]\n"
+ "fmla v14.8h, v6.8h, v1.h[2]\n"
+ "fmla v18.8h, v6.8h, v2.h[2]\n"
+ "fmla v22.8h, v6.8h, v3.h[2]\n"
+ "ldr q6, [x11, #0x30]\n"
+ "fmla v11.8h, v7.8h, v0.h[2]\n"
+ "fmla v15.8h, v7.8h, v1.h[2]\n"
+ "fmla v19.8h, v7.8h, v2.h[2]\n"
+ "fmla v23.8h, v7.8h, v3.h[2]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "fmla v8.8h, v6.8h, v0.h[3]\n"
+ "fmla v12.8h, v6.8h, v1.h[3]\n"
+ "fmla v16.8h, v6.8h, v2.h[3]\n"
+ "fmla v20.8h, v6.8h, v3.h[3]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "fmla v9.8h, v7.8h, v0.h[3]\n"
+ "fmla v13.8h, v7.8h, v1.h[3]\n"
+ "fmla v17.8h, v7.8h, v2.h[3]\n"
+ "fmla v21.8h, v7.8h, v3.h[3]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "fmla v10.8h, v6.8h, v0.h[3]\n"
+ "fmla v14.8h, v6.8h, v1.h[3]\n"
+ "fmla v18.8h, v6.8h, v2.h[3]\n"
+ "fmla v22.8h, v6.8h, v3.h[3]\n"
+ "ldr q6, [x11, #0x40]\n"
+ "fmla v11.8h, v7.8h, v0.h[3]\n"
+ "fmla v15.8h, v7.8h, v1.h[3]\n"
+ "fmla v19.8h, v7.8h, v2.h[3]\n"
+ "fmla v23.8h, v7.8h, v3.h[3]\n"
+ "ldr q7, [x10, #0x40]\n"
+ "fmla v8.8h, v6.8h, v0.h[4]\n"
+ "fmla v12.8h, v6.8h, v1.h[4]\n"
+ "fmla v16.8h, v6.8h, v2.h[4]\n"
+ "fmla v20.8h, v6.8h, v3.h[4]\n"
+ "ldr q6, [x9, #0x40]\n"
+ "fmla v9.8h, v7.8h, v0.h[4]\n"
+ "fmla v13.8h, v7.8h, v1.h[4]\n"
+ "fmla v17.8h, v7.8h, v2.h[4]\n"
+ "fmla v21.8h, v7.8h, v3.h[4]\n"
+ "ldr q7, [x28, #0x40]\n"
+ "fmla v10.8h, v6.8h, v0.h[4]\n"
+ "fmla v14.8h, v6.8h, v1.h[4]\n"
+ "fmla v18.8h, v6.8h, v2.h[4]\n"
+ "fmla v22.8h, v6.8h, v3.h[4]\n"
+ "ldr q6, [x11, #0x50]\n"
+ "fmla v11.8h, v7.8h, v0.h[4]\n"
+ "fmla v15.8h, v7.8h, v1.h[4]\n"
+ "fmla v19.8h, v7.8h, v2.h[4]\n"
+ "fmla v23.8h, v7.8h, v3.h[4]\n"
+ "ldr q7, [x10, #0x50]\n"
+ "fmla v8.8h, v6.8h, v0.h[5]\n"
+ "fmla v12.8h, v6.8h, v1.h[5]\n"
+ "fmla v16.8h, v6.8h, v2.h[5]\n"
+ "fmla v20.8h, v6.8h, v3.h[5]\n"
+ "ldr q6, [x9, #0x50]\n"
+ "fmla v9.8h, v7.8h, v0.h[5]\n"
+ "fmla v13.8h, v7.8h, v1.h[5]\n"
+ "fmla v17.8h, v7.8h, v2.h[5]\n"
+ "fmla v21.8h, v7.8h, v3.h[5]\n"
+ "ldr q7, [x28, #0x50]\n"
+ "fmla v10.8h, v6.8h, v0.h[5]\n"
+ "fmla v14.8h, v6.8h, v1.h[5]\n"
+ "fmla v18.8h, v6.8h, v2.h[5]\n"
+ "fmla v22.8h, v6.8h, v3.h[5]\n"
+ "ldr q6, [x11, #0x60]\n"
+ "fmla v11.8h, v7.8h, v0.h[5]\n"
+ "fmla v15.8h, v7.8h, v1.h[5]\n"
+ "fmla v19.8h, v7.8h, v2.h[5]\n"
+ "fmla v23.8h, v7.8h, v3.h[5]\n"
+ "ldr q7, [x10, #0x60]\n"
+ "fmla v8.8h, v6.8h, v0.h[6]\n"
+ "fmla v12.8h, v6.8h, v1.h[6]\n"
+ "fmla v16.8h, v6.8h, v2.h[6]\n"
+ "fmla v20.8h, v6.8h, v3.h[6]\n"
+ "ldr q6, [x9, #0x60]\n"
+ "fmla v9.8h, v7.8h, v0.h[6]\n"
+ "fmla v13.8h, v7.8h, v1.h[6]\n"
+ "fmla v17.8h, v7.8h, v2.h[6]\n"
+ "fmla v21.8h, v7.8h, v3.h[6]\n"
+ "ldr q7, [x28, #0x60]\n"
+ "fmla v10.8h, v6.8h, v0.h[6]\n"
+ "fmla v14.8h, v6.8h, v1.h[6]\n"
+ "fmla v18.8h, v6.8h, v2.h[6]\n"
+ "fmla v22.8h, v6.8h, v3.h[6]\n"
+ "ldr q6, [x11, #0x70]\n"
+ "add x11, x11, #0x80\n"
+ "fmla v11.8h, v7.8h, v0.h[6]\n"
+ "fmla v15.8h, v7.8h, v1.h[6]\n"
+ "fmla v19.8h, v7.8h, v2.h[6]\n"
+ "fmla v23.8h, v7.8h, v3.h[6]\n"
+ "ldr q7, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
+ "fmla v8.8h, v6.8h, v0.h[7]\n"
+ "fmla v12.8h, v6.8h, v1.h[7]\n"
+ "fmla v16.8h, v6.8h, v2.h[7]\n"
+ "fmla v20.8h, v6.8h, v3.h[7]\n"
+ "ldr q6, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
+ "fmla v9.8h, v7.8h, v0.h[7]\n"
+ "fmla v13.8h, v7.8h, v1.h[7]\n"
+ "fmla v17.8h, v7.8h, v2.h[7]\n"
+ "fmla v21.8h, v7.8h, v3.h[7]\n"
+ "ldr q7, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
+ "fmla v10.8h, v6.8h, v0.h[7]\n"
+ "fmla v14.8h, v6.8h, v1.h[7]\n"
+ "fmla v18.8h, v6.8h, v2.h[7]\n"
+ "fmla v22.8h, v6.8h, v3.h[7]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "fmla v11.8h, v7.8h, v0.h[7]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "fmla v15.8h, v7.8h, v1.h[7]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "fmla v19.8h, v7.8h, v2.h[7]\n"
+ "ldr q2, [x23, #0x0]\n"
+ "fmla v23.8h, v7.8h, v3.h[7]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "bge 177b\n"
+ "178:" // Height 4: Multiply loop: Single iteration only
+ "fmla v8.8h, v6.8h, v0.h[0]\n"
+ "fmla v12.8h, v6.8h, v1.h[0]\n"
+ "sub x26, x26, #0x8\n"
+ "add x25, x25, #0x10\n"
+ "fmla v16.8h, v6.8h, v2.h[0]\n"
+ "fmla v20.8h, v6.8h, v3.h[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "add x24, x24, #0x10\n"
+ "fmla v9.8h, v7.8h, v0.h[0]\n"
+ "fmla v13.8h, v7.8h, v1.h[0]\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "fmla v17.8h, v7.8h, v2.h[0]\n"
+ "fmla v21.8h, v7.8h, v3.h[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "fmla v10.8h, v6.8h, v0.h[0]\n"
+ "fmla v14.8h, v6.8h, v1.h[0]\n"
+ "fmla v18.8h, v6.8h, v2.h[0]\n"
+ "fmla v22.8h, v6.8h, v3.h[0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "fmla v11.8h, v7.8h, v0.h[0]\n"
+ "fmla v15.8h, v7.8h, v1.h[0]\n"
+ "fmla v19.8h, v7.8h, v2.h[0]\n"
+ "fmla v23.8h, v7.8h, v3.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "fmla v8.8h, v6.8h, v0.h[1]\n"
+ "fmla v12.8h, v6.8h, v1.h[1]\n"
+ "fmla v16.8h, v6.8h, v2.h[1]\n"
+ "fmla v20.8h, v6.8h, v3.h[1]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "fmla v9.8h, v7.8h, v0.h[1]\n"
+ "fmla v13.8h, v7.8h, v1.h[1]\n"
+ "fmla v17.8h, v7.8h, v2.h[1]\n"
+ "fmla v21.8h, v7.8h, v3.h[1]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "fmla v10.8h, v6.8h, v0.h[1]\n"
+ "fmla v14.8h, v6.8h, v1.h[1]\n"
+ "fmla v18.8h, v6.8h, v2.h[1]\n"
+ "fmla v22.8h, v6.8h, v3.h[1]\n"
+ "ldr q6, [x11, #0x20]\n"
+ "fmla v11.8h, v7.8h, v0.h[1]\n"
+ "fmla v15.8h, v7.8h, v1.h[1]\n"
+ "fmla v19.8h, v7.8h, v2.h[1]\n"
+ "fmla v23.8h, v7.8h, v3.h[1]\n"
+ "ldr q7, [x10, #0x20]\n"
+ "fmla v8.8h, v6.8h, v0.h[2]\n"
+ "fmla v12.8h, v6.8h, v1.h[2]\n"
+ "fmla v16.8h, v6.8h, v2.h[2]\n"
+ "fmla v20.8h, v6.8h, v3.h[2]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "fmla v9.8h, v7.8h, v0.h[2]\n"
+ "fmla v13.8h, v7.8h, v1.h[2]\n"
+ "fmla v17.8h, v7.8h, v2.h[2]\n"
+ "fmla v21.8h, v7.8h, v3.h[2]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "fmla v10.8h, v6.8h, v0.h[2]\n"
+ "fmla v14.8h, v6.8h, v1.h[2]\n"
+ "fmla v18.8h, v6.8h, v2.h[2]\n"
+ "fmla v22.8h, v6.8h, v3.h[2]\n"
+ "ldr q6, [x11, #0x30]\n"
+ "fmla v11.8h, v7.8h, v0.h[2]\n"
+ "fmla v15.8h, v7.8h, v1.h[2]\n"
+ "fmla v19.8h, v7.8h, v2.h[2]\n"
+ "fmla v23.8h, v7.8h, v3.h[2]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "fmla v8.8h, v6.8h, v0.h[3]\n"
+ "fmla v12.8h, v6.8h, v1.h[3]\n"
+ "fmla v16.8h, v6.8h, v2.h[3]\n"
+ "fmla v20.8h, v6.8h, v3.h[3]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "fmla v9.8h, v7.8h, v0.h[3]\n"
+ "fmla v13.8h, v7.8h, v1.h[3]\n"
+ "fmla v17.8h, v7.8h, v2.h[3]\n"
+ "fmla v21.8h, v7.8h, v3.h[3]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "fmla v10.8h, v6.8h, v0.h[3]\n"
+ "fmla v14.8h, v6.8h, v1.h[3]\n"
+ "fmla v18.8h, v6.8h, v2.h[3]\n"
+ "fmla v22.8h, v6.8h, v3.h[3]\n"
+ "ldr q6, [x11, #0x40]\n"
+ "fmla v11.8h, v7.8h, v0.h[3]\n"
+ "fmla v15.8h, v7.8h, v1.h[3]\n"
+ "fmla v19.8h, v7.8h, v2.h[3]\n"
+ "fmla v23.8h, v7.8h, v3.h[3]\n"
+ "ldr q7, [x10, #0x40]\n"
+ "fmla v8.8h, v6.8h, v0.h[4]\n"
+ "fmla v12.8h, v6.8h, v1.h[4]\n"
+ "fmla v16.8h, v6.8h, v2.h[4]\n"
+ "fmla v20.8h, v6.8h, v3.h[4]\n"
+ "ldr q6, [x9, #0x40]\n"
+ "fmla v9.8h, v7.8h, v0.h[4]\n"
+ "fmla v13.8h, v7.8h, v1.h[4]\n"
+ "fmla v17.8h, v7.8h, v2.h[4]\n"
+ "fmla v21.8h, v7.8h, v3.h[4]\n"
+ "ldr q7, [x28, #0x40]\n"
+ "fmla v10.8h, v6.8h, v0.h[4]\n"
+ "fmla v14.8h, v6.8h, v1.h[4]\n"
+ "fmla v18.8h, v6.8h, v2.h[4]\n"
+ "fmla v22.8h, v6.8h, v3.h[4]\n"
+ "ldr q6, [x11, #0x50]\n"
+ "fmla v11.8h, v7.8h, v0.h[4]\n"
+ "fmla v15.8h, v7.8h, v1.h[4]\n"
+ "fmla v19.8h, v7.8h, v2.h[4]\n"
+ "fmla v23.8h, v7.8h, v3.h[4]\n"
+ "ldr q7, [x10, #0x50]\n"
+ "fmla v8.8h, v6.8h, v0.h[5]\n"
+ "fmla v12.8h, v6.8h, v1.h[5]\n"
+ "fmla v16.8h, v6.8h, v2.h[5]\n"
+ "fmla v20.8h, v6.8h, v3.h[5]\n"
+ "ldr q6, [x9, #0x50]\n"
+ "fmla v9.8h, v7.8h, v0.h[5]\n"
+ "fmla v13.8h, v7.8h, v1.h[5]\n"
+ "fmla v17.8h, v7.8h, v2.h[5]\n"
+ "fmla v21.8h, v7.8h, v3.h[5]\n"
+ "ldr q7, [x28, #0x50]\n"
+ "fmla v10.8h, v6.8h, v0.h[5]\n"
+ "fmla v14.8h, v6.8h, v1.h[5]\n"
+ "fmla v18.8h, v6.8h, v2.h[5]\n"
+ "fmla v22.8h, v6.8h, v3.h[5]\n"
+ "ldr q6, [x11, #0x60]\n"
+ "fmla v11.8h, v7.8h, v0.h[5]\n"
+ "fmla v15.8h, v7.8h, v1.h[5]\n"
+ "fmla v19.8h, v7.8h, v2.h[5]\n"
+ "fmla v23.8h, v7.8h, v3.h[5]\n"
+ "ldr q7, [x10, #0x60]\n"
+ "fmla v8.8h, v6.8h, v0.h[6]\n"
+ "fmla v12.8h, v6.8h, v1.h[6]\n"
+ "fmla v16.8h, v6.8h, v2.h[6]\n"
+ "fmla v20.8h, v6.8h, v3.h[6]\n"
+ "ldr q6, [x9, #0x60]\n"
+ "fmla v9.8h, v7.8h, v0.h[6]\n"
+ "fmla v13.8h, v7.8h, v1.h[6]\n"
+ "fmla v17.8h, v7.8h, v2.h[6]\n"
+ "fmla v21.8h, v7.8h, v3.h[6]\n"
+ "ldr q7, [x28, #0x60]\n"
+ "fmla v10.8h, v6.8h, v0.h[6]\n"
+ "fmla v14.8h, v6.8h, v1.h[6]\n"
+ "fmla v18.8h, v6.8h, v2.h[6]\n"
+ "fmla v22.8h, v6.8h, v3.h[6]\n"
+ "ldr q6, [x11, #0x70]\n"
+ "add x11, x11, #0x80\n"
+ "fmla v11.8h, v7.8h, v0.h[6]\n"
+ "fmla v15.8h, v7.8h, v1.h[6]\n"
+ "fmla v19.8h, v7.8h, v2.h[6]\n"
+ "fmla v23.8h, v7.8h, v3.h[6]\n"
+ "ldr q7, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
+ "fmla v8.8h, v6.8h, v0.h[7]\n"
+ "fmla v12.8h, v6.8h, v1.h[7]\n"
+ "fmla v16.8h, v6.8h, v2.h[7]\n"
+ "fmla v20.8h, v6.8h, v3.h[7]\n"
+ "ldr q6, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
+ "fmla v9.8h, v7.8h, v0.h[7]\n"
+ "fmla v13.8h, v7.8h, v1.h[7]\n"
+ "fmla v17.8h, v7.8h, v2.h[7]\n"
+ "fmla v21.8h, v7.8h, v3.h[7]\n"
+ "ldr q7, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
+ "fmla v10.8h, v6.8h, v0.h[7]\n"
+ "fmla v14.8h, v6.8h, v1.h[7]\n"
+ "fmla v18.8h, v6.8h, v2.h[7]\n"
+ "fmla v22.8h, v6.8h, v3.h[7]\n"
+ "fmla v11.8h, v7.8h, v0.h[7]\n"
+ "fmla v15.8h, v7.8h, v1.h[7]\n"
+ "fmla v19.8h, v7.8h, v2.h[7]\n"
+ "fmla v23.8h, v7.8h, v3.h[7]\n"
+ "179:" // Height 4: Multiply loop: Main loop skip
+ "cbz x26, 181f\n"
+ "180:" // Height 4: Multiply loop: Odd block loop
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x24], #0x2\n"
+ "sub x26, x26, #0x1\n"
+ "ldr h2, [x23], #0x2\n"
+ "ldr h3, [x22], #0x2\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "fmla v8.8h, v6.8h, v0.h[0]\n"
+ "fmla v12.8h, v6.8h, v1.h[0]\n"
+ "fmla v16.8h, v6.8h, v2.h[0]\n"
+ "fmla v20.8h, v6.8h, v3.h[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "add x11, x11, #0x10\n"
+ "fmla v9.8h, v7.8h, v0.h[0]\n"
+ "fmla v13.8h, v7.8h, v1.h[0]\n"
+ "add x10, x10, #0x10\n"
+ "add x9, x9, #0x10\n"
+ "fmla v17.8h, v7.8h, v2.h[0]\n"
+ "fmla v21.8h, v7.8h, v3.h[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x28, x28, #0x10\n"
+ "fmla v10.8h, v6.8h, v0.h[0]\n"
+ "fmla v14.8h, v6.8h, v1.h[0]\n"
+ "fmla v18.8h, v6.8h, v2.h[0]\n"
+ "fmla v22.8h, v6.8h, v3.h[0]\n"
+ "fmla v11.8h, v7.8h, v0.h[0]\n"
+ "fmla v15.8h, v7.8h, v1.h[0]\n"
+ "fmla v19.8h, v7.8h, v2.h[0]\n"
+ "fmla v23.8h, v7.8h, v3.h[0]\n"
+ "cbnz x26, 180b\n"
+ "181:" // Height 4: Multiply loop: No odd multiplies
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 174b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "tbz %x[flags], #1, 182f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.8h }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.8h }, [x19]\n"
+ "fmin v8.8h, v8.8h, v1.8h\n"
+ "fmin v9.8h, v9.8h, v1.8h\n"
+ "fmin v10.8h, v10.8h, v1.8h\n"
+ "fmin v11.8h, v11.8h, v1.8h\n"
+ "fmin v12.8h, v12.8h, v1.8h\n"
+ "fmin v13.8h, v13.8h, v1.8h\n"
+ "fmin v14.8h, v14.8h, v1.8h\n"
+ "fmin v15.8h, v15.8h, v1.8h\n"
+ "fmin v16.8h, v16.8h, v1.8h\n"
+ "fmin v17.8h, v17.8h, v1.8h\n"
+ "fmin v18.8h, v18.8h, v1.8h\n"
+ "fmin v19.8h, v19.8h, v1.8h\n"
+ "fmin v20.8h, v20.8h, v1.8h\n"
+ "fmin v21.8h, v21.8h, v1.8h\n"
+ "fmin v22.8h, v22.8h, v1.8h\n"
+ "fmin v23.8h, v23.8h, v1.8h\n"
+ "fmax v8.8h, v8.8h, v0.8h\n"
+ "fmax v9.8h, v9.8h, v0.8h\n"
+ "fmax v10.8h, v10.8h, v0.8h\n"
+ "fmax v11.8h, v11.8h, v0.8h\n"
+ "fmax v12.8h, v12.8h, v0.8h\n"
+ "fmax v13.8h, v13.8h, v0.8h\n"
+ "fmax v14.8h, v14.8h, v0.8h\n"
+ "fmax v15.8h, v15.8h, v0.8h\n"
+ "fmax v16.8h, v16.8h, v0.8h\n"
+ "fmax v17.8h, v17.8h, v0.8h\n"
+ "fmax v18.8h, v18.8h, v0.8h\n"
+ "fmax v19.8h, v19.8h, v0.8h\n"
+ "fmax v20.8h, v20.8h, v0.8h\n"
+ "fmax v21.8h, v21.8h, v0.8h\n"
+ "fmax v22.8h, v22.8h, v0.8h\n"
+ "fmax v23.8h, v23.8h, v0.8h\n"
+ "182:" // Height 4: No activation
+ "cmp x13, #0x20\n"
+ "bge 199f\n"
+ "tbz x13, #4, 190f\n"
+ "st1 { v8.8h }, [x12], #0x10\n"
+ "st1 { v9.8h }, [x12], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "st1 { v13.8h }, [x24], #0x10\n"
+ "st1 { v16.8h }, [x23], #0x10\n"
+ "st1 { v17.8h }, [x23], #0x10\n"
+ "st1 { v20.8h }, [x22], #0x10\n"
+ "st1 { v21.8h }, [x22], #0x10\n"
+ "tbz x13, #3, 186f\n"
+ "st1 { v10.8h }, [x12], #0x10\n"
+ "st1 { v14.8h }, [x24], #0x10\n"
+ "st1 { v18.8h }, [x23], #0x10\n"
+ "st1 { v22.8h }, [x22], #0x10\n"
+ "tbz x13, #2, 184f\n"
+ "str d11, [x12], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "str d23, [x22], #0x8\n"
+ "tbz x13, #1, 183f\n"
+ "st1 { v11.s }[2], [x12], #0x4\n"
+ "st1 { v15.s }[2], [x24], #0x4\n"
+ "st1 { v19.s }[2], [x23], #0x4\n"
+ "st1 { v23.s }[2], [x22], #0x4\n"
+ "tbz x13, #0, 198f\n"
+ "st1 { v11.h }[6], [x12]\n"
+ "st1 { v15.h }[6], [x24]\n"
+ "st1 { v19.h }[6], [x23]\n"
+ "st1 { v23.h }[6], [x22]\n"
+ "b 198f\n"
+ "183:" // Height 4: Partial direct writeback: partial_1_28
+ "tbz x13, #0, 198f\n"
+ "st1 { v11.h }[4], [x12]\n"
+ "st1 { v15.h }[4], [x24]\n"
+ "st1 { v19.h }[4], [x23]\n"
+ "st1 { v23.h }[4], [x22]\n"
+ "b 198f\n"
+ "184:" // Height 4: Partial direct writeback: partial_2_24
+ "tbz x13, #1, 185f\n"
+ "str s11, [x12], #0x4\n"
+ "str s15, [x24], #0x4\n"
+ "str s19, [x23], #0x4\n"
+ "str s23, [x22], #0x4\n"
+ "tbz x13, #0, 198f\n"
+ "st1 { v11.h }[2], [x12]\n"
+ "st1 { v15.h }[2], [x24]\n"
+ "st1 { v19.h }[2], [x23]\n"
+ "st1 { v23.h }[2], [x22]\n"
+ "b 198f\n"
+ "185:" // Height 4: Partial direct writeback: partial_1_24
+ "tbz x13, #0, 198f\n"
+ "str h11, [x12, #0x0]\n"
+ "str h15, [x24, #0x0]\n"
+ "str h19, [x23, #0x0]\n"
+ "str h23, [x22, #0x0]\n"
+ "b 198f\n"
+ "186:" // Height 4: Partial direct writeback: partial_4_16
+ "tbz x13, #2, 188f\n"
+ "str d10, [x12], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "tbz x13, #1, 187f\n"
+ "st1 { v10.s }[2], [x12], #0x4\n"
+ "st1 { v14.s }[2], [x24], #0x4\n"
+ "st1 { v18.s }[2], [x23], #0x4\n"
+ "st1 { v22.s }[2], [x22], #0x4\n"
+ "tbz x13, #0, 198f\n"
+ "st1 { v10.h }[6], [x12]\n"
+ "st1 { v14.h }[6], [x24]\n"
+ "st1 { v18.h }[6], [x23]\n"
+ "st1 { v22.h }[6], [x22]\n"
+ "b 198f\n"
+ "187:" // Height 4: Partial direct writeback: partial_1_20
+ "tbz x13, #0, 198f\n"
+ "st1 { v10.h }[4], [x12]\n"
+ "st1 { v14.h }[4], [x24]\n"
+ "st1 { v18.h }[4], [x23]\n"
+ "st1 { v22.h }[4], [x22]\n"
+ "b 198f\n"
+ "188:" // Height 4: Partial direct writeback: partial_2_16
+ "tbz x13, #1, 189f\n"
+ "str s10, [x12], #0x4\n"
+ "str s14, [x24], #0x4\n"
+ "str s18, [x23], #0x4\n"
+ "str s22, [x22], #0x4\n"
+ "tbz x13, #0, 198f\n"
+ "st1 { v10.h }[2], [x12]\n"
+ "st1 { v14.h }[2], [x24]\n"
+ "st1 { v18.h }[2], [x23]\n"
+ "st1 { v22.h }[2], [x22]\n"
+ "b 198f\n"
+ "189:" // Height 4: Partial direct writeback: partial_1_16
+ "tbz x13, #0, 198f\n"
+ "str h10, [x12, #0x0]\n"
+ "str h14, [x24, #0x0]\n"
+ "str h18, [x23, #0x0]\n"
+ "str h22, [x22, #0x0]\n"
+ "b 198f\n"
+ "190:" // Height 4: Partial direct writeback: partial_8_0
+ "tbz x13, #3, 194f\n"
+ "st1 { v8.8h }, [x12], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "st1 { v16.8h }, [x23], #0x10\n"
+ "st1 { v20.8h }, [x22], #0x10\n"
+ "tbz x13, #2, 192f\n"
+ "str d9, [x12], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "tbz x13, #1, 191f\n"
+ "st1 { v9.s }[2], [x12], #0x4\n"
+ "st1 { v13.s }[2], [x24], #0x4\n"
+ "st1 { v17.s }[2], [x23], #0x4\n"
+ "st1 { v21.s }[2], [x22], #0x4\n"
+ "tbz x13, #0, 198f\n"
+ "st1 { v9.h }[6], [x12]\n"
+ "st1 { v13.h }[6], [x24]\n"
+ "st1 { v17.h }[6], [x23]\n"
+ "st1 { v21.h }[6], [x22]\n"
+ "b 198f\n"
+ "191:" // Height 4: Partial direct writeback: partial_1_12
+ "tbz x13, #0, 198f\n"
+ "st1 { v9.h }[4], [x12]\n"
+ "st1 { v13.h }[4], [x24]\n"
+ "st1 { v17.h }[4], [x23]\n"
+ "st1 { v21.h }[4], [x22]\n"
+ "b 198f\n"
+ "192:" // Height 4: Partial direct writeback: partial_2_8
+ "tbz x13, #1, 193f\n"
+ "str s9, [x12], #0x4\n"
+ "str s13, [x24], #0x4\n"
+ "str s17, [x23], #0x4\n"
+ "str s21, [x22], #0x4\n"
+ "tbz x13, #0, 198f\n"
+ "st1 { v9.h }[2], [x12]\n"
+ "st1 { v13.h }[2], [x24]\n"
+ "st1 { v17.h }[2], [x23]\n"
+ "st1 { v21.h }[2], [x22]\n"
+ "b 198f\n"
+ "193:" // Height 4: Partial direct writeback: partial_1_8
+ "tbz x13, #0, 198f\n"
+ "str h9, [x12, #0x0]\n"
+ "str h13, [x24, #0x0]\n"
+ "str h17, [x23, #0x0]\n"
+ "str h21, [x22, #0x0]\n"
+ "b 198f\n"
+ "194:" // Height 4: Partial direct writeback: partial_4_0
+ "tbz x13, #2, 196f\n"
+ "str d8, [x12], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "tbz x13, #1, 195f\n"
+ "st1 { v8.s }[2], [x12], #0x4\n"
+ "st1 { v12.s }[2], [x24], #0x4\n"
+ "st1 { v16.s }[2], [x23], #0x4\n"
+ "st1 { v20.s }[2], [x22], #0x4\n"
+ "tbz x13, #0, 198f\n"
+ "st1 { v8.h }[6], [x12]\n"
+ "st1 { v12.h }[6], [x24]\n"
+ "st1 { v16.h }[6], [x23]\n"
+ "st1 { v20.h }[6], [x22]\n"
+ "b 198f\n"
+ "195:" // Height 4: Partial direct writeback: partial_1_4
+ "tbz x13, #0, 198f\n"
+ "st1 { v8.h }[4], [x12]\n"
+ "st1 { v12.h }[4], [x24]\n"
+ "st1 { v16.h }[4], [x23]\n"
+ "st1 { v20.h }[4], [x22]\n"
+ "b 198f\n"
+ "196:" // Height 4: Partial direct writeback: partial_2_0
+ "tbz x13, #1, 197f\n"
+ "str s8, [x12], #0x4\n"
+ "str s12, [x24], #0x4\n"
+ "str s16, [x23], #0x4\n"
+ "str s20, [x22], #0x4\n"
+ "tbz x13, #0, 198f\n"
+ "st1 { v8.h }[2], [x12]\n"
+ "st1 { v12.h }[2], [x24]\n"
+ "st1 { v16.h }[2], [x23]\n"
+ "st1 { v20.h }[2], [x22]\n"
+ "b 198f\n"
+ "197:" // Height 4: Partial direct writeback: partial_1_0
+ "str h8, [x12, #0x0]\n"
+ "str h12, [x24, #0x0]\n"
+ "str h16, [x23, #0x0]\n"
+ "str h20, [x22, #0x0]\n"
+ "198:" // Height 4: Partial direct writeback: Done
+ "b 200f\n"
+ "199:" // Height 4: Full writeback
+ "str q8, [x12, #0x0]\n"
+ "str q9, [x12, #0x10]\n"
+ "str q10, [x12, #0x20]\n"
+ "str q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q20, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q22, [x22, #0x20]\n"
+ "str q23, [x22, #0x30]\n"
+ "200:" // Height 4: Writeback done
+ "subs x13, x13, #0x20\n"
+ "bgt 152b\n"
+ "b 302f\n"
+ "201:" // Height 5
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "202:" // Height 5: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0x18\n"
+ "bgt 203f\n"
+ "cmp x13, #0x10\n"
+ "mov x28, x11\n"
+ "bgt 203f\n"
+ "cmp x13, #0x8\n"
+ "mov x9, x11\n"
+ "bgt 203f\n"
+ "mov x10, x11\n"
+ "203:" // Height 5: B setup done
+ "cbz x14, 204f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
+ "mov v16.16b, v8.16b\n"
+ "mov v17.16b, v9.16b\n"
+ "add x14, x14, #0x40\n"
+ "mov v18.16b, v10.16b\n"
+ "mov v19.16b, v11.16b\n"
+ "mov v20.16b, v8.16b\n"
+ "mov v21.16b, v9.16b\n"
+ "mov v22.16b, v10.16b\n"
+ "mov v23.16b, v11.16b\n"
+ "mov v24.16b, v8.16b\n"
+ "mov v25.16b, v9.16b\n"
+ "mov v26.16b, v10.16b\n"
+ "mov v27.16b, v11.16b\n"
+ "b 223f\n"
+ "204:" // Height 5: no bias
+ "tbz %x[flags], #0, 222f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "cmp x13, #0x20\n"
+ "add x21, x22, x19, LSL #1\n"
+ "bge 221f\n"
+ "tbz x13, #4, 212f\n"
+ "ld1 { v8.8h }, [x12], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "ld1 { v16.8h }, [x23], #0x10\n"
+ "ld1 { v20.8h }, [x22], #0x10\n"
+ "ld1 { v24.8h }, [x21], #0x10\n"
+ "ld1 { v9.8h }, [x12], #0x10\n"
+ "ld1 { v13.8h }, [x24], #0x10\n"
+ "ld1 { v17.8h }, [x23], #0x10\n"
+ "ld1 { v21.8h }, [x22], #0x10\n"
+ "ld1 { v25.8h }, [x21], #0x10\n"
+ "tbz x13, #3, 208f\n"
+ "ld1 { v10.8h }, [x12], #0x10\n"
+ "ld1 { v14.8h }, [x24], #0x10\n"
+ "ld1 { v18.8h }, [x23], #0x10\n"
+ "ld1 { v22.8h }, [x22], #0x10\n"
+ "ld1 { v26.8h }, [x21], #0x10\n"
+ "tbz x13, #2, 206f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d27, [x21], #0x8\n"
+ "tbz x13, #1, 205f\n"
+ "ld1 { v11.s }[2], [x12], #0x4\n"
+ "ld1 { v15.s }[2], [x24], #0x4\n"
+ "mov x19, #0x3c\n"
+ "ld1 { v19.s }[2], [x23], #0x4\n"
+ "ld1 { v23.s }[2], [x22], #0x4\n"
+ "ld1 { v27.s }[2], [x21], #0x4\n"
+ "tbz x13, #0, 220f\n"
+ "ld1 { v11.h }[6], [x12]\n"
+ "ld1 { v15.h }[6], [x24]\n"
+ "ld1 { v19.h }[6], [x23]\n"
+ "ld1 { v23.h }[6], [x22]\n"
+ "ld1 { v27.h }[6], [x21]\n"
+ "b 220f\n"
+ "205:" // Height 5: Partial accumulate: partial_1_28
+ "mov x19, #0x38\n"
+ "tbz x13, #0, 220f\n"
+ "ld1 { v11.h }[4], [x12]\n"
+ "ld1 { v15.h }[4], [x24]\n"
+ "ld1 { v19.h }[4], [x23]\n"
+ "ld1 { v23.h }[4], [x22]\n"
+ "ld1 { v27.h }[4], [x21]\n"
+ "b 220f\n"
+ "206:" // Height 5: Partial accumulate: partial_2_24
+ "tbz x13, #1, 207f\n"
+ "ldr s11, [x12], #0x4\n"
+ "ldr s15, [x24], #0x4\n"
+ "mov x19, #0x34\n"
+ "ldr s19, [x23], #0x4\n"
+ "ldr s23, [x22], #0x4\n"
+ "ldr s27, [x21], #0x4\n"
+ "tbz x13, #0, 220f\n"
+ "ld1 { v11.h }[2], [x12]\n"
+ "ld1 { v15.h }[2], [x24]\n"
+ "ld1 { v19.h }[2], [x23]\n"
+ "ld1 { v23.h }[2], [x22]\n"
+ "ld1 { v27.h }[2], [x21]\n"
+ "b 220f\n"
+ "207:" // Height 5: Partial accumulate: partial_1_24
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 220f\n"
+ "ldr h11, [x12, #0x0]\n"
+ "ldr h15, [x24, #0x0]\n"
+ "ldr h19, [x23, #0x0]\n"
+ "ldr h23, [x22, #0x0]\n"
+ "ldr h27, [x21, #0x0]\n"
+ "b 220f\n"
+ "208:" // Height 5: Partial accumulate: partial_4_16
+ "tbz x13, #2, 210f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
+ "tbz x13, #1, 209f\n"
+ "ld1 { v10.s }[2], [x12], #0x4\n"
+ "ld1 { v14.s }[2], [x24], #0x4\n"
+ "mov x19, #0x2c\n"
+ "ld1 { v18.s }[2], [x23], #0x4\n"
+ "ld1 { v22.s }[2], [x22], #0x4\n"
+ "ld1 { v26.s }[2], [x21], #0x4\n"
+ "tbz x13, #0, 220f\n"
+ "ld1 { v10.h }[6], [x12]\n"
+ "ld1 { v14.h }[6], [x24]\n"
+ "ld1 { v18.h }[6], [x23]\n"
+ "ld1 { v22.h }[6], [x22]\n"
+ "ld1 { v26.h }[6], [x21]\n"
+ "b 220f\n"
+ "209:" // Height 5: Partial accumulate: partial_1_20
+ "mov x19, #0x28\n"
+ "tbz x13, #0, 220f\n"
+ "ld1 { v10.h }[4], [x12]\n"
+ "ld1 { v14.h }[4], [x24]\n"
+ "ld1 { v18.h }[4], [x23]\n"
+ "ld1 { v22.h }[4], [x22]\n"
+ "ld1 { v26.h }[4], [x21]\n"
+ "b 220f\n"
+ "210:" // Height 5: Partial accumulate: partial_2_16
+ "tbz x13, #1, 211f\n"
+ "ldr s10, [x12], #0x4\n"
+ "ldr s14, [x24], #0x4\n"
+ "mov x19, #0x24\n"
+ "ldr s18, [x23], #0x4\n"
+ "ldr s22, [x22], #0x4\n"
+ "ldr s26, [x21], #0x4\n"
+ "tbz x13, #0, 220f\n"
+ "ld1 { v10.h }[2], [x12]\n"
+ "ld1 { v14.h }[2], [x24]\n"
+ "ld1 { v18.h }[2], [x23]\n"
+ "ld1 { v22.h }[2], [x22]\n"
+ "ld1 { v26.h }[2], [x21]\n"
+ "b 220f\n"
+ "211:" // Height 5: Partial accumulate: partial_1_16
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 220f\n"
+ "ldr h10, [x12, #0x0]\n"
+ "ldr h14, [x24, #0x0]\n"
+ "ldr h18, [x23, #0x0]\n"
+ "ldr h22, [x22, #0x0]\n"
+ "ldr h26, [x21, #0x0]\n"
+ "b 220f\n"
+ "212:" // Height 5: Partial accumulate: partial_8_0
+ "tbz x13, #3, 216f\n"
+ "ld1 { v8.8h }, [x12], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "ld1 { v16.8h }, [x23], #0x10\n"
+ "ld1 { v20.8h }, [x22], #0x10\n"
+ "ld1 { v24.8h }, [x21], #0x10\n"
+ "tbz x13, #2, 214f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d25, [x21], #0x8\n"
+ "tbz x13, #1, 213f\n"
+ "ld1 { v9.s }[2], [x12], #0x4\n"
+ "ld1 { v13.s }[2], [x24], #0x4\n"
+ "mov x19, #0x1c\n"
+ "ld1 { v17.s }[2], [x23], #0x4\n"
+ "ld1 { v21.s }[2], [x22], #0x4\n"
+ "ld1 { v25.s }[2], [x21], #0x4\n"
+ "tbz x13, #0, 220f\n"
+ "ld1 { v9.h }[6], [x12]\n"
+ "ld1 { v13.h }[6], [x24]\n"
+ "ld1 { v17.h }[6], [x23]\n"
+ "ld1 { v21.h }[6], [x22]\n"
+ "ld1 { v25.h }[6], [x21]\n"
+ "b 220f\n"
+ "213:" // Height 5: Partial accumulate: partial_1_12
+ "mov x19, #0x18\n"
+ "tbz x13, #0, 220f\n"
+ "ld1 { v9.h }[4], [x12]\n"
+ "ld1 { v13.h }[4], [x24]\n"
+ "ld1 { v17.h }[4], [x23]\n"
+ "ld1 { v21.h }[4], [x22]\n"
+ "ld1 { v25.h }[4], [x21]\n"
+ "b 220f\n"
+ "214:" // Height 5: Partial accumulate: partial_2_8
+ "tbz x13, #1, 215f\n"
+ "ldr s9, [x12], #0x4\n"
+ "ldr s13, [x24], #0x4\n"
+ "mov x19, #0x14\n"
+ "ldr s17, [x23], #0x4\n"
+ "ldr s21, [x22], #0x4\n"
+ "ldr s25, [x21], #0x4\n"
+ "tbz x13, #0, 220f\n"
+ "ld1 { v9.h }[2], [x12]\n"
+ "ld1 { v13.h }[2], [x24]\n"
+ "ld1 { v17.h }[2], [x23]\n"
+ "ld1 { v21.h }[2], [x22]\n"
+ "ld1 { v25.h }[2], [x21]\n"
+ "b 220f\n"
+ "215:" // Height 5: Partial accumulate: partial_1_8
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 220f\n"
+ "ldr h9, [x12, #0x0]\n"
+ "ldr h13, [x24, #0x0]\n"
+ "ldr h17, [x23, #0x0]\n"
+ "ldr h21, [x22, #0x0]\n"
+ "ldr h25, [x21, #0x0]\n"
+ "b 220f\n"
+ "216:" // Height 5: Partial accumulate: partial_4_0
+ "tbz x13, #2, 218f\n"
+ "ldr d8, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d24, [x21], #0x8\n"
+ "tbz x13, #1, 217f\n"
+ "ld1 { v8.s }[2], [x12], #0x4\n"
+ "ld1 { v12.s }[2], [x24], #0x4\n"
+ "mov x19, #0xc\n"
+ "ld1 { v16.s }[2], [x23], #0x4\n"
+ "ld1 { v20.s }[2], [x22], #0x4\n"
+ "ld1 { v24.s }[2], [x21], #0x4\n"
+ "tbz x13, #0, 220f\n"
+ "ld1 { v8.h }[6], [x12]\n"
+ "ld1 { v12.h }[6], [x24]\n"
+ "ld1 { v16.h }[6], [x23]\n"
+ "ld1 { v20.h }[6], [x22]\n"
+ "ld1 { v24.h }[6], [x21]\n"
+ "b 220f\n"
+ "217:" // Height 5: Partial accumulate: partial_1_4
+ "mov x19, #0x8\n"
+ "tbz x13, #0, 220f\n"
+ "ld1 { v8.h }[4], [x12]\n"
+ "ld1 { v12.h }[4], [x24]\n"
+ "ld1 { v16.h }[4], [x23]\n"
+ "ld1 { v20.h }[4], [x22]\n"
+ "ld1 { v24.h }[4], [x21]\n"
+ "b 220f\n"
+ "218:" // Height 5: Partial accumulate: partial_2_0
+ "tbz x13, #1, 219f\n"
+ "ldr s8, [x12], #0x4\n"
+ "ldr s12, [x24], #0x4\n"
+ "mov x19, #0x4\n"
+ "ldr s16, [x23], #0x4\n"
+ "ldr s20, [x22], #0x4\n"
+ "ldr s24, [x21], #0x4\n"
+ "tbz x13, #0, 220f\n"
+ "ld1 { v8.h }[2], [x12]\n"
+ "ld1 { v12.h }[2], [x24]\n"
+ "ld1 { v16.h }[2], [x23]\n"
+ "ld1 { v20.h }[2], [x22]\n"
+ "ld1 { v24.h }[2], [x21]\n"
+ "b 220f\n"
+ "219:" // Height 5: Partial accumulate: partial_1_0
+ "ldr h8, [x12, #0x0]\n"
+ "ldr h12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr h16, [x23, #0x0]\n"
+ "ldr h20, [x22, #0x0]\n"
+ "ldr h24, [x21, #0x0]\n"
+ "220:" // Height 5: Partial accumulate: Done
+ "sub x12, x12, x19\n"
+ "b 223f\n"
+ "221:" // Height 5: full accumulate
+ "ldr q8, [x12, #0x0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "ldr q10, [x12, #0x20]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "ldr q24, [x21, #0x0]\n"
+ "ldr q25, [x21, #0x10]\n"
+ "ldr q26, [x21, #0x20]\n"
+ "ldr q27, [x21, #0x30]\n"
+ "b 223f\n"
+ "222:" // Height 5: no accumulate
+ "movi v8.16b, #0x0\n"
+ "movi v9.16b, #0x0\n"
+ "movi v10.16b, #0x0\n"
+ "movi v11.16b, #0x0\n"
+ "movi v12.16b, #0x0\n"
+ "movi v13.16b, #0x0\n"
+ "movi v14.16b, #0x0\n"
+ "movi v15.16b, #0x0\n"
+ "movi v16.16b, #0x0\n"
+ "movi v17.16b, #0x0\n"
+ "movi v18.16b, #0x0\n"
+ "movi v19.16b, #0x0\n"
+ "movi v20.16b, #0x0\n"
+ "movi v21.16b, #0x0\n"
+ "movi v22.16b, #0x0\n"
+ "movi v23.16b, #0x0\n"
+ "movi v24.16b, #0x0\n"
+ "movi v25.16b, #0x0\n"
+ "movi v26.16b, #0x0\n"
+ "movi v27.16b, #0x0\n"
+ "223:" // Height 5: setup done
+ "mov x27, #0x0\n"
+ "224:" // Height 5: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 225f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 226f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
+ "b 226f\n"
+ "225:" // Height 5: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "226:" // Height 5: input setup done
+ "cmp x26, #0x8\n"
+ "blt 229f\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "blt 228f\n"
+ "227:" // Height 5: Multiply loop: Main loop head
+ "fmla v8.8h, v6.8h, v0.h[0]\n"
+ "fmla v12.8h, v6.8h, v1.h[0]\n"
+ "sub x26, x26, #0x8\n"
+ "cmp x26, #0x10\n"
+ "fmla v16.8h, v6.8h, v2.h[0]\n"
+ "fmla v20.8h, v6.8h, v3.h[0]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "fmla v24.8h, v6.8h, v4.h[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "fmla v9.8h, v7.8h, v0.h[0]\n"
+ "add x23, x23, #0x10\n"
+ "fmla v13.8h, v7.8h, v1.h[0]\n"
+ "fmla v17.8h, v7.8h, v2.h[0]\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ "fmla v21.8h, v7.8h, v3.h[0]\n"
+ "fmla v25.8h, v7.8h, v4.h[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "fmla v10.8h, v6.8h, v0.h[0]\n"
+ "fmla v14.8h, v6.8h, v1.h[0]\n"
+ "fmla v18.8h, v6.8h, v2.h[0]\n"
+ "fmla v22.8h, v6.8h, v3.h[0]\n"
+ "fmla v26.8h, v6.8h, v4.h[0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "fmla v11.8h, v7.8h, v0.h[0]\n"
+ "fmla v15.8h, v7.8h, v1.h[0]\n"
+ "fmla v19.8h, v7.8h, v2.h[0]\n"
+ "fmla v23.8h, v7.8h, v3.h[0]\n"
+ "fmla v27.8h, v7.8h, v4.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "fmla v8.8h, v6.8h, v0.h[1]\n"
+ "fmla v12.8h, v6.8h, v1.h[1]\n"
+ "fmla v16.8h, v6.8h, v2.h[1]\n"
+ "fmla v20.8h, v6.8h, v3.h[1]\n"
+ "fmla v24.8h, v6.8h, v4.h[1]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "fmla v9.8h, v7.8h, v0.h[1]\n"
+ "fmla v13.8h, v7.8h, v1.h[1]\n"
+ "fmla v17.8h, v7.8h, v2.h[1]\n"
+ "fmla v21.8h, v7.8h, v3.h[1]\n"
+ "fmla v25.8h, v7.8h, v4.h[1]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "fmla v10.8h, v6.8h, v0.h[1]\n"
+ "fmla v14.8h, v6.8h, v1.h[1]\n"
+ "fmla v18.8h, v6.8h, v2.h[1]\n"
+ "fmla v22.8h, v6.8h, v3.h[1]\n"
+ "fmla v26.8h, v6.8h, v4.h[1]\n"
+ "ldr q6, [x11, #0x20]\n"
+ "fmla v11.8h, v7.8h, v0.h[1]\n"
+ "fmla v15.8h, v7.8h, v1.h[1]\n"
+ "fmla v19.8h, v7.8h, v2.h[1]\n"
+ "fmla v23.8h, v7.8h, v3.h[1]\n"
+ "fmla v27.8h, v7.8h, v4.h[1]\n"
+ "ldr q7, [x10, #0x20]\n"
+ "fmla v8.8h, v6.8h, v0.h[2]\n"
+ "fmla v12.8h, v6.8h, v1.h[2]\n"
+ "fmla v16.8h, v6.8h, v2.h[2]\n"
+ "fmla v20.8h, v6.8h, v3.h[2]\n"
+ "fmla v24.8h, v6.8h, v4.h[2]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "fmla v9.8h, v7.8h, v0.h[2]\n"
+ "fmla v13.8h, v7.8h, v1.h[2]\n"
+ "fmla v17.8h, v7.8h, v2.h[2]\n"
+ "fmla v21.8h, v7.8h, v3.h[2]\n"
+ "fmla v25.8h, v7.8h, v4.h[2]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "fmla v10.8h, v6.8h, v0.h[2]\n"
+ "fmla v14.8h, v6.8h, v1.h[2]\n"
+ "fmla v18.8h, v6.8h, v2.h[2]\n"
+ "fmla v22.8h, v6.8h, v3.h[2]\n"
+ "fmla v26.8h, v6.8h, v4.h[2]\n"
+ "ldr q6, [x11, #0x30]\n"
+ "fmla v11.8h, v7.8h, v0.h[2]\n"
+ "fmla v15.8h, v7.8h, v1.h[2]\n"
+ "fmla v19.8h, v7.8h, v2.h[2]\n"
+ "fmla v23.8h, v7.8h, v3.h[2]\n"
+ "fmla v27.8h, v7.8h, v4.h[2]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "fmla v8.8h, v6.8h, v0.h[3]\n"
+ "fmla v12.8h, v6.8h, v1.h[3]\n"
+ "fmla v16.8h, v6.8h, v2.h[3]\n"
+ "fmla v20.8h, v6.8h, v3.h[3]\n"
+ "fmla v24.8h, v6.8h, v4.h[3]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "fmla v9.8h, v7.8h, v0.h[3]\n"
+ "fmla v13.8h, v7.8h, v1.h[3]\n"
+ "fmla v17.8h, v7.8h, v2.h[3]\n"
+ "fmla v21.8h, v7.8h, v3.h[3]\n"
+ "fmla v25.8h, v7.8h, v4.h[3]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "fmla v10.8h, v6.8h, v0.h[3]\n"
+ "fmla v14.8h, v6.8h, v1.h[3]\n"
+ "fmla v18.8h, v6.8h, v2.h[3]\n"
+ "fmla v22.8h, v6.8h, v3.h[3]\n"
+ "fmla v26.8h, v6.8h, v4.h[3]\n"
+ "ldr q6, [x11, #0x40]\n"
+ "fmla v11.8h, v7.8h, v0.h[3]\n"
+ "fmla v15.8h, v7.8h, v1.h[3]\n"
+ "fmla v19.8h, v7.8h, v2.h[3]\n"
+ "fmla v23.8h, v7.8h, v3.h[3]\n"
+ "fmla v27.8h, v7.8h, v4.h[3]\n"
+ "ldr q7, [x10, #0x40]\n"
+ "fmla v8.8h, v6.8h, v0.h[4]\n"
+ "fmla v12.8h, v6.8h, v1.h[4]\n"
+ "fmla v16.8h, v6.8h, v2.h[4]\n"
+ "fmla v20.8h, v6.8h, v3.h[4]\n"
+ "fmla v24.8h, v6.8h, v4.h[4]\n"
+ "ldr q6, [x9, #0x40]\n"
+ "fmla v9.8h, v7.8h, v0.h[4]\n"
+ "fmla v13.8h, v7.8h, v1.h[4]\n"
+ "fmla v17.8h, v7.8h, v2.h[4]\n"
+ "fmla v21.8h, v7.8h, v3.h[4]\n"
+ "fmla v25.8h, v7.8h, v4.h[4]\n"
+ "ldr q7, [x28, #0x40]\n"
+ "fmla v10.8h, v6.8h, v0.h[4]\n"
+ "fmla v14.8h, v6.8h, v1.h[4]\n"
+ "fmla v18.8h, v6.8h, v2.h[4]\n"
+ "fmla v22.8h, v6.8h, v3.h[4]\n"
+ "fmla v26.8h, v6.8h, v4.h[4]\n"
+ "ldr q6, [x11, #0x50]\n"
+ "fmla v11.8h, v7.8h, v0.h[4]\n"
+ "fmla v15.8h, v7.8h, v1.h[4]\n"
+ "fmla v19.8h, v7.8h, v2.h[4]\n"
+ "fmla v23.8h, v7.8h, v3.h[4]\n"
+ "fmla v27.8h, v7.8h, v4.h[4]\n"
+ "ldr q7, [x10, #0x50]\n"
+ "fmla v8.8h, v6.8h, v0.h[5]\n"
+ "fmla v12.8h, v6.8h, v1.h[5]\n"
+ "fmla v16.8h, v6.8h, v2.h[5]\n"
+ "fmla v20.8h, v6.8h, v3.h[5]\n"
+ "fmla v24.8h, v6.8h, v4.h[5]\n"
+ "ldr q6, [x9, #0x50]\n"
+ "fmla v9.8h, v7.8h, v0.h[5]\n"
+ "fmla v13.8h, v7.8h, v1.h[5]\n"
+ "fmla v17.8h, v7.8h, v2.h[5]\n"
+ "fmla v21.8h, v7.8h, v3.h[5]\n"
+ "fmla v25.8h, v7.8h, v4.h[5]\n"
+ "ldr q7, [x28, #0x50]\n"
+ "fmla v10.8h, v6.8h, v0.h[5]\n"
+ "fmla v14.8h, v6.8h, v1.h[5]\n"
+ "fmla v18.8h, v6.8h, v2.h[5]\n"
+ "fmla v22.8h, v6.8h, v3.h[5]\n"
+ "fmla v26.8h, v6.8h, v4.h[5]\n"
+ "ldr q6, [x11, #0x60]\n"
+ "fmla v11.8h, v7.8h, v0.h[5]\n"
+ "fmla v15.8h, v7.8h, v1.h[5]\n"
+ "fmla v19.8h, v7.8h, v2.h[5]\n"
+ "fmla v23.8h, v7.8h, v3.h[5]\n"
+ "fmla v27.8h, v7.8h, v4.h[5]\n"
+ "ldr q7, [x10, #0x60]\n"
+ "fmla v8.8h, v6.8h, v0.h[6]\n"
+ "fmla v12.8h, v6.8h, v1.h[6]\n"
+ "fmla v16.8h, v6.8h, v2.h[6]\n"
+ "fmla v20.8h, v6.8h, v3.h[6]\n"
+ "fmla v24.8h, v6.8h, v4.h[6]\n"
+ "ldr q6, [x9, #0x60]\n"
+ "fmla v9.8h, v7.8h, v0.h[6]\n"
+ "fmla v13.8h, v7.8h, v1.h[6]\n"
+ "fmla v17.8h, v7.8h, v2.h[6]\n"
+ "fmla v21.8h, v7.8h, v3.h[6]\n"
+ "fmla v25.8h, v7.8h, v4.h[6]\n"
+ "ldr q7, [x28, #0x60]\n"
+ "fmla v10.8h, v6.8h, v0.h[6]\n"
+ "fmla v14.8h, v6.8h, v1.h[6]\n"
+ "fmla v18.8h, v6.8h, v2.h[6]\n"
+ "fmla v22.8h, v6.8h, v3.h[6]\n"
+ "fmla v26.8h, v6.8h, v4.h[6]\n"
+ "ldr q6, [x11, #0x70]\n"
+ "fmla v11.8h, v7.8h, v0.h[6]\n"
+ "add x11, x11, #0x80\n"
+ "fmla v15.8h, v7.8h, v1.h[6]\n"
+ "fmla v19.8h, v7.8h, v2.h[6]\n"
+ "fmla v23.8h, v7.8h, v3.h[6]\n"
+ "fmla v27.8h, v7.8h, v4.h[6]\n"
+ "ldr q7, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
+ "fmla v8.8h, v6.8h, v0.h[7]\n"
+ "fmla v12.8h, v6.8h, v1.h[7]\n"
+ "fmla v16.8h, v6.8h, v2.h[7]\n"
+ "fmla v20.8h, v6.8h, v3.h[7]\n"
+ "fmla v24.8h, v6.8h, v4.h[7]\n"
+ "ldr q6, [x9, #0x70]\n"
+ "fmla v9.8h, v7.8h, v0.h[7]\n"
+ "add x9, x9, #0x80\n"
+ "fmla v13.8h, v7.8h, v1.h[7]\n"
+ "fmla v17.8h, v7.8h, v2.h[7]\n"
+ "fmla v21.8h, v7.8h, v3.h[7]\n"
+ "fmla v25.8h, v7.8h, v4.h[7]\n"
+ "ldr q7, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
+ "fmla v10.8h, v6.8h, v0.h[7]\n"
+ "fmla v14.8h, v6.8h, v1.h[7]\n"
+ "fmla v18.8h, v6.8h, v2.h[7]\n"
+ "fmla v22.8h, v6.8h, v3.h[7]\n"
+ "fmla v26.8h, v6.8h, v4.h[7]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "fmla v11.8h, v7.8h, v0.h[7]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "fmla v15.8h, v7.8h, v1.h[7]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "fmla v19.8h, v7.8h, v2.h[7]\n"
+ "ldr q2, [x23, #0x0]\n"
+ "fmla v23.8h, v7.8h, v3.h[7]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "fmla v27.8h, v7.8h, v4.h[7]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "bge 227b\n"
+ "228:" // Height 5: Multiply loop: Single iteration only
+ "fmla v8.8h, v6.8h, v0.h[0]\n"
+ "fmla v12.8h, v6.8h, v1.h[0]\n"
+ "sub x26, x26, #0x8\n"
+ "add x25, x25, #0x10\n"
+ "fmla v16.8h, v6.8h, v2.h[0]\n"
+ "fmla v20.8h, v6.8h, v3.h[0]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "fmla v24.8h, v6.8h, v4.h[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "fmla v9.8h, v7.8h, v0.h[0]\n"
+ "add x22, x22, #0x10\n"
+ "fmla v13.8h, v7.8h, v1.h[0]\n"
+ "fmla v17.8h, v7.8h, v2.h[0]\n"
+ "add x21, x21, #0x10\n"
+ "fmla v21.8h, v7.8h, v3.h[0]\n"
+ "fmla v25.8h, v7.8h, v4.h[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "fmla v10.8h, v6.8h, v0.h[0]\n"
+ "fmla v14.8h, v6.8h, v1.h[0]\n"
+ "fmla v18.8h, v6.8h, v2.h[0]\n"
+ "fmla v22.8h, v6.8h, v3.h[0]\n"
+ "fmla v26.8h, v6.8h, v4.h[0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "fmla v11.8h, v7.8h, v0.h[0]\n"
+ "fmla v15.8h, v7.8h, v1.h[0]\n"
+ "fmla v19.8h, v7.8h, v2.h[0]\n"
+ "fmla v23.8h, v7.8h, v3.h[0]\n"
+ "fmla v27.8h, v7.8h, v4.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "fmla v8.8h, v6.8h, v0.h[1]\n"
+ "fmla v12.8h, v6.8h, v1.h[1]\n"
+ "fmla v16.8h, v6.8h, v2.h[1]\n"
+ "fmla v20.8h, v6.8h, v3.h[1]\n"
+ "fmla v24.8h, v6.8h, v4.h[1]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "fmla v9.8h, v7.8h, v0.h[1]\n"
+ "fmla v13.8h, v7.8h, v1.h[1]\n"
+ "fmla v17.8h, v7.8h, v2.h[1]\n"
+ "fmla v21.8h, v7.8h, v3.h[1]\n"
+ "fmla v25.8h, v7.8h, v4.h[1]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "fmla v10.8h, v6.8h, v0.h[1]\n"
+ "fmla v14.8h, v6.8h, v1.h[1]\n"
+ "fmla v18.8h, v6.8h, v2.h[1]\n"
+ "fmla v22.8h, v6.8h, v3.h[1]\n"
+ "fmla v26.8h, v6.8h, v4.h[1]\n"
+ "ldr q6, [x11, #0x20]\n"
+ "fmla v11.8h, v7.8h, v0.h[1]\n"
+ "fmla v15.8h, v7.8h, v1.h[1]\n"
+ "fmla v19.8h, v7.8h, v2.h[1]\n"
+ "fmla v23.8h, v7.8h, v3.h[1]\n"
+ "fmla v27.8h, v7.8h, v4.h[1]\n"
+ "ldr q7, [x10, #0x20]\n"
+ "fmla v8.8h, v6.8h, v0.h[2]\n"
+ "fmla v12.8h, v6.8h, v1.h[2]\n"
+ "fmla v16.8h, v6.8h, v2.h[2]\n"
+ "fmla v20.8h, v6.8h, v3.h[2]\n"
+ "fmla v24.8h, v6.8h, v4.h[2]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "fmla v9.8h, v7.8h, v0.h[2]\n"
+ "fmla v13.8h, v7.8h, v1.h[2]\n"
+ "fmla v17.8h, v7.8h, v2.h[2]\n"
+ "fmla v21.8h, v7.8h, v3.h[2]\n"
+ "fmla v25.8h, v7.8h, v4.h[2]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "fmla v10.8h, v6.8h, v0.h[2]\n"
+ "fmla v14.8h, v6.8h, v1.h[2]\n"
+ "fmla v18.8h, v6.8h, v2.h[2]\n"
+ "fmla v22.8h, v6.8h, v3.h[2]\n"
+ "fmla v26.8h, v6.8h, v4.h[2]\n"
+ "ldr q6, [x11, #0x30]\n"
+ "fmla v11.8h, v7.8h, v0.h[2]\n"
+ "fmla v15.8h, v7.8h, v1.h[2]\n"
+ "fmla v19.8h, v7.8h, v2.h[2]\n"
+ "fmla v23.8h, v7.8h, v3.h[2]\n"
+ "fmla v27.8h, v7.8h, v4.h[2]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "fmla v8.8h, v6.8h, v0.h[3]\n"
+ "fmla v12.8h, v6.8h, v1.h[3]\n"
+ "fmla v16.8h, v6.8h, v2.h[3]\n"
+ "fmla v20.8h, v6.8h, v3.h[3]\n"
+ "fmla v24.8h, v6.8h, v4.h[3]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "fmla v9.8h, v7.8h, v0.h[3]\n"
+ "fmla v13.8h, v7.8h, v1.h[3]\n"
+ "fmla v17.8h, v7.8h, v2.h[3]\n"
+ "fmla v21.8h, v7.8h, v3.h[3]\n"
+ "fmla v25.8h, v7.8h, v4.h[3]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "fmla v10.8h, v6.8h, v0.h[3]\n"
+ "fmla v14.8h, v6.8h, v1.h[3]\n"
+ "fmla v18.8h, v6.8h, v2.h[3]\n"
+ "fmla v22.8h, v6.8h, v3.h[3]\n"
+ "fmla v26.8h, v6.8h, v4.h[3]\n"
+ "ldr q6, [x11, #0x40]\n"
+ "fmla v11.8h, v7.8h, v0.h[3]\n"
+ "fmla v15.8h, v7.8h, v1.h[3]\n"
+ "fmla v19.8h, v7.8h, v2.h[3]\n"
+ "fmla v23.8h, v7.8h, v3.h[3]\n"
+ "fmla v27.8h, v7.8h, v4.h[3]\n"
+ "ldr q7, [x10, #0x40]\n"
+ "fmla v8.8h, v6.8h, v0.h[4]\n"
+ "fmla v12.8h, v6.8h, v1.h[4]\n"
+ "fmla v16.8h, v6.8h, v2.h[4]\n"
+ "fmla v20.8h, v6.8h, v3.h[4]\n"
+ "fmla v24.8h, v6.8h, v4.h[4]\n"
+ "ldr q6, [x9, #0x40]\n"
+ "fmla v9.8h, v7.8h, v0.h[4]\n"
+ "fmla v13.8h, v7.8h, v1.h[4]\n"
+ "fmla v17.8h, v7.8h, v2.h[4]\n"
+ "fmla v21.8h, v7.8h, v3.h[4]\n"
+ "fmla v25.8h, v7.8h, v4.h[4]\n"
+ "ldr q7, [x28, #0x40]\n"
+ "fmla v10.8h, v6.8h, v0.h[4]\n"
+ "fmla v14.8h, v6.8h, v1.h[4]\n"
+ "fmla v18.8h, v6.8h, v2.h[4]\n"
+ "fmla v22.8h, v6.8h, v3.h[4]\n"
+ "fmla v26.8h, v6.8h, v4.h[4]\n"
+ "ldr q6, [x11, #0x50]\n"
+ "fmla v11.8h, v7.8h, v0.h[4]\n"
+ "fmla v15.8h, v7.8h, v1.h[4]\n"
+ "fmla v19.8h, v7.8h, v2.h[4]\n"
+ "fmla v23.8h, v7.8h, v3.h[4]\n"
+ "fmla v27.8h, v7.8h, v4.h[4]\n"
+ "ldr q7, [x10, #0x50]\n"
+ "fmla v8.8h, v6.8h, v0.h[5]\n"
+ "fmla v12.8h, v6.8h, v1.h[5]\n"
+ "fmla v16.8h, v6.8h, v2.h[5]\n"
+ "fmla v20.8h, v6.8h, v3.h[5]\n"
+ "fmla v24.8h, v6.8h, v4.h[5]\n"
+ "ldr q6, [x9, #0x50]\n"
+ "fmla v9.8h, v7.8h, v0.h[5]\n"
+ "fmla v13.8h, v7.8h, v1.h[5]\n"
+ "fmla v17.8h, v7.8h, v2.h[5]\n"
+ "fmla v21.8h, v7.8h, v3.h[5]\n"
+ "fmla v25.8h, v7.8h, v4.h[5]\n"
+ "ldr q7, [x28, #0x50]\n"
+ "fmla v10.8h, v6.8h, v0.h[5]\n"
+ "fmla v14.8h, v6.8h, v1.h[5]\n"
+ "fmla v18.8h, v6.8h, v2.h[5]\n"
+ "fmla v22.8h, v6.8h, v3.h[5]\n"
+ "fmla v26.8h, v6.8h, v4.h[5]\n"
+ "ldr q6, [x11, #0x60]\n"
+ "fmla v11.8h, v7.8h, v0.h[5]\n"
+ "fmla v15.8h, v7.8h, v1.h[5]\n"
+ "fmla v19.8h, v7.8h, v2.h[5]\n"
+ "fmla v23.8h, v7.8h, v3.h[5]\n"
+ "fmla v27.8h, v7.8h, v4.h[5]\n"
+ "ldr q7, [x10, #0x60]\n"
+ "fmla v8.8h, v6.8h, v0.h[6]\n"
+ "fmla v12.8h, v6.8h, v1.h[6]\n"
+ "fmla v16.8h, v6.8h, v2.h[6]\n"
+ "fmla v20.8h, v6.8h, v3.h[6]\n"
+ "fmla v24.8h, v6.8h, v4.h[6]\n"
+ "ldr q6, [x9, #0x60]\n"
+ "fmla v9.8h, v7.8h, v0.h[6]\n"
+ "fmla v13.8h, v7.8h, v1.h[6]\n"
+ "fmla v17.8h, v7.8h, v2.h[6]\n"
+ "fmla v21.8h, v7.8h, v3.h[6]\n"
+ "fmla v25.8h, v7.8h, v4.h[6]\n"
+ "ldr q7, [x28, #0x60]\n"
+ "fmla v10.8h, v6.8h, v0.h[6]\n"
+ "fmla v14.8h, v6.8h, v1.h[6]\n"
+ "fmla v18.8h, v6.8h, v2.h[6]\n"
+ "fmla v22.8h, v6.8h, v3.h[6]\n"
+ "fmla v26.8h, v6.8h, v4.h[6]\n"
+ "ldr q6, [x11, #0x70]\n"
+ "fmla v11.8h, v7.8h, v0.h[6]\n"
+ "add x11, x11, #0x80\n"
+ "fmla v15.8h, v7.8h, v1.h[6]\n"
+ "fmla v19.8h, v7.8h, v2.h[6]\n"
+ "fmla v23.8h, v7.8h, v3.h[6]\n"
+ "fmla v27.8h, v7.8h, v4.h[6]\n"
+ "ldr q7, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
+ "fmla v8.8h, v6.8h, v0.h[7]\n"
+ "fmla v12.8h, v6.8h, v1.h[7]\n"
+ "fmla v16.8h, v6.8h, v2.h[7]\n"
+ "fmla v20.8h, v6.8h, v3.h[7]\n"
+ "fmla v24.8h, v6.8h, v4.h[7]\n"
+ "ldr q6, [x9, #0x70]\n"
+ "fmla v9.8h, v7.8h, v0.h[7]\n"
+ "add x9, x9, #0x80\n"
+ "fmla v13.8h, v7.8h, v1.h[7]\n"
+ "fmla v17.8h, v7.8h, v2.h[7]\n"
+ "fmla v21.8h, v7.8h, v3.h[7]\n"
+ "fmla v25.8h, v7.8h, v4.h[7]\n"
+ "ldr q7, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
+ "fmla v10.8h, v6.8h, v0.h[7]\n"
+ "fmla v14.8h, v6.8h, v1.h[7]\n"
+ "fmla v18.8h, v6.8h, v2.h[7]\n"
+ "fmla v22.8h, v6.8h, v3.h[7]\n"
+ "fmla v26.8h, v6.8h, v4.h[7]\n"
+ "fmla v11.8h, v7.8h, v0.h[7]\n"
+ "fmla v15.8h, v7.8h, v1.h[7]\n"
+ "fmla v19.8h, v7.8h, v2.h[7]\n"
+ "fmla v23.8h, v7.8h, v3.h[7]\n"
+ "fmla v27.8h, v7.8h, v4.h[7]\n"
+ "229:" // Height 5: Multiply loop: Main loop skip
+ "cbz x26, 231f\n"
+ "230:" // Height 5: Multiply loop: Odd block loop
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x24], #0x2\n"
+ "sub x26, x26, #0x1\n"
+ "ldr h2, [x23], #0x2\n"
+ "ldr h3, [x22], #0x2\n"
+ "ldr h4, [x21], #0x2\n"
+ "ldr q6, [x11, #0x0]\n"
+ "fmla v8.8h, v6.8h, v0.h[0]\n"
+ "fmla v12.8h, v6.8h, v1.h[0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "fmla v16.8h, v6.8h, v2.h[0]\n"
+ "fmla v20.8h, v6.8h, v3.h[0]\n"
+ "add x11, x11, #0x10\n"
+ "fmla v24.8h, v6.8h, v4.h[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "fmla v9.8h, v7.8h, v0.h[0]\n"
+ "add x10, x10, #0x10\n"
+ "fmla v13.8h, v7.8h, v1.h[0]\n"
+ "fmla v17.8h, v7.8h, v2.h[0]\n"
+ "add x9, x9, #0x10\n"
+ "fmla v21.8h, v7.8h, v3.h[0]\n"
+ "fmla v25.8h, v7.8h, v4.h[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x28, x28, #0x10\n"
+ "fmla v10.8h, v6.8h, v0.h[0]\n"
+ "fmla v14.8h, v6.8h, v1.h[0]\n"
+ "fmla v18.8h, v6.8h, v2.h[0]\n"
+ "fmla v22.8h, v6.8h, v3.h[0]\n"
+ "fmla v26.8h, v6.8h, v4.h[0]\n"
+ "fmla v11.8h, v7.8h, v0.h[0]\n"
+ "fmla v15.8h, v7.8h, v1.h[0]\n"
+ "fmla v19.8h, v7.8h, v2.h[0]\n"
+ "fmla v23.8h, v7.8h, v3.h[0]\n"
+ "fmla v27.8h, v7.8h, v4.h[0]\n"
+ "cbnz x26, 230b\n"
+ "231:" // Height 5: Multiply loop: No odd multiplies
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 224b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "tbz %x[flags], #1, 232f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.8h }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.8h }, [x19]\n"
+ "fmin v8.8h, v8.8h, v1.8h\n"
+ "fmin v9.8h, v9.8h, v1.8h\n"
+ "fmin v10.8h, v10.8h, v1.8h\n"
+ "fmin v11.8h, v11.8h, v1.8h\n"
+ "fmin v12.8h, v12.8h, v1.8h\n"
+ "fmin v13.8h, v13.8h, v1.8h\n"
+ "fmin v14.8h, v14.8h, v1.8h\n"
+ "fmin v15.8h, v15.8h, v1.8h\n"
+ "fmin v16.8h, v16.8h, v1.8h\n"
+ "fmin v17.8h, v17.8h, v1.8h\n"
+ "fmin v18.8h, v18.8h, v1.8h\n"
+ "fmin v19.8h, v19.8h, v1.8h\n"
+ "fmin v20.8h, v20.8h, v1.8h\n"
+ "fmin v21.8h, v21.8h, v1.8h\n"
+ "fmin v22.8h, v22.8h, v1.8h\n"
+ "fmin v23.8h, v23.8h, v1.8h\n"
+ "fmin v24.8h, v24.8h, v1.8h\n"
+ "fmin v25.8h, v25.8h, v1.8h\n"
+ "fmin v26.8h, v26.8h, v1.8h\n"
+ "fmin v27.8h, v27.8h, v1.8h\n"
+ "fmax v8.8h, v8.8h, v0.8h\n"
+ "fmax v9.8h, v9.8h, v0.8h\n"
+ "fmax v10.8h, v10.8h, v0.8h\n"
+ "fmax v11.8h, v11.8h, v0.8h\n"
+ "fmax v12.8h, v12.8h, v0.8h\n"
+ "fmax v13.8h, v13.8h, v0.8h\n"
+ "fmax v14.8h, v14.8h, v0.8h\n"
+ "fmax v15.8h, v15.8h, v0.8h\n"
+ "fmax v16.8h, v16.8h, v0.8h\n"
+ "fmax v17.8h, v17.8h, v0.8h\n"
+ "fmax v18.8h, v18.8h, v0.8h\n"
+ "fmax v19.8h, v19.8h, v0.8h\n"
+ "fmax v20.8h, v20.8h, v0.8h\n"
+ "fmax v21.8h, v21.8h, v0.8h\n"
+ "fmax v22.8h, v22.8h, v0.8h\n"
+ "fmax v23.8h, v23.8h, v0.8h\n"
+ "fmax v24.8h, v24.8h, v0.8h\n"
+ "fmax v25.8h, v25.8h, v0.8h\n"
+ "fmax v26.8h, v26.8h, v0.8h\n"
+ "fmax v27.8h, v27.8h, v0.8h\n"
+ "232:" // Height 5: No activation
+ "cmp x13, #0x20\n"
+ "bge 249f\n"
+ "tbz x13, #4, 240f\n"
+ "st1 { v8.8h }, [x12], #0x10\n"
+ "st1 { v9.8h }, [x12], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "st1 { v13.8h }, [x24], #0x10\n"
+ "st1 { v16.8h }, [x23], #0x10\n"
+ "st1 { v17.8h }, [x23], #0x10\n"
+ "st1 { v20.8h }, [x22], #0x10\n"
+ "st1 { v21.8h }, [x22], #0x10\n"
+ "st1 { v24.8h }, [x21], #0x10\n"
+ "st1 { v25.8h }, [x21], #0x10\n"
+ "tbz x13, #3, 236f\n"
+ "st1 { v10.8h }, [x12], #0x10\n"
+ "st1 { v14.8h }, [x24], #0x10\n"
+ "st1 { v18.8h }, [x23], #0x10\n"
+ "st1 { v22.8h }, [x22], #0x10\n"
+ "st1 { v26.8h }, [x21], #0x10\n"
+ "tbz x13, #2, 234f\n"
+ "str d11, [x12], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "str d23, [x22], #0x8\n"
+ "str d27, [x21], #0x8\n"
+ "tbz x13, #1, 233f\n"
+ "st1 { v11.s }[2], [x12], #0x4\n"
+ "st1 { v15.s }[2], [x24], #0x4\n"
+ "st1 { v19.s }[2], [x23], #0x4\n"
+ "st1 { v23.s }[2], [x22], #0x4\n"
+ "st1 { v27.s }[2], [x21], #0x4\n"
+ "tbz x13, #0, 248f\n"
+ "st1 { v11.h }[6], [x12]\n"
+ "st1 { v15.h }[6], [x24]\n"
+ "st1 { v19.h }[6], [x23]\n"
+ "st1 { v23.h }[6], [x22]\n"
+ "st1 { v27.h }[6], [x21]\n"
+ "b 248f\n"
+ "233:" // Height 5: Partial direct writeback: partial_1_28
+ "tbz x13, #0, 248f\n"
+ "st1 { v11.h }[4], [x12]\n"
+ "st1 { v15.h }[4], [x24]\n"
+ "st1 { v19.h }[4], [x23]\n"
+ "st1 { v23.h }[4], [x22]\n"
+ "st1 { v27.h }[4], [x21]\n"
+ "b 248f\n"
+ "234:" // Height 5: Partial direct writeback: partial_2_24
+ "tbz x13, #1, 235f\n"
+ "str s11, [x12], #0x4\n"
+ "str s15, [x24], #0x4\n"
+ "str s19, [x23], #0x4\n"
+ "str s23, [x22], #0x4\n"
+ "str s27, [x21], #0x4\n"
+ "tbz x13, #0, 248f\n"
+ "st1 { v11.h }[2], [x12]\n"
+ "st1 { v15.h }[2], [x24]\n"
+ "st1 { v19.h }[2], [x23]\n"
+ "st1 { v23.h }[2], [x22]\n"
+ "st1 { v27.h }[2], [x21]\n"
+ "b 248f\n"
+ "235:" // Height 5: Partial direct writeback: partial_1_24
+ "tbz x13, #0, 248f\n"
+ "str h11, [x12, #0x0]\n"
+ "str h15, [x24, #0x0]\n"
+ "str h19, [x23, #0x0]\n"
+ "str h23, [x22, #0x0]\n"
+ "str h27, [x21, #0x0]\n"
+ "b 248f\n"
+ "236:" // Height 5: Partial direct writeback: partial_4_16
+ "tbz x13, #2, 238f\n"
+ "str d10, [x12], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "str d26, [x21], #0x8\n"
+ "tbz x13, #1, 237f\n"
+ "st1 { v10.s }[2], [x12], #0x4\n"
+ "st1 { v14.s }[2], [x24], #0x4\n"
+ "st1 { v18.s }[2], [x23], #0x4\n"
+ "st1 { v22.s }[2], [x22], #0x4\n"
+ "st1 { v26.s }[2], [x21], #0x4\n"
+ "tbz x13, #0, 248f\n"
+ "st1 { v10.h }[6], [x12]\n"
+ "st1 { v14.h }[6], [x24]\n"
+ "st1 { v18.h }[6], [x23]\n"
+ "st1 { v22.h }[6], [x22]\n"
+ "st1 { v26.h }[6], [x21]\n"
+ "b 248f\n"
+ "237:" // Height 5: Partial direct writeback: partial_1_20
+ "tbz x13, #0, 248f\n"
+ "st1 { v10.h }[4], [x12]\n"
+ "st1 { v14.h }[4], [x24]\n"
+ "st1 { v18.h }[4], [x23]\n"
+ "st1 { v22.h }[4], [x22]\n"
+ "st1 { v26.h }[4], [x21]\n"
+ "b 248f\n"
+ "238:" // Height 5: Partial direct writeback: partial_2_16
+ "tbz x13, #1, 239f\n"
+ "str s10, [x12], #0x4\n"
+ "str s14, [x24], #0x4\n"
+ "str s18, [x23], #0x4\n"
+ "str s22, [x22], #0x4\n"
+ "str s26, [x21], #0x4\n"
+ "tbz x13, #0, 248f\n"
+ "st1 { v10.h }[2], [x12]\n"
+ "st1 { v14.h }[2], [x24]\n"
+ "st1 { v18.h }[2], [x23]\n"
+ "st1 { v22.h }[2], [x22]\n"
+ "st1 { v26.h }[2], [x21]\n"
+ "b 248f\n"
+ "239:" // Height 5: Partial direct writeback: partial_1_16
+ "tbz x13, #0, 248f\n"
+ "str h10, [x12, #0x0]\n"
+ "str h14, [x24, #0x0]\n"
+ "str h18, [x23, #0x0]\n"
+ "str h22, [x22, #0x0]\n"
+ "str h26, [x21, #0x0]\n"
+ "b 248f\n"
+ "240:" // Height 5: Partial direct writeback: partial_8_0
+ "tbz x13, #3, 244f\n"
+ "st1 { v8.8h }, [x12], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "st1 { v16.8h }, [x23], #0x10\n"
+ "st1 { v20.8h }, [x22], #0x10\n"
+ "st1 { v24.8h }, [x21], #0x10\n"
+ "tbz x13, #2, 242f\n"
+ "str d9, [x12], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "str d25, [x21], #0x8\n"
+ "tbz x13, #1, 241f\n"
+ "st1 { v9.s }[2], [x12], #0x4\n"
+ "st1 { v13.s }[2], [x24], #0x4\n"
+ "st1 { v17.s }[2], [x23], #0x4\n"
+ "st1 { v21.s }[2], [x22], #0x4\n"
+ "st1 { v25.s }[2], [x21], #0x4\n"
+ "tbz x13, #0, 248f\n"
+ "st1 { v9.h }[6], [x12]\n"
+ "st1 { v13.h }[6], [x24]\n"
+ "st1 { v17.h }[6], [x23]\n"
+ "st1 { v21.h }[6], [x22]\n"
+ "st1 { v25.h }[6], [x21]\n"
+ "b 248f\n"
+ "241:" // Height 5: Partial direct writeback: partial_1_12
+ "tbz x13, #0, 248f\n"
+ "st1 { v9.h }[4], [x12]\n"
+ "st1 { v13.h }[4], [x24]\n"
+ "st1 { v17.h }[4], [x23]\n"
+ "st1 { v21.h }[4], [x22]\n"
+ "st1 { v25.h }[4], [x21]\n"
+ "b 248f\n"
+ "242:" // Height 5: Partial direct writeback: partial_2_8
+ "tbz x13, #1, 243f\n"
+ "str s9, [x12], #0x4\n"
+ "str s13, [x24], #0x4\n"
+ "str s17, [x23], #0x4\n"
+ "str s21, [x22], #0x4\n"
+ "str s25, [x21], #0x4\n"
+ "tbz x13, #0, 248f\n"
+ "st1 { v9.h }[2], [x12]\n"
+ "st1 { v13.h }[2], [x24]\n"
+ "st1 { v17.h }[2], [x23]\n"
+ "st1 { v21.h }[2], [x22]\n"
+ "st1 { v25.h }[2], [x21]\n"
+ "b 248f\n"
+ "243:" // Height 5: Partial direct writeback: partial_1_8
+ "tbz x13, #0, 248f\n"
+ "str h9, [x12, #0x0]\n"
+ "str h13, [x24, #0x0]\n"
+ "str h17, [x23, #0x0]\n"
+ "str h21, [x22, #0x0]\n"
+ "str h25, [x21, #0x0]\n"
+ "b 248f\n"
+ "244:" // Height 5: Partial direct writeback: partial_4_0
+ "tbz x13, #2, 246f\n"
+ "str d8, [x12], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "str d24, [x21], #0x8\n"
+ "tbz x13, #1, 245f\n"
+ "st1 { v8.s }[2], [x12], #0x4\n"
+ "st1 { v12.s }[2], [x24], #0x4\n"
+ "st1 { v16.s }[2], [x23], #0x4\n"
+ "st1 { v20.s }[2], [x22], #0x4\n"
+ "st1 { v24.s }[2], [x21], #0x4\n"
+ "tbz x13, #0, 248f\n"
+ "st1 { v8.h }[6], [x12]\n"
+ "st1 { v12.h }[6], [x24]\n"
+ "st1 { v16.h }[6], [x23]\n"
+ "st1 { v20.h }[6], [x22]\n"
+ "st1 { v24.h }[6], [x21]\n"
+ "b 248f\n"
+ "245:" // Height 5: Partial direct writeback: partial_1_4
+ "tbz x13, #0, 248f\n"
+ "st1 { v8.h }[4], [x12]\n"
+ "st1 { v12.h }[4], [x24]\n"
+ "st1 { v16.h }[4], [x23]\n"
+ "st1 { v20.h }[4], [x22]\n"
+ "st1 { v24.h }[4], [x21]\n"
+ "b 248f\n"
+ "246:" // Height 5: Partial direct writeback: partial_2_0
+ "tbz x13, #1, 247f\n"
+ "str s8, [x12], #0x4\n"
+ "str s12, [x24], #0x4\n"
+ "str s16, [x23], #0x4\n"
+ "str s20, [x22], #0x4\n"
+ "str s24, [x21], #0x4\n"
+ "tbz x13, #0, 248f\n"
+ "st1 { v8.h }[2], [x12]\n"
+ "st1 { v12.h }[2], [x24]\n"
+ "st1 { v16.h }[2], [x23]\n"
+ "st1 { v20.h }[2], [x22]\n"
+ "st1 { v24.h }[2], [x21]\n"
+ "b 248f\n"
+ "247:" // Height 5: Partial direct writeback: partial_1_0
+ "str h8, [x12, #0x0]\n"
+ "str h12, [x24, #0x0]\n"
+ "str h16, [x23, #0x0]\n"
+ "str h20, [x22, #0x0]\n"
+ "str h24, [x21, #0x0]\n"
+ "248:" // Height 5: Partial direct writeback: Done
+ "b 250f\n"
+ "249:" // Height 5: Full writeback
+ "str q8, [x12, #0x0]\n"
+ "str q9, [x12, #0x10]\n"
+ "str q10, [x12, #0x20]\n"
+ "str q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q20, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q22, [x22, #0x20]\n"
+ "str q23, [x22, #0x30]\n"
+ "str q24, [x21, #0x0]\n"
+ "str q25, [x21, #0x10]\n"
+ "str q26, [x21, #0x20]\n"
+ "str q27, [x21, #0x30]\n"
+ "250:" // Height 5: Writeback done
+ "subs x13, x13, #0x20\n"
+ "bgt 202b\n"
+ "b 302f\n"
+ "251:" // Height 6
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x20, #0xc\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x14, %x[bias]\n"
+ "mov x12, %x[output_ptr]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
+ "252:" // Height 6: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0x18\n"
+ "bgt 253f\n"
+ "cmp x13, #0x10\n"
+ "mov x28, x11\n"
+ "bgt 253f\n"
+ "cmp x13, #0x8\n"
+ "mov x9, x11\n"
+ "bgt 253f\n"
+ "mov x10, x11\n"
+ "253:" // Height 6: B setup done
+ "cbz x14, 254f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
+ "mov v16.16b, v8.16b\n"
+ "mov v17.16b, v9.16b\n"
+ "add x14, x14, #0x40\n"
+ "mov v18.16b, v10.16b\n"
+ "mov v19.16b, v11.16b\n"
+ "mov v20.16b, v8.16b\n"
+ "mov v21.16b, v9.16b\n"
+ "mov v22.16b, v10.16b\n"
+ "mov v23.16b, v11.16b\n"
+ "mov v24.16b, v8.16b\n"
+ "mov v25.16b, v9.16b\n"
+ "mov v26.16b, v10.16b\n"
+ "mov v27.16b, v11.16b\n"
+ "mov v28.16b, v8.16b\n"
+ "mov v29.16b, v9.16b\n"
+ "mov v30.16b, v10.16b\n"
+ "mov v31.16b, v11.16b\n"
+ "b 273f\n"
+ "254:" // Height 6: no bias
+ "tbz %x[flags], #0, 272f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "cmp x13, #0x20\n"
+ "add x20, x21, x19, LSL #1\n"
+ "bge 271f\n"
+ "tbz x13, #4, 262f\n"
+ "ld1 { v8.8h }, [x12], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "ld1 { v16.8h }, [x23], #0x10\n"
+ "ld1 { v20.8h }, [x22], #0x10\n"
+ "ld1 { v24.8h }, [x21], #0x10\n"
+ "ld1 { v28.8h }, [x20], #0x10\n"
+ "ld1 { v9.8h }, [x12], #0x10\n"
+ "ld1 { v13.8h }, [x24], #0x10\n"
+ "ld1 { v17.8h }, [x23], #0x10\n"
+ "ld1 { v21.8h }, [x22], #0x10\n"
+ "ld1 { v25.8h }, [x21], #0x10\n"
+ "ld1 { v29.8h }, [x20], #0x10\n"
+ "tbz x13, #3, 258f\n"
+ "ld1 { v10.8h }, [x12], #0x10\n"
+ "ld1 { v14.8h }, [x24], #0x10\n"
+ "ld1 { v18.8h }, [x23], #0x10\n"
+ "ld1 { v22.8h }, [x22], #0x10\n"
+ "ld1 { v26.8h }, [x21], #0x10\n"
+ "ld1 { v30.8h }, [x20], #0x10\n"
+ "tbz x13, #2, 256f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d27, [x21], #0x8\n"
+ "ldr d31, [x20], #0x8\n"
+ "tbz x13, #1, 255f\n"
+ "ld1 { v11.s }[2], [x12], #0x4\n"
+ "ld1 { v15.s }[2], [x24], #0x4\n"
+ "mov x19, #0x3c\n"
+ "ld1 { v19.s }[2], [x23], #0x4\n"
+ "ld1 { v23.s }[2], [x22], #0x4\n"
+ "ld1 { v27.s }[2], [x21], #0x4\n"
+ "ld1 { v31.s }[2], [x20], #0x4\n"
+ "tbz x13, #0, 270f\n"
+ "ld1 { v11.h }[6], [x12]\n"
+ "ld1 { v15.h }[6], [x24]\n"
+ "ld1 { v19.h }[6], [x23]\n"
+ "ld1 { v23.h }[6], [x22]\n"
+ "ld1 { v27.h }[6], [x21]\n"
+ "ld1 { v31.h }[6], [x20]\n"
+ "b 270f\n"
+ "255:" // Height 6: Partial accumulate: partial_1_28
+ "mov x19, #0x38\n"
+ "tbz x13, #0, 270f\n"
+ "ld1 { v11.h }[4], [x12]\n"
+ "ld1 { v15.h }[4], [x24]\n"
+ "ld1 { v19.h }[4], [x23]\n"
+ "ld1 { v23.h }[4], [x22]\n"
+ "ld1 { v27.h }[4], [x21]\n"
+ "ld1 { v31.h }[4], [x20]\n"
+ "b 270f\n"
+ "256:" // Height 6: Partial accumulate: partial_2_24
+ "tbz x13, #1, 257f\n"
+ "ldr s11, [x12], #0x4\n"
+ "ldr s15, [x24], #0x4\n"
+ "mov x19, #0x34\n"
+ "ldr s19, [x23], #0x4\n"
+ "ldr s23, [x22], #0x4\n"
+ "ldr s27, [x21], #0x4\n"
+ "ldr s31, [x20], #0x4\n"
+ "tbz x13, #0, 270f\n"
+ "ld1 { v11.h }[2], [x12]\n"
+ "ld1 { v15.h }[2], [x24]\n"
+ "ld1 { v19.h }[2], [x23]\n"
+ "ld1 { v23.h }[2], [x22]\n"
+ "ld1 { v27.h }[2], [x21]\n"
+ "ld1 { v31.h }[2], [x20]\n"
+ "b 270f\n"
+ "257:" // Height 6: Partial accumulate: partial_1_24
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 270f\n"
+ "ldr h11, [x12, #0x0]\n"
+ "ldr h15, [x24, #0x0]\n"
+ "ldr h19, [x23, #0x0]\n"
+ "ldr h23, [x22, #0x0]\n"
+ "ldr h27, [x21, #0x0]\n"
+ "ldr h31, [x20, #0x0]\n"
+ "b 270f\n"
+ "258:" // Height 6: Partial accumulate: partial_4_16
+ "tbz x13, #2, 260f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
+ "ldr d30, [x20], #0x8\n"
+ "tbz x13, #1, 259f\n"
+ "ld1 { v10.s }[2], [x12], #0x4\n"
+ "ld1 { v14.s }[2], [x24], #0x4\n"
+ "mov x19, #0x2c\n"
+ "ld1 { v18.s }[2], [x23], #0x4\n"
+ "ld1 { v22.s }[2], [x22], #0x4\n"
+ "ld1 { v26.s }[2], [x21], #0x4\n"
+ "ld1 { v30.s }[2], [x20], #0x4\n"
+ "tbz x13, #0, 270f\n"
+ "ld1 { v10.h }[6], [x12]\n"
+ "ld1 { v14.h }[6], [x24]\n"
+ "ld1 { v18.h }[6], [x23]\n"
+ "ld1 { v22.h }[6], [x22]\n"
+ "ld1 { v26.h }[6], [x21]\n"
+ "ld1 { v30.h }[6], [x20]\n"
+ "b 270f\n"
+ "259:" // Height 6: Partial accumulate: partial_1_20
+ "mov x19, #0x28\n"
+ "tbz x13, #0, 270f\n"
+ "ld1 { v10.h }[4], [x12]\n"
+ "ld1 { v14.h }[4], [x24]\n"
+ "ld1 { v18.h }[4], [x23]\n"
+ "ld1 { v22.h }[4], [x22]\n"
+ "ld1 { v26.h }[4], [x21]\n"
+ "ld1 { v30.h }[4], [x20]\n"
+ "b 270f\n"
+ "260:" // Height 6: Partial accumulate: partial_2_16
+ "tbz x13, #1, 261f\n"
+ "ldr s10, [x12], #0x4\n"
+ "ldr s14, [x24], #0x4\n"
+ "mov x19, #0x24\n"
+ "ldr s18, [x23], #0x4\n"
+ "ldr s22, [x22], #0x4\n"
+ "ldr s26, [x21], #0x4\n"
+ "ldr s30, [x20], #0x4\n"
+ "tbz x13, #0, 270f\n"
+ "ld1 { v10.h }[2], [x12]\n"
+ "ld1 { v14.h }[2], [x24]\n"
+ "ld1 { v18.h }[2], [x23]\n"
+ "ld1 { v22.h }[2], [x22]\n"
+ "ld1 { v26.h }[2], [x21]\n"
+ "ld1 { v30.h }[2], [x20]\n"
+ "b 270f\n"
+ "261:" // Height 6: Partial accumulate: partial_1_16
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 270f\n"
+ "ldr h10, [x12, #0x0]\n"
+ "ldr h14, [x24, #0x0]\n"
+ "ldr h18, [x23, #0x0]\n"
+ "ldr h22, [x22, #0x0]\n"
+ "ldr h26, [x21, #0x0]\n"
+ "ldr h30, [x20, #0x0]\n"
+ "b 270f\n"
+ "262:" // Height 6: Partial accumulate: partial_8_0
+ "tbz x13, #3, 266f\n"
+ "ld1 { v8.8h }, [x12], #0x10\n"
+ "ld1 { v12.8h }, [x24], #0x10\n"
+ "ld1 { v16.8h }, [x23], #0x10\n"
+ "ld1 { v20.8h }, [x22], #0x10\n"
+ "ld1 { v24.8h }, [x21], #0x10\n"
+ "ld1 { v28.8h }, [x20], #0x10\n"
+ "tbz x13, #2, 264f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d25, [x21], #0x8\n"
+ "ldr d29, [x20], #0x8\n"
+ "tbz x13, #1, 263f\n"
+ "ld1 { v9.s }[2], [x12], #0x4\n"
+ "ld1 { v13.s }[2], [x24], #0x4\n"
+ "mov x19, #0x1c\n"
+ "ld1 { v17.s }[2], [x23], #0x4\n"
+ "ld1 { v21.s }[2], [x22], #0x4\n"
+ "ld1 { v25.s }[2], [x21], #0x4\n"
+ "ld1 { v29.s }[2], [x20], #0x4\n"
+ "tbz x13, #0, 270f\n"
+ "ld1 { v9.h }[6], [x12]\n"
+ "ld1 { v13.h }[6], [x24]\n"
+ "ld1 { v17.h }[6], [x23]\n"
+ "ld1 { v21.h }[6], [x22]\n"
+ "ld1 { v25.h }[6], [x21]\n"
+ "ld1 { v29.h }[6], [x20]\n"
+ "b 270f\n"
+ "263:" // Height 6: Partial accumulate: partial_1_12
+ "mov x19, #0x18\n"
+ "tbz x13, #0, 270f\n"
+ "ld1 { v9.h }[4], [x12]\n"
+ "ld1 { v13.h }[4], [x24]\n"
+ "ld1 { v17.h }[4], [x23]\n"
+ "ld1 { v21.h }[4], [x22]\n"
+ "ld1 { v25.h }[4], [x21]\n"
+ "ld1 { v29.h }[4], [x20]\n"
+ "b 270f\n"
+ "264:" // Height 6: Partial accumulate: partial_2_8
+ "tbz x13, #1, 265f\n"
+ "ldr s9, [x12], #0x4\n"
+ "ldr s13, [x24], #0x4\n"
+ "mov x19, #0x14\n"
+ "ldr s17, [x23], #0x4\n"
+ "ldr s21, [x22], #0x4\n"
+ "ldr s25, [x21], #0x4\n"
+ "ldr s29, [x20], #0x4\n"
+ "tbz x13, #0, 270f\n"
+ "ld1 { v9.h }[2], [x12]\n"
+ "ld1 { v13.h }[2], [x24]\n"
+ "ld1 { v17.h }[2], [x23]\n"
+ "ld1 { v21.h }[2], [x22]\n"
+ "ld1 { v25.h }[2], [x21]\n"
+ "ld1 { v29.h }[2], [x20]\n"
+ "b 270f\n"
+ "265:" // Height 6: Partial accumulate: partial_1_8
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 270f\n"
+ "ldr h9, [x12, #0x0]\n"
+ "ldr h13, [x24, #0x0]\n"
+ "ldr h17, [x23, #0x0]\n"
+ "ldr h21, [x22, #0x0]\n"
+ "ldr h25, [x21, #0x0]\n"
+ "ldr h29, [x20, #0x0]\n"
+ "b 270f\n"
+ "266:" // Height 6: Partial accumulate: partial_4_0
+ "tbz x13, #2, 268f\n"
+ "ldr d8, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d24, [x21], #0x8\n"
+ "ldr d28, [x20], #0x8\n"
+ "tbz x13, #1, 267f\n"
+ "ld1 { v8.s }[2], [x12], #0x4\n"
+ "ld1 { v12.s }[2], [x24], #0x4\n"
+ "mov x19, #0xc\n"
+ "ld1 { v16.s }[2], [x23], #0x4\n"
+ "ld1 { v20.s }[2], [x22], #0x4\n"
+ "ld1 { v24.s }[2], [x21], #0x4\n"
+ "ld1 { v28.s }[2], [x20], #0x4\n"
+ "tbz x13, #0, 270f\n"
+ "ld1 { v8.h }[6], [x12]\n"
+ "ld1 { v12.h }[6], [x24]\n"
+ "ld1 { v16.h }[6], [x23]\n"
+ "ld1 { v20.h }[6], [x22]\n"
+ "ld1 { v24.h }[6], [x21]\n"
+ "ld1 { v28.h }[6], [x20]\n"
+ "b 270f\n"
+ "267:" // Height 6: Partial accumulate: partial_1_4
+ "mov x19, #0x8\n"
+ "tbz x13, #0, 270f\n"
+ "ld1 { v8.h }[4], [x12]\n"
+ "ld1 { v12.h }[4], [x24]\n"
+ "ld1 { v16.h }[4], [x23]\n"
+ "ld1 { v20.h }[4], [x22]\n"
+ "ld1 { v24.h }[4], [x21]\n"
+ "ld1 { v28.h }[4], [x20]\n"
+ "b 270f\n"
+ "268:" // Height 6: Partial accumulate: partial_2_0
+ "tbz x13, #1, 269f\n"
+ "ldr s8, [x12], #0x4\n"
+ "ldr s12, [x24], #0x4\n"
+ "mov x19, #0x4\n"
+ "ldr s16, [x23], #0x4\n"
+ "ldr s20, [x22], #0x4\n"
+ "ldr s24, [x21], #0x4\n"
+ "ldr s28, [x20], #0x4\n"
+ "tbz x13, #0, 270f\n"
+ "ld1 { v8.h }[2], [x12]\n"
+ "ld1 { v12.h }[2], [x24]\n"
+ "ld1 { v16.h }[2], [x23]\n"
+ "ld1 { v20.h }[2], [x22]\n"
+ "ld1 { v24.h }[2], [x21]\n"
+ "ld1 { v28.h }[2], [x20]\n"
+ "b 270f\n"
+ "269:" // Height 6: Partial accumulate: partial_1_0
+ "ldr h8, [x12, #0x0]\n"
+ "ldr h12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr h16, [x23, #0x0]\n"
+ "ldr h20, [x22, #0x0]\n"
+ "ldr h24, [x21, #0x0]\n"
+ "ldr h28, [x20, #0x0]\n"
+ "270:" // Height 6: Partial accumulate: Done
+ "sub x12, x12, x19\n"
+ "b 273f\n"
+ "271:" // Height 6: full accumulate
+ "ldr q8, [x12, #0x0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "ldr q10, [x12, #0x20]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "ldr q24, [x21, #0x0]\n"
+ "ldr q25, [x21, #0x10]\n"
+ "ldr q26, [x21, #0x20]\n"
+ "ldr q27, [x21, #0x30]\n"
+ "ldr q28, [x20, #0x0]\n"
+ "ldr q29, [x20, #0x10]\n"
+ "ldr q30, [x20, #0x20]\n"
+ "ldr q31, [x20, #0x30]\n"
+ "b 273f\n"
+ "272:" // Height 6: no accumulate
+ "movi v8.16b, #0x0\n"
+ "movi v9.16b, #0x0\n"
+ "movi v10.16b, #0x0\n"
+ "movi v11.16b, #0x0\n"
+ "movi v12.16b, #0x0\n"
+ "movi v13.16b, #0x0\n"
+ "movi v14.16b, #0x0\n"
+ "movi v15.16b, #0x0\n"
+ "movi v16.16b, #0x0\n"
+ "movi v17.16b, #0x0\n"
+ "movi v18.16b, #0x0\n"
+ "movi v19.16b, #0x0\n"
+ "movi v20.16b, #0x0\n"
+ "movi v21.16b, #0x0\n"
+ "movi v22.16b, #0x0\n"
+ "movi v23.16b, #0x0\n"
+ "movi v24.16b, #0x0\n"
+ "movi v25.16b, #0x0\n"
+ "movi v26.16b, #0x0\n"
+ "movi v27.16b, #0x0\n"
+ "movi v28.16b, #0x0\n"
+ "movi v29.16b, #0x0\n"
+ "movi v30.16b, #0x0\n"
+ "movi v31.16b, #0x0\n"
+ "273:" // Height 6: setup done
+ "mov x27, #0x0\n"
+ "274:" // Height 6: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 275f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 276f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
+ "add x20, x20, x19, LSL #1\n"
+ "b 276f\n"
+ "275:" // Height 6: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
+ "276:" // Height 6: input setup done
+ "cmp x26, #0x8\n"
+ "blt 279f\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x10\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "ldr q5, [x20, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "blt 278f\n"
+ "277:" // Height 6: Multiply loop: Main loop head
+ "fmla v8.8h, v6.8h, v0.h[0]\n"
+ "fmla v12.8h, v6.8h, v1.h[0]\n"
+ "sub x26, x26, #0x8\n"
+ "cmp x26, #0x10\n"
+ "fmla v16.8h, v6.8h, v2.h[0]\n"
+ "fmla v20.8h, v6.8h, v3.h[0]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "fmla v24.8h, v6.8h, v4.h[0]\n"
+ "fmla v28.8h, v6.8h, v5.h[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "add x23, x23, #0x10\n"
+ "fmla v9.8h, v7.8h, v0.h[0]\n"
+ "fmla v13.8h, v7.8h, v1.h[0]\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ "fmla v17.8h, v7.8h, v2.h[0]\n"
+ "fmla v21.8h, v7.8h, v3.h[0]\n"
+ "add x20, x20, #0x10\n"
+ "fmla v25.8h, v7.8h, v4.h[0]\n"
+ "fmla v29.8h, v7.8h, v5.h[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "fmla v10.8h, v6.8h, v0.h[0]\n"
+ "fmla v14.8h, v6.8h, v1.h[0]\n"
+ "fmla v18.8h, v6.8h, v2.h[0]\n"
+ "fmla v22.8h, v6.8h, v3.h[0]\n"
+ "fmla v26.8h, v6.8h, v4.h[0]\n"
+ "fmla v30.8h, v6.8h, v5.h[0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "fmla v11.8h, v7.8h, v0.h[0]\n"
+ "fmla v15.8h, v7.8h, v1.h[0]\n"
+ "fmla v19.8h, v7.8h, v2.h[0]\n"
+ "fmla v23.8h, v7.8h, v3.h[0]\n"
+ "fmla v27.8h, v7.8h, v4.h[0]\n"
+ "fmla v31.8h, v7.8h, v5.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "fmla v8.8h, v6.8h, v0.h[1]\n"
+ "fmla v12.8h, v6.8h, v1.h[1]\n"
+ "fmla v16.8h, v6.8h, v2.h[1]\n"
+ "fmla v20.8h, v6.8h, v3.h[1]\n"
+ "fmla v24.8h, v6.8h, v4.h[1]\n"
+ "fmla v28.8h, v6.8h, v5.h[1]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "fmla v9.8h, v7.8h, v0.h[1]\n"
+ "fmla v13.8h, v7.8h, v1.h[1]\n"
+ "fmla v17.8h, v7.8h, v2.h[1]\n"
+ "fmla v21.8h, v7.8h, v3.h[1]\n"
+ "fmla v25.8h, v7.8h, v4.h[1]\n"
+ "fmla v29.8h, v7.8h, v5.h[1]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "fmla v10.8h, v6.8h, v0.h[1]\n"
+ "fmla v14.8h, v6.8h, v1.h[1]\n"
+ "fmla v18.8h, v6.8h, v2.h[1]\n"
+ "fmla v22.8h, v6.8h, v3.h[1]\n"
+ "fmla v26.8h, v6.8h, v4.h[1]\n"
+ "fmla v30.8h, v6.8h, v5.h[1]\n"
+ "ldr q6, [x11, #0x20]\n"
+ "fmla v11.8h, v7.8h, v0.h[1]\n"
+ "fmla v15.8h, v7.8h, v1.h[1]\n"
+ "fmla v19.8h, v7.8h, v2.h[1]\n"
+ "fmla v23.8h, v7.8h, v3.h[1]\n"
+ "fmla v27.8h, v7.8h, v4.h[1]\n"
+ "fmla v31.8h, v7.8h, v5.h[1]\n"
+ "ldr q7, [x10, #0x20]\n"
+ "fmla v8.8h, v6.8h, v0.h[2]\n"
+ "fmla v12.8h, v6.8h, v1.h[2]\n"
+ "fmla v16.8h, v6.8h, v2.h[2]\n"
+ "fmla v20.8h, v6.8h, v3.h[2]\n"
+ "fmla v24.8h, v6.8h, v4.h[2]\n"
+ "fmla v28.8h, v6.8h, v5.h[2]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "fmla v9.8h, v7.8h, v0.h[2]\n"
+ "fmla v13.8h, v7.8h, v1.h[2]\n"
+ "fmla v17.8h, v7.8h, v2.h[2]\n"
+ "fmla v21.8h, v7.8h, v3.h[2]\n"
+ "fmla v25.8h, v7.8h, v4.h[2]\n"
+ "fmla v29.8h, v7.8h, v5.h[2]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "fmla v10.8h, v6.8h, v0.h[2]\n"
+ "fmla v14.8h, v6.8h, v1.h[2]\n"
+ "fmla v18.8h, v6.8h, v2.h[2]\n"
+ "fmla v22.8h, v6.8h, v3.h[2]\n"
+ "fmla v26.8h, v6.8h, v4.h[2]\n"
+ "fmla v30.8h, v6.8h, v5.h[2]\n"
+ "ldr q6, [x11, #0x30]\n"
+ "fmla v11.8h, v7.8h, v0.h[2]\n"
+ "fmla v15.8h, v7.8h, v1.h[2]\n"
+ "fmla v19.8h, v7.8h, v2.h[2]\n"
+ "fmla v23.8h, v7.8h, v3.h[2]\n"
+ "fmla v27.8h, v7.8h, v4.h[2]\n"
+ "fmla v31.8h, v7.8h, v5.h[2]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "fmla v8.8h, v6.8h, v0.h[3]\n"
+ "fmla v12.8h, v6.8h, v1.h[3]\n"
+ "fmla v16.8h, v6.8h, v2.h[3]\n"
+ "fmla v20.8h, v6.8h, v3.h[3]\n"
+ "fmla v24.8h, v6.8h, v4.h[3]\n"
+ "fmla v28.8h, v6.8h, v5.h[3]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "fmla v9.8h, v7.8h, v0.h[3]\n"
+ "fmla v13.8h, v7.8h, v1.h[3]\n"
+ "fmla v17.8h, v7.8h, v2.h[3]\n"
+ "fmla v21.8h, v7.8h, v3.h[3]\n"
+ "fmla v25.8h, v7.8h, v4.h[3]\n"
+ "fmla v29.8h, v7.8h, v5.h[3]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "fmla v10.8h, v6.8h, v0.h[3]\n"
+ "fmla v14.8h, v6.8h, v1.h[3]\n"
+ "fmla v18.8h, v6.8h, v2.h[3]\n"
+ "fmla v22.8h, v6.8h, v3.h[3]\n"
+ "fmla v26.8h, v6.8h, v4.h[3]\n"
+ "fmla v30.8h, v6.8h, v5.h[3]\n"
+ "ldr q6, [x11, #0x40]\n"
+ "fmla v11.8h, v7.8h, v0.h[3]\n"
+ "fmla v15.8h, v7.8h, v1.h[3]\n"
+ "fmla v19.8h, v7.8h, v2.h[3]\n"
+ "fmla v23.8h, v7.8h, v3.h[3]\n"
+ "fmla v27.8h, v7.8h, v4.h[3]\n"
+ "fmla v31.8h, v7.8h, v5.h[3]\n"
+ "ldr q7, [x10, #0x40]\n"
+ "fmla v8.8h, v6.8h, v0.h[4]\n"
+ "fmla v12.8h, v6.8h, v1.h[4]\n"
+ "fmla v16.8h, v6.8h, v2.h[4]\n"
+ "fmla v20.8h, v6.8h, v3.h[4]\n"
+ "fmla v24.8h, v6.8h, v4.h[4]\n"
+ "fmla v28.8h, v6.8h, v5.h[4]\n"
+ "ldr q6, [x9, #0x40]\n"
+ "fmla v9.8h, v7.8h, v0.h[4]\n"
+ "fmla v13.8h, v7.8h, v1.h[4]\n"
+ "fmla v17.8h, v7.8h, v2.h[4]\n"
+ "fmla v21.8h, v7.8h, v3.h[4]\n"
+ "fmla v25.8h, v7.8h, v4.h[4]\n"
+ "fmla v29.8h, v7.8h, v5.h[4]\n"
+ "ldr q7, [x28, #0x40]\n"
+ "fmla v10.8h, v6.8h, v0.h[4]\n"
+ "fmla v14.8h, v6.8h, v1.h[4]\n"
+ "fmla v18.8h, v6.8h, v2.h[4]\n"
+ "fmla v22.8h, v6.8h, v3.h[4]\n"
+ "fmla v26.8h, v6.8h, v4.h[4]\n"
+ "fmla v30.8h, v6.8h, v5.h[4]\n"
+ "ldr q6, [x11, #0x50]\n"
+ "fmla v11.8h, v7.8h, v0.h[4]\n"
+ "fmla v15.8h, v7.8h, v1.h[4]\n"
+ "fmla v19.8h, v7.8h, v2.h[4]\n"
+ "fmla v23.8h, v7.8h, v3.h[4]\n"
+ "fmla v27.8h, v7.8h, v4.h[4]\n"
+ "fmla v31.8h, v7.8h, v5.h[4]\n"
+ "ldr q7, [x10, #0x50]\n"
+ "fmla v8.8h, v6.8h, v0.h[5]\n"
+ "fmla v12.8h, v6.8h, v1.h[5]\n"
+ "fmla v16.8h, v6.8h, v2.h[5]\n"
+ "fmla v20.8h, v6.8h, v3.h[5]\n"
+ "fmla v24.8h, v6.8h, v4.h[5]\n"
+ "fmla v28.8h, v6.8h, v5.h[5]\n"
+ "ldr q6, [x9, #0x50]\n"
+ "fmla v9.8h, v7.8h, v0.h[5]\n"
+ "fmla v13.8h, v7.8h, v1.h[5]\n"
+ "fmla v17.8h, v7.8h, v2.h[5]\n"
+ "fmla v21.8h, v7.8h, v3.h[5]\n"
+ "fmla v25.8h, v7.8h, v4.h[5]\n"
+ "fmla v29.8h, v7.8h, v5.h[5]\n"
+ "ldr q7, [x28, #0x50]\n"
+ "fmla v10.8h, v6.8h, v0.h[5]\n"
+ "fmla v14.8h, v6.8h, v1.h[5]\n"
+ "fmla v18.8h, v6.8h, v2.h[5]\n"
+ "fmla v22.8h, v6.8h, v3.h[5]\n"
+ "fmla v26.8h, v6.8h, v4.h[5]\n"
+ "fmla v30.8h, v6.8h, v5.h[5]\n"
+ "ldr q6, [x11, #0x60]\n"
+ "fmla v11.8h, v7.8h, v0.h[5]\n"
+ "fmla v15.8h, v7.8h, v1.h[5]\n"
+ "fmla v19.8h, v7.8h, v2.h[5]\n"
+ "fmla v23.8h, v7.8h, v3.h[5]\n"
+ "fmla v27.8h, v7.8h, v4.h[5]\n"
+ "fmla v31.8h, v7.8h, v5.h[5]\n"
+ "ldr q7, [x10, #0x60]\n"
+ "fmla v8.8h, v6.8h, v0.h[6]\n"
+ "fmla v12.8h, v6.8h, v1.h[6]\n"
+ "fmla v16.8h, v6.8h, v2.h[6]\n"
+ "fmla v20.8h, v6.8h, v3.h[6]\n"
+ "fmla v24.8h, v6.8h, v4.h[6]\n"
+ "fmla v28.8h, v6.8h, v5.h[6]\n"
+ "ldr q6, [x9, #0x60]\n"
+ "fmla v9.8h, v7.8h, v0.h[6]\n"
+ "fmla v13.8h, v7.8h, v1.h[6]\n"
+ "fmla v17.8h, v7.8h, v2.h[6]\n"
+ "fmla v21.8h, v7.8h, v3.h[6]\n"
+ "fmla v25.8h, v7.8h, v4.h[6]\n"
+ "fmla v29.8h, v7.8h, v5.h[6]\n"
+ "ldr q7, [x28, #0x60]\n"
+ "fmla v10.8h, v6.8h, v0.h[6]\n"
+ "fmla v14.8h, v6.8h, v1.h[6]\n"
+ "fmla v18.8h, v6.8h, v2.h[6]\n"
+ "fmla v22.8h, v6.8h, v3.h[6]\n"
+ "fmla v26.8h, v6.8h, v4.h[6]\n"
+ "fmla v30.8h, v6.8h, v5.h[6]\n"
+ "ldr q6, [x11, #0x70]\n"
+ "add x11, x11, #0x80\n"
+ "fmla v11.8h, v7.8h, v0.h[6]\n"
+ "fmla v15.8h, v7.8h, v1.h[6]\n"
+ "fmla v19.8h, v7.8h, v2.h[6]\n"
+ "fmla v23.8h, v7.8h, v3.h[6]\n"
+ "fmla v27.8h, v7.8h, v4.h[6]\n"
+ "fmla v31.8h, v7.8h, v5.h[6]\n"
+ "ldr q7, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
+ "fmla v8.8h, v6.8h, v0.h[7]\n"
+ "fmla v12.8h, v6.8h, v1.h[7]\n"
+ "fmla v16.8h, v6.8h, v2.h[7]\n"
+ "fmla v20.8h, v6.8h, v3.h[7]\n"
+ "fmla v24.8h, v6.8h, v4.h[7]\n"
+ "fmla v28.8h, v6.8h, v5.h[7]\n"
+ "ldr q6, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
+ "fmla v9.8h, v7.8h, v0.h[7]\n"
+ "fmla v13.8h, v7.8h, v1.h[7]\n"
+ "fmla v17.8h, v7.8h, v2.h[7]\n"
+ "fmla v21.8h, v7.8h, v3.h[7]\n"
+ "fmla v25.8h, v7.8h, v4.h[7]\n"
+ "fmla v29.8h, v7.8h, v5.h[7]\n"
+ "ldr q7, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
+ "fmla v10.8h, v6.8h, v0.h[7]\n"
+ "fmla v14.8h, v6.8h, v1.h[7]\n"
+ "fmla v18.8h, v6.8h, v2.h[7]\n"
+ "fmla v22.8h, v6.8h, v3.h[7]\n"
+ "fmla v26.8h, v6.8h, v4.h[7]\n"
+ "fmla v30.8h, v6.8h, v5.h[7]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "fmla v11.8h, v7.8h, v0.h[7]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "fmla v15.8h, v7.8h, v1.h[7]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "fmla v19.8h, v7.8h, v2.h[7]\n"
+ "ldr q2, [x23, #0x0]\n"
+ "fmla v23.8h, v7.8h, v3.h[7]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "fmla v27.8h, v7.8h, v4.h[7]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "fmla v31.8h, v7.8h, v5.h[7]\n"
+ "ldr q5, [x20, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "bge 277b\n"
+ "278:" // Height 6: Multiply loop: Single iteration only
+ "fmla v8.8h, v6.8h, v0.h[0]\n"
+ "fmla v12.8h, v6.8h, v1.h[0]\n"
+ "sub x26, x26, #0x8\n"
+ "add x25, x25, #0x10\n"
+ "fmla v16.8h, v6.8h, v2.h[0]\n"
+ "fmla v20.8h, v6.8h, v3.h[0]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "fmla v24.8h, v6.8h, v4.h[0]\n"
+ "fmla v28.8h, v6.8h, v5.h[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "add x22, x22, #0x10\n"
+ "fmla v9.8h, v7.8h, v0.h[0]\n"
+ "fmla v13.8h, v7.8h, v1.h[0]\n"
+ "add x21, x21, #0x10\n"
+ "add x20, x20, #0x10\n"
+ "fmla v17.8h, v7.8h, v2.h[0]\n"
+ "fmla v21.8h, v7.8h, v3.h[0]\n"
+ "fmla v25.8h, v7.8h, v4.h[0]\n"
+ "fmla v29.8h, v7.8h, v5.h[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "fmla v10.8h, v6.8h, v0.h[0]\n"
+ "fmla v14.8h, v6.8h, v1.h[0]\n"
+ "fmla v18.8h, v6.8h, v2.h[0]\n"
+ "fmla v22.8h, v6.8h, v3.h[0]\n"
+ "fmla v26.8h, v6.8h, v4.h[0]\n"
+ "fmla v30.8h, v6.8h, v5.h[0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "fmla v11.8h, v7.8h, v0.h[0]\n"
+ "fmla v15.8h, v7.8h, v1.h[0]\n"
+ "fmla v19.8h, v7.8h, v2.h[0]\n"
+ "fmla v23.8h, v7.8h, v3.h[0]\n"
+ "fmla v27.8h, v7.8h, v4.h[0]\n"
+ "fmla v31.8h, v7.8h, v5.h[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "fmla v8.8h, v6.8h, v0.h[1]\n"
+ "fmla v12.8h, v6.8h, v1.h[1]\n"
+ "fmla v16.8h, v6.8h, v2.h[1]\n"
+ "fmla v20.8h, v6.8h, v3.h[1]\n"
+ "fmla v24.8h, v6.8h, v4.h[1]\n"
+ "fmla v28.8h, v6.8h, v5.h[1]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "fmla v9.8h, v7.8h, v0.h[1]\n"
+ "fmla v13.8h, v7.8h, v1.h[1]\n"
+ "fmla v17.8h, v7.8h, v2.h[1]\n"
+ "fmla v21.8h, v7.8h, v3.h[1]\n"
+ "fmla v25.8h, v7.8h, v4.h[1]\n"
+ "fmla v29.8h, v7.8h, v5.h[1]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "fmla v10.8h, v6.8h, v0.h[1]\n"
+ "fmla v14.8h, v6.8h, v1.h[1]\n"
+ "fmla v18.8h, v6.8h, v2.h[1]\n"
+ "fmla v22.8h, v6.8h, v3.h[1]\n"
+ "fmla v26.8h, v6.8h, v4.h[1]\n"
+ "fmla v30.8h, v6.8h, v5.h[1]\n"
+ "ldr q6, [x11, #0x20]\n"
+ "fmla v11.8h, v7.8h, v0.h[1]\n"
+ "fmla v15.8h, v7.8h, v1.h[1]\n"
+ "fmla v19.8h, v7.8h, v2.h[1]\n"
+ "fmla v23.8h, v7.8h, v3.h[1]\n"
+ "fmla v27.8h, v7.8h, v4.h[1]\n"
+ "fmla v31.8h, v7.8h, v5.h[1]\n"
+ "ldr q7, [x10, #0x20]\n"
+ "fmla v8.8h, v6.8h, v0.h[2]\n"
+ "fmla v12.8h, v6.8h, v1.h[2]\n"
+ "fmla v16.8h, v6.8h, v2.h[2]\n"
+ "fmla v20.8h, v6.8h, v3.h[2]\n"
+ "fmla v24.8h, v6.8h, v4.h[2]\n"
+ "fmla v28.8h, v6.8h, v5.h[2]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "fmla v9.8h, v7.8h, v0.h[2]\n"
+ "fmla v13.8h, v7.8h, v1.h[2]\n"
+ "fmla v17.8h, v7.8h, v2.h[2]\n"
+ "fmla v21.8h, v7.8h, v3.h[2]\n"
+ "fmla v25.8h, v7.8h, v4.h[2]\n"
+ "fmla v29.8h, v7.8h, v5.h[2]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "fmla v10.8h, v6.8h, v0.h[2]\n"
+ "fmla v14.8h, v6.8h, v1.h[2]\n"
+ "fmla v18.8h, v6.8h, v2.h[2]\n"
+ "fmla v22.8h, v6.8h, v3.h[2]\n"
+ "fmla v26.8h, v6.8h, v4.h[2]\n"
+ "fmla v30.8h, v6.8h, v5.h[2]\n"
+ "ldr q6, [x11, #0x30]\n"
+ "fmla v11.8h, v7.8h, v0.h[2]\n"
+ "fmla v15.8h, v7.8h, v1.h[2]\n"
+ "fmla v19.8h, v7.8h, v2.h[2]\n"
+ "fmla v23.8h, v7.8h, v3.h[2]\n"
+ "fmla v27.8h, v7.8h, v4.h[2]\n"
+ "fmla v31.8h, v7.8h, v5.h[2]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "fmla v8.8h, v6.8h, v0.h[3]\n"
+ "fmla v12.8h, v6.8h, v1.h[3]\n"
+ "fmla v16.8h, v6.8h, v2.h[3]\n"
+ "fmla v20.8h, v6.8h, v3.h[3]\n"
+ "fmla v24.8h, v6.8h, v4.h[3]\n"
+ "fmla v28.8h, v6.8h, v5.h[3]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "fmla v9.8h, v7.8h, v0.h[3]\n"
+ "fmla v13.8h, v7.8h, v1.h[3]\n"
+ "fmla v17.8h, v7.8h, v2.h[3]\n"
+ "fmla v21.8h, v7.8h, v3.h[3]\n"
+ "fmla v25.8h, v7.8h, v4.h[3]\n"
+ "fmla v29.8h, v7.8h, v5.h[3]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "fmla v10.8h, v6.8h, v0.h[3]\n"
+ "fmla v14.8h, v6.8h, v1.h[3]\n"
+ "fmla v18.8h, v6.8h, v2.h[3]\n"
+ "fmla v22.8h, v6.8h, v3.h[3]\n"
+ "fmla v26.8h, v6.8h, v4.h[3]\n"
+ "fmla v30.8h, v6.8h, v5.h[3]\n"
+ "ldr q6, [x11, #0x40]\n"
+ "fmla v11.8h, v7.8h, v0.h[3]\n"
+ "fmla v15.8h, v7.8h, v1.h[3]\n"
+ "fmla v19.8h, v7.8h, v2.h[3]\n"
+ "fmla v23.8h, v7.8h, v3.h[3]\n"
+ "fmla v27.8h, v7.8h, v4.h[3]\n"
+ "fmla v31.8h, v7.8h, v5.h[3]\n"
+ "ldr q7, [x10, #0x40]\n"
+ "fmla v8.8h, v6.8h, v0.h[4]\n"
+ "fmla v12.8h, v6.8h, v1.h[4]\n"
+ "fmla v16.8h, v6.8h, v2.h[4]\n"
+ "fmla v20.8h, v6.8h, v3.h[4]\n"
+ "fmla v24.8h, v6.8h, v4.h[4]\n"
+ "fmla v28.8h, v6.8h, v5.h[4]\n"
+ "ldr q6, [x9, #0x40]\n"
+ "fmla v9.8h, v7.8h, v0.h[4]\n"
+ "fmla v13.8h, v7.8h, v1.h[4]\n"
+ "fmla v17.8h, v7.8h, v2.h[4]\n"
+ "fmla v21.8h, v7.8h, v3.h[4]\n"
+ "fmla v25.8h, v7.8h, v4.h[4]\n"
+ "fmla v29.8h, v7.8h, v5.h[4]\n"
+ "ldr q7, [x28, #0x40]\n"
+ "fmla v10.8h, v6.8h, v0.h[4]\n"
+ "fmla v14.8h, v6.8h, v1.h[4]\n"
+ "fmla v18.8h, v6.8h, v2.h[4]\n"
+ "fmla v22.8h, v6.8h, v3.h[4]\n"
+ "fmla v26.8h, v6.8h, v4.h[4]\n"
+ "fmla v30.8h, v6.8h, v5.h[4]\n"
+ "ldr q6, [x11, #0x50]\n"
+ "fmla v11.8h, v7.8h, v0.h[4]\n"
+ "fmla v15.8h, v7.8h, v1.h[4]\n"
+ "fmla v19.8h, v7.8h, v2.h[4]\n"
+ "fmla v23.8h, v7.8h, v3.h[4]\n"
+ "fmla v27.8h, v7.8h, v4.h[4]\n"
+ "fmla v31.8h, v7.8h, v5.h[4]\n"
+ "ldr q7, [x10, #0x50]\n"
+ "fmla v8.8h, v6.8h, v0.h[5]\n"
+ "fmla v12.8h, v6.8h, v1.h[5]\n"
+ "fmla v16.8h, v6.8h, v2.h[5]\n"
+ "fmla v20.8h, v6.8h, v3.h[5]\n"
+ "fmla v24.8h, v6.8h, v4.h[5]\n"
+ "fmla v28.8h, v6.8h, v5.h[5]\n"
+ "ldr q6, [x9, #0x50]\n"
+ "fmla v9.8h, v7.8h, v0.h[5]\n"
+ "fmla v13.8h, v7.8h, v1.h[5]\n"
+ "fmla v17.8h, v7.8h, v2.h[5]\n"
+ "fmla v21.8h, v7.8h, v3.h[5]\n"
+ "fmla v25.8h, v7.8h, v4.h[5]\n"
+ "fmla v29.8h, v7.8h, v5.h[5]\n"
+ "ldr q7, [x28, #0x50]\n"
+ "fmla v10.8h, v6.8h, v0.h[5]\n"
+ "fmla v14.8h, v6.8h, v1.h[5]\n"
+ "fmla v18.8h, v6.8h, v2.h[5]\n"
+ "fmla v22.8h, v6.8h, v3.h[5]\n"
+ "fmla v26.8h, v6.8h, v4.h[5]\n"
+ "fmla v30.8h, v6.8h, v5.h[5]\n"
+ "ldr q6, [x11, #0x60]\n"
+ "fmla v11.8h, v7.8h, v0.h[5]\n"
+ "fmla v15.8h, v7.8h, v1.h[5]\n"
+ "fmla v19.8h, v7.8h, v2.h[5]\n"
+ "fmla v23.8h, v7.8h, v3.h[5]\n"
+ "fmla v27.8h, v7.8h, v4.h[5]\n"
+ "fmla v31.8h, v7.8h, v5.h[5]\n"
+ "ldr q7, [x10, #0x60]\n"
+ "fmla v8.8h, v6.8h, v0.h[6]\n"
+ "fmla v12.8h, v6.8h, v1.h[6]\n"
+ "fmla v16.8h, v6.8h, v2.h[6]\n"
+ "fmla v20.8h, v6.8h, v3.h[6]\n"
+ "fmla v24.8h, v6.8h, v4.h[6]\n"
+ "fmla v28.8h, v6.8h, v5.h[6]\n"
+ "ldr q6, [x9, #0x60]\n"
+ "fmla v9.8h, v7.8h, v0.h[6]\n"
+ "fmla v13.8h, v7.8h, v1.h[6]\n"
+ "fmla v17.8h, v7.8h, v2.h[6]\n"
+ "fmla v21.8h, v7.8h, v3.h[6]\n"
+ "fmla v25.8h, v7.8h, v4.h[6]\n"
+ "fmla v29.8h, v7.8h, v5.h[6]\n"
+ "ldr q7, [x28, #0x60]\n"
+ "fmla v10.8h, v6.8h, v0.h[6]\n"
+ "fmla v14.8h, v6.8h, v1.h[6]\n"
+ "fmla v18.8h, v6.8h, v2.h[6]\n"
+ "fmla v22.8h, v6.8h, v3.h[6]\n"
+ "fmla v26.8h, v6.8h, v4.h[6]\n"
+ "fmla v30.8h, v6.8h, v5.h[6]\n"
+ "ldr q6, [x11, #0x70]\n"
+ "add x11, x11, #0x80\n"
+ "fmla v11.8h, v7.8h, v0.h[6]\n"
+ "fmla v15.8h, v7.8h, v1.h[6]\n"
+ "fmla v19.8h, v7.8h, v2.h[6]\n"
+ "fmla v23.8h, v7.8h, v3.h[6]\n"
+ "fmla v27.8h, v7.8h, v4.h[6]\n"
+ "fmla v31.8h, v7.8h, v5.h[6]\n"
+ "ldr q7, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
+ "fmla v8.8h, v6.8h, v0.h[7]\n"
+ "fmla v12.8h, v6.8h, v1.h[7]\n"
+ "fmla v16.8h, v6.8h, v2.h[7]\n"
+ "fmla v20.8h, v6.8h, v3.h[7]\n"
+ "fmla v24.8h, v6.8h, v4.h[7]\n"
+ "fmla v28.8h, v6.8h, v5.h[7]\n"
+ "ldr q6, [x9, #0x70]\n"
+ "add x9, x9, #0x80\n"
+ "fmla v9.8h, v7.8h, v0.h[7]\n"
+ "fmla v13.8h, v7.8h, v1.h[7]\n"
+ "fmla v17.8h, v7.8h, v2.h[7]\n"
+ "fmla v21.8h, v7.8h, v3.h[7]\n"
+ "fmla v25.8h, v7.8h, v4.h[7]\n"
+ "fmla v29.8h, v7.8h, v5.h[7]\n"
+ "ldr q7, [x28, #0x70]\n"
+ "add x28, x28, #0x80\n"
+ "fmla v10.8h, v6.8h, v0.h[7]\n"
+ "fmla v14.8h, v6.8h, v1.h[7]\n"
+ "fmla v18.8h, v6.8h, v2.h[7]\n"
+ "fmla v22.8h, v6.8h, v3.h[7]\n"
+ "fmla v26.8h, v6.8h, v4.h[7]\n"
+ "fmla v30.8h, v6.8h, v5.h[7]\n"
+ "fmla v11.8h, v7.8h, v0.h[7]\n"
+ "fmla v15.8h, v7.8h, v1.h[7]\n"
+ "fmla v19.8h, v7.8h, v2.h[7]\n"
+ "fmla v23.8h, v7.8h, v3.h[7]\n"
+ "fmla v27.8h, v7.8h, v4.h[7]\n"
+ "fmla v31.8h, v7.8h, v5.h[7]\n"
+ "279:" // Height 6: Multiply loop: Main loop skip
+ "cbz x26, 281f\n"
+ "280:" // Height 6: Multiply loop: Odd block loop
+ "ldr h0, [x25], #0x2\n"
+ "ldr h1, [x24], #0x2\n"
+ "sub x26, x26, #0x1\n"
+ "ldr h2, [x23], #0x2\n"
+ "ldr h3, [x22], #0x2\n"
+ "ldr h4, [x21], #0x2\n"
+ "ldr h5, [x20], #0x2\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "fmla v8.8h, v6.8h, v0.h[0]\n"
+ "fmla v12.8h, v6.8h, v1.h[0]\n"
+ "fmla v16.8h, v6.8h, v2.h[0]\n"
+ "fmla v20.8h, v6.8h, v3.h[0]\n"
+ "add x11, x11, #0x10\n"
+ "add x10, x10, #0x10\n"
+ "fmla v24.8h, v6.8h, v4.h[0]\n"
+ "fmla v28.8h, v6.8h, v5.h[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
+ "fmla v9.8h, v7.8h, v0.h[0]\n"
+ "fmla v13.8h, v7.8h, v1.h[0]\n"
+ "fmla v17.8h, v7.8h, v2.h[0]\n"
+ "fmla v21.8h, v7.8h, v3.h[0]\n"
+ "fmla v25.8h, v7.8h, v4.h[0]\n"
+ "fmla v29.8h, v7.8h, v5.h[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x28, x28, #0x10\n"
+ "fmla v10.8h, v6.8h, v0.h[0]\n"
+ "fmla v14.8h, v6.8h, v1.h[0]\n"
+ "fmla v18.8h, v6.8h, v2.h[0]\n"
+ "fmla v22.8h, v6.8h, v3.h[0]\n"
+ "fmla v26.8h, v6.8h, v4.h[0]\n"
+ "fmla v30.8h, v6.8h, v5.h[0]\n"
+ "fmla v11.8h, v7.8h, v0.h[0]\n"
+ "fmla v15.8h, v7.8h, v1.h[0]\n"
+ "fmla v19.8h, v7.8h, v2.h[0]\n"
+ "fmla v23.8h, v7.8h, v3.h[0]\n"
+ "fmla v27.8h, v7.8h, v4.h[0]\n"
+ "fmla v31.8h, v7.8h, v5.h[0]\n"
+ "cbnz x26, 280b\n"
+ "281:" // Height 6: Multiply loop: No odd multiplies
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 274b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
+ "tbz %x[flags], #1, 282f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.8h }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.8h }, [x19]\n"
+ "fmin v8.8h, v8.8h, v1.8h\n"
+ "fmin v9.8h, v9.8h, v1.8h\n"
+ "fmin v10.8h, v10.8h, v1.8h\n"
+ "fmin v11.8h, v11.8h, v1.8h\n"
+ "fmin v12.8h, v12.8h, v1.8h\n"
+ "fmin v13.8h, v13.8h, v1.8h\n"
+ "fmin v14.8h, v14.8h, v1.8h\n"
+ "fmin v15.8h, v15.8h, v1.8h\n"
+ "fmin v16.8h, v16.8h, v1.8h\n"
+ "fmin v17.8h, v17.8h, v1.8h\n"
+ "fmin v18.8h, v18.8h, v1.8h\n"
+ "fmin v19.8h, v19.8h, v1.8h\n"
+ "fmin v20.8h, v20.8h, v1.8h\n"
+ "fmin v21.8h, v21.8h, v1.8h\n"
+ "fmin v22.8h, v22.8h, v1.8h\n"
+ "fmin v23.8h, v23.8h, v1.8h\n"
+ "fmin v24.8h, v24.8h, v1.8h\n"
+ "fmin v25.8h, v25.8h, v1.8h\n"
+ "fmin v26.8h, v26.8h, v1.8h\n"
+ "fmin v27.8h, v27.8h, v1.8h\n"
+ "fmin v28.8h, v28.8h, v1.8h\n"
+ "fmin v29.8h, v29.8h, v1.8h\n"
+ "fmin v30.8h, v30.8h, v1.8h\n"
+ "fmin v31.8h, v31.8h, v1.8h\n"
+ "fmax v8.8h, v8.8h, v0.8h\n"
+ "fmax v9.8h, v9.8h, v0.8h\n"
+ "fmax v10.8h, v10.8h, v0.8h\n"
+ "fmax v11.8h, v11.8h, v0.8h\n"
+ "fmax v12.8h, v12.8h, v0.8h\n"
+ "fmax v13.8h, v13.8h, v0.8h\n"
+ "fmax v14.8h, v14.8h, v0.8h\n"
+ "fmax v15.8h, v15.8h, v0.8h\n"
+ "fmax v16.8h, v16.8h, v0.8h\n"
+ "fmax v17.8h, v17.8h, v0.8h\n"
+ "fmax v18.8h, v18.8h, v0.8h\n"
+ "fmax v19.8h, v19.8h, v0.8h\n"
+ "fmax v20.8h, v20.8h, v0.8h\n"
+ "fmax v21.8h, v21.8h, v0.8h\n"
+ "fmax v22.8h, v22.8h, v0.8h\n"
+ "fmax v23.8h, v23.8h, v0.8h\n"
+ "fmax v24.8h, v24.8h, v0.8h\n"
+ "fmax v25.8h, v25.8h, v0.8h\n"
+ "fmax v26.8h, v26.8h, v0.8h\n"
+ "fmax v27.8h, v27.8h, v0.8h\n"
+ "fmax v28.8h, v28.8h, v0.8h\n"
+ "fmax v29.8h, v29.8h, v0.8h\n"
+ "fmax v30.8h, v30.8h, v0.8h\n"
+ "fmax v31.8h, v31.8h, v0.8h\n"
+ "282:" // Height 6: No activation
+ "cmp x13, #0x20\n"
+ "bge 299f\n"
+ "tbz x13, #4, 290f\n"
+ "st1 { v8.8h }, [x12], #0x10\n"
+ "st1 { v9.8h }, [x12], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "st1 { v13.8h }, [x24], #0x10\n"
+ "st1 { v16.8h }, [x23], #0x10\n"
+ "st1 { v17.8h }, [x23], #0x10\n"
+ "st1 { v20.8h }, [x22], #0x10\n"
+ "st1 { v21.8h }, [x22], #0x10\n"
+ "st1 { v24.8h }, [x21], #0x10\n"
+ "st1 { v25.8h }, [x21], #0x10\n"
+ "st1 { v28.8h }, [x20], #0x10\n"
+ "st1 { v29.8h }, [x20], #0x10\n"
+ "tbz x13, #3, 286f\n"
+ "st1 { v10.8h }, [x12], #0x10\n"
+ "st1 { v14.8h }, [x24], #0x10\n"
+ "st1 { v18.8h }, [x23], #0x10\n"
+ "st1 { v22.8h }, [x22], #0x10\n"
+ "st1 { v26.8h }, [x21], #0x10\n"
+ "st1 { v30.8h }, [x20], #0x10\n"
+ "tbz x13, #2, 284f\n"
+ "str d11, [x12], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "str d23, [x22], #0x8\n"
+ "str d27, [x21], #0x8\n"
+ "str d31, [x20], #0x8\n"
+ "tbz x13, #1, 283f\n"
+ "st1 { v11.s }[2], [x12], #0x4\n"
+ "st1 { v15.s }[2], [x24], #0x4\n"
+ "st1 { v19.s }[2], [x23], #0x4\n"
+ "st1 { v23.s }[2], [x22], #0x4\n"
+ "st1 { v27.s }[2], [x21], #0x4\n"
+ "st1 { v31.s }[2], [x20], #0x4\n"
+ "tbz x13, #0, 298f\n"
+ "st1 { v11.h }[6], [x12]\n"
+ "st1 { v15.h }[6], [x24]\n"
+ "st1 { v19.h }[6], [x23]\n"
+ "st1 { v23.h }[6], [x22]\n"
+ "st1 { v27.h }[6], [x21]\n"
+ "st1 { v31.h }[6], [x20]\n"
+ "b 298f\n"
+ "283:" // Height 6: Partial direct writeback: partial_1_28
+ "tbz x13, #0, 298f\n"
+ "st1 { v11.h }[4], [x12]\n"
+ "st1 { v15.h }[4], [x24]\n"
+ "st1 { v19.h }[4], [x23]\n"
+ "st1 { v23.h }[4], [x22]\n"
+ "st1 { v27.h }[4], [x21]\n"
+ "st1 { v31.h }[4], [x20]\n"
+ "b 298f\n"
+ "284:" // Height 6: Partial direct writeback: partial_2_24
+ "tbz x13, #1, 285f\n"
+ "str s11, [x12], #0x4\n"
+ "str s15, [x24], #0x4\n"
+ "str s19, [x23], #0x4\n"
+ "str s23, [x22], #0x4\n"
+ "str s27, [x21], #0x4\n"
+ "str s31, [x20], #0x4\n"
+ "tbz x13, #0, 298f\n"
+ "st1 { v11.h }[2], [x12]\n"
+ "st1 { v15.h }[2], [x24]\n"
+ "st1 { v19.h }[2], [x23]\n"
+ "st1 { v23.h }[2], [x22]\n"
+ "st1 { v27.h }[2], [x21]\n"
+ "st1 { v31.h }[2], [x20]\n"
+ "b 298f\n"
+ "285:" // Height 6: Partial direct writeback: partial_1_24
+ "tbz x13, #0, 298f\n"
+ "str h11, [x12, #0x0]\n"
+ "str h15, [x24, #0x0]\n"
+ "str h19, [x23, #0x0]\n"
+ "str h23, [x22, #0x0]\n"
+ "str h27, [x21, #0x0]\n"
+ "str h31, [x20, #0x0]\n"
+ "b 298f\n"
+ "286:" // Height 6: Partial direct writeback: partial_4_16
+ "tbz x13, #2, 288f\n"
+ "str d10, [x12], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "str d26, [x21], #0x8\n"
+ "str d30, [x20], #0x8\n"
+ "tbz x13, #1, 287f\n"
+ "st1 { v10.s }[2], [x12], #0x4\n"
+ "st1 { v14.s }[2], [x24], #0x4\n"
+ "st1 { v18.s }[2], [x23], #0x4\n"
+ "st1 { v22.s }[2], [x22], #0x4\n"
+ "st1 { v26.s }[2], [x21], #0x4\n"
+ "st1 { v30.s }[2], [x20], #0x4\n"
+ "tbz x13, #0, 298f\n"
+ "st1 { v10.h }[6], [x12]\n"
+ "st1 { v14.h }[6], [x24]\n"
+ "st1 { v18.h }[6], [x23]\n"
+ "st1 { v22.h }[6], [x22]\n"
+ "st1 { v26.h }[6], [x21]\n"
+ "st1 { v30.h }[6], [x20]\n"
+ "b 298f\n"
+ "287:" // Height 6: Partial direct writeback: partial_1_20
+ "tbz x13, #0, 298f\n"
+ "st1 { v10.h }[4], [x12]\n"
+ "st1 { v14.h }[4], [x24]\n"
+ "st1 { v18.h }[4], [x23]\n"
+ "st1 { v22.h }[4], [x22]\n"
+ "st1 { v26.h }[4], [x21]\n"
+ "st1 { v30.h }[4], [x20]\n"
+ "b 298f\n"
+ "288:" // Height 6: Partial direct writeback: partial_2_16
+ "tbz x13, #1, 289f\n"
+ "str s10, [x12], #0x4\n"
+ "str s14, [x24], #0x4\n"
+ "str s18, [x23], #0x4\n"
+ "str s22, [x22], #0x4\n"
+ "str s26, [x21], #0x4\n"
+ "str s30, [x20], #0x4\n"
+ "tbz x13, #0, 298f\n"
+ "st1 { v10.h }[2], [x12]\n"
+ "st1 { v14.h }[2], [x24]\n"
+ "st1 { v18.h }[2], [x23]\n"
+ "st1 { v22.h }[2], [x22]\n"
+ "st1 { v26.h }[2], [x21]\n"
+ "st1 { v30.h }[2], [x20]\n"
+ "b 298f\n"
+ "289:" // Height 6: Partial direct writeback: partial_1_16
+ "tbz x13, #0, 298f\n"
+ "str h10, [x12, #0x0]\n"
+ "str h14, [x24, #0x0]\n"
+ "str h18, [x23, #0x0]\n"
+ "str h22, [x22, #0x0]\n"
+ "str h26, [x21, #0x0]\n"
+ "str h30, [x20, #0x0]\n"
+ "b 298f\n"
+ "290:" // Height 6: Partial direct writeback: partial_8_0
+ "tbz x13, #3, 294f\n"
+ "st1 { v8.8h }, [x12], #0x10\n"
+ "st1 { v12.8h }, [x24], #0x10\n"
+ "st1 { v16.8h }, [x23], #0x10\n"
+ "st1 { v20.8h }, [x22], #0x10\n"
+ "st1 { v24.8h }, [x21], #0x10\n"
+ "st1 { v28.8h }, [x20], #0x10\n"
+ "tbz x13, #2, 292f\n"
+ "str d9, [x12], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "str d25, [x21], #0x8\n"
+ "str d29, [x20], #0x8\n"
+ "tbz x13, #1, 291f\n"
+ "st1 { v9.s }[2], [x12], #0x4\n"
+ "st1 { v13.s }[2], [x24], #0x4\n"
+ "st1 { v17.s }[2], [x23], #0x4\n"
+ "st1 { v21.s }[2], [x22], #0x4\n"
+ "st1 { v25.s }[2], [x21], #0x4\n"
+ "st1 { v29.s }[2], [x20], #0x4\n"
+ "tbz x13, #0, 298f\n"
+ "st1 { v9.h }[6], [x12]\n"
+ "st1 { v13.h }[6], [x24]\n"
+ "st1 { v17.h }[6], [x23]\n"
+ "st1 { v21.h }[6], [x22]\n"
+ "st1 { v25.h }[6], [x21]\n"
+ "st1 { v29.h }[6], [x20]\n"
+ "b 298f\n"
+ "291:" // Height 6: Partial direct writeback: partial_1_12
+ "tbz x13, #0, 298f\n"
+ "st1 { v9.h }[4], [x12]\n"
+ "st1 { v13.h }[4], [x24]\n"
+ "st1 { v17.h }[4], [x23]\n"
+ "st1 { v21.h }[4], [x22]\n"
+ "st1 { v25.h }[4], [x21]\n"
+ "st1 { v29.h }[4], [x20]\n"
+ "b 298f\n"
+ "292:" // Height 6: Partial direct writeback: partial_2_8
+ "tbz x13, #1, 293f\n"
+ "str s9, [x12], #0x4\n"
+ "str s13, [x24], #0x4\n"
+ "str s17, [x23], #0x4\n"
+ "str s21, [x22], #0x4\n"
+ "str s25, [x21], #0x4\n"
+ "str s29, [x20], #0x4\n"
+ "tbz x13, #0, 298f\n"
+ "st1 { v9.h }[2], [x12]\n"
+ "st1 { v13.h }[2], [x24]\n"
+ "st1 { v17.h }[2], [x23]\n"
+ "st1 { v21.h }[2], [x22]\n"
+ "st1 { v25.h }[2], [x21]\n"
+ "st1 { v29.h }[2], [x20]\n"
+ "b 298f\n"
+ "293:" // Height 6: Partial direct writeback: partial_1_8
+ "tbz x13, #0, 298f\n"
+ "str h9, [x12, #0x0]\n"
+ "str h13, [x24, #0x0]\n"
+ "str h17, [x23, #0x0]\n"
+ "str h21, [x22, #0x0]\n"
+ "str h25, [x21, #0x0]\n"
+ "str h29, [x20, #0x0]\n"
+ "b 298f\n"
+ "294:" // Height 6: Partial direct writeback: partial_4_0
+ "tbz x13, #2, 296f\n"
+ "str d8, [x12], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "str d24, [x21], #0x8\n"
+ "str d28, [x20], #0x8\n"
+ "tbz x13, #1, 295f\n"
+ "st1 { v8.s }[2], [x12], #0x4\n"
+ "st1 { v12.s }[2], [x24], #0x4\n"
+ "st1 { v16.s }[2], [x23], #0x4\n"
+ "st1 { v20.s }[2], [x22], #0x4\n"
+ "st1 { v24.s }[2], [x21], #0x4\n"
+ "st1 { v28.s }[2], [x20], #0x4\n"
+ "tbz x13, #0, 298f\n"
+ "st1 { v8.h }[6], [x12]\n"
+ "st1 { v12.h }[6], [x24]\n"
+ "st1 { v16.h }[6], [x23]\n"
+ "st1 { v20.h }[6], [x22]\n"
+ "st1 { v24.h }[6], [x21]\n"
+ "st1 { v28.h }[6], [x20]\n"
+ "b 298f\n"
+ "295:" // Height 6: Partial direct writeback: partial_1_4
+ "tbz x13, #0, 298f\n"
+ "st1 { v8.h }[4], [x12]\n"
+ "st1 { v12.h }[4], [x24]\n"
+ "st1 { v16.h }[4], [x23]\n"
+ "st1 { v20.h }[4], [x22]\n"
+ "st1 { v24.h }[4], [x21]\n"
+ "st1 { v28.h }[4], [x20]\n"
+ "b 298f\n"
+ "296:" // Height 6: Partial direct writeback: partial_2_0
+ "tbz x13, #1, 297f\n"
+ "str s8, [x12], #0x4\n"
+ "str s12, [x24], #0x4\n"
+ "str s16, [x23], #0x4\n"
+ "str s20, [x22], #0x4\n"
+ "str s24, [x21], #0x4\n"
+ "str s28, [x20], #0x4\n"
+ "tbz x13, #0, 298f\n"
+ "st1 { v8.h }[2], [x12]\n"
+ "st1 { v12.h }[2], [x24]\n"
+ "st1 { v16.h }[2], [x23]\n"
+ "st1 { v20.h }[2], [x22]\n"
+ "st1 { v24.h }[2], [x21]\n"
+ "st1 { v28.h }[2], [x20]\n"
+ "b 298f\n"
+ "297:" // Height 6: Partial direct writeback: partial_1_0
+ "str h8, [x12, #0x0]\n"
+ "str h12, [x24, #0x0]\n"
+ "str h16, [x23, #0x0]\n"
+ "str h20, [x22, #0x0]\n"
+ "str h24, [x21, #0x0]\n"
+ "str h28, [x20, #0x0]\n"
+ "298:" // Height 6: Partial direct writeback: Done
+ "b 300f\n"
+ "299:" // Height 6: Full writeback
+ "str q8, [x12, #0x0]\n"
+ "str q9, [x12, #0x10]\n"
+ "str q10, [x12, #0x20]\n"
+ "str q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q20, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q22, [x22, #0x20]\n"
+ "str q23, [x22, #0x30]\n"
+ "str q24, [x21, #0x0]\n"
+ "str q25, [x21, #0x10]\n"
+ "str q26, [x21, #0x20]\n"
+ "str q27, [x21, #0x30]\n"
+ "str q28, [x20, #0x0]\n"
+ "str q29, [x20, #0x10]\n"
+ "str q30, [x20, #0x20]\n"
+ "str q31, [x20, #0x30]\n"
+ "300:" // Height 6: Writeback done
+ "subs x13, x13, #0x20\n"
+ "bgt 252b\n"
+ "subs %x[M], %x[M], #0x6\n"
+ "beq 302f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 301f\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "b 1b\n"
+ "301:" // Update direct input
+ "mov x19, #0xc\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
+ "b 1b\n"
+ "302:" // Exit
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
+ : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ );
+}
+
+} // namespace arm_gemm
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32_mla_6x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32_mla_6x16.hpp
new file mode 100644
index 0000000000..08f5aeb2d8
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32_mla_6x16.hpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#pragma once
+#ifdef __aarch64__
+
+#include "../std_transforms_fixed.hpp"
+#include "../kernel_weight_format.hpp"
+#include "../performance_parameters.hpp"
+
+#define ARGLIST \
+ unsigned int, const unsigned int *, \
+ IndirectInputArg<float>, \
+ size_t, size_t, \
+ const float *, \
+ size_t, \
+ IndirectOutputArg<float>, \
+ const float *, Activation, bool
+
+namespace arm_gemm
+{
+// Actual kernel implementations
+void a64_ffhybrid_fp32_mla_6x16( ARGLIST );
+
+class cls_a64_ffhybrid_fp32_mla_6x16
+{
+public:
+ typedef float lhs_operand_type;
+ typedef float rhs_operand_type;
+ typedef float result_type;
+
+ typedef void (*kern_type)( ARGLIST );
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 6;
+ }
+ static unsigned int stripe_width()
+ {
+ return 4;
+ }
+
+ static KernelWeightFormat kernel_weight_format()
+ {
+ return KernelWeightFormat::VL128_BL32;
+ }
+
+ static unsigned int out_width()
+ {
+ return 16;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 1;
+ }
+
+ static constexpr bool supports_accumulate()
+ {
+ return true;
+ }
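+
+    // Note: taken together, the members above describe the tile geometry --
+    // a 6-row by 16-column fp32 output block per iteration, with B consumed
+    // in 4-float (128-bit) stripes. VL128_BL32 presumably encodes exactly
+    // that (128-bit vector length, 32-bit blocking); kernel_weight_format.hpp
+    // holds the authoritative definition.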
+
+ StdTransformsFixed<rhs_operand_type, result_type, 6, 16, 1> transforms = {};
+ template<typename T>
+ static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
+ {
+ if (std::is_same<T, float>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 13.16 };
+ }
+ }
+
+ return { 1.0 };
+ }
+
+ // Default to the generic kernel
+ kern_type kernel=a64_ffhybrid_fp32_mla_6x16;
+ cls_a64_ffhybrid_fp32_mla_6x16(const CPUInfo *)
+ {
+ }
+};
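+
+// A minimal, hypothetical usage sketch (illustration only; the local names
+// below are not part of the library): selection code can query the blocking
+// geometry through the static members above, e.g.
+//
+//   using K = cls_a64_ffhybrid_fp32_mla_6x16;
+//   static_assert(K::out_height() == 6, "kernel emits 6 output rows/iter");
+//   const unsigned int tile_cols = K::out_width();    // 16 floats wide
+//   const unsigned int stripe    = K::stripe_width(); // B held in 4-float stripes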
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32_mla_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32_mla_6x16/generic.cpp
new file mode 100644
index 0000000000..f811116a06
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32_mla_6x16/generic.cpp
@@ -0,0 +1,3461 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __aarch64__
+
+#include "arm_gemm.hpp"
+#include "../../utils.hpp"
+
+#include <cassert>
+#include <limits>
+
+namespace arm_gemm {
+
+void a64_ffhybrid_fp32_mla_6x16 (
+ unsigned int num_strings, const unsigned int *string_lengths, IndirectInputArg<float> A_arg,
+ size_t M, size_t N, const float *B_ptr, size_t B_stride, IndirectOutputArg<float> output_arg,
+ const float *bias, Activation act, bool accumulate
+)
+{
+ struct KernelArgs {
+ float maxval = static_cast<float>(std::numeric_limits<float>::infinity());
+ float minval = - static_cast<float>(std::numeric_limits<float>::infinity());
+ unsigned int num_strings = {};
+ const unsigned int *string_lengths = {};
+ size_t N = {};
+ const float *B_ptr = {};
+ const float *cur_B_ptr = {};
+ size_t B_stride = {};
+ size_t output_offset = {};
+ size_t input_initial_col = {};
+ size_t input_offset = {};
+ } ka;
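+
+    // Note: the assembly below reaches every KernelArgs field through the
+    // offsetof() operands bound at the bottom of the asm statement, so the
+    // struct is accessed symbolically rather than by hard-coded offsets.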
+
+ unsigned long flags=0;
+ void *output_ptr;
+ void *input_ptr;
+
+ if (output_arg.is_indirect) {
+ output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_offset=output_arg.indirect.offset;
+ flags |= 0x4;
+ } else {
+ output_ptr=(void *)(output_arg.direct.base);
+ ka.output_offset=output_arg.direct.stride;
+ }
+
+ if (A_arg.is_indirect) {
+ input_ptr=(void *)(A_arg.indirect.ptr);
+ ka.input_offset=A_arg.indirect.start_row;
+ ka.input_initial_col=A_arg.indirect.start_col;
+ flags |= 0x8;
+ } else {
+ assert(num_strings==1);
+ input_ptr=(void *)(A_arg.direct.base);
+ ka.input_offset=A_arg.direct.stride;
+ }
+ if (accumulate) {
+ flags |= 0x1;
+ }
+ ka.num_strings = num_strings;
+ ka.string_lengths = string_lengths;
+ ka.N = N;
+ ka.B_ptr = B_ptr;
+ ka.B_stride = B_stride;
+ switch(act.type) {
+ default:
+ case Activation::Type::None:
+ break;
+ case Activation::Type::BoundedReLU:
+ ka.maxval = static_cast<float>(act.param1);
+ /* fall through */
+ case Activation::Type::ReLU:
+ ka.minval = 0;
+ flags |= 0x2;
+ break;
+ }
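+
+    // Note: at this point the flag bits consumed by the assembly are fully
+    // assembled: bit 0 = accumulate into C, bit 1 = apply the min/max clamp,
+    // bit 2 = indirect output, bit 3 = indirect input. BoundedReLU stores
+    // its upper bound and intentionally falls through to share ReLU's
+    // minval = 0.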
+ __asm__ __volatile__(
+ "1:" // Row loop
+ "cmp %x[M], #0x6\n"
+ "bge 171f\n"
+ "cmp %x[M], #0x4\n"
+ "bgt 137f\n"
+ "beq 103f\n"
+ "cmp %x[M], #0x2\n"
+ "bgt 69f\n"
+ "beq 35f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "2:" // Height 1: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0xc\n"
+ "bgt 3f\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
+ "bgt 3f\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
+ "bgt 3f\n"
+ "mov x10, x11\n"
+ "3:" // Height 1: B setup done
+ "cbz x14, 4f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
+ "add x14, x14, #0x40\n"
+ "b 15f\n"
+ "4:" // Height 1: no bias
+ "tbz %x[flags], #0, 14f\n"
+ "cmp x13, #0x10\n"
+ "bge 13f\n"
+ "tbz x13, #3, 8f\n"
+ "ld1 { v8.4s }, [x12], #0x10\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "tbz x13, #2, 6f\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "tbz x13, #1, 5f\n"
+ "ldr d11, [x12], #0x8\n"
+ "mov x19, #0x38\n"
+ "tbz x13, #0, 12f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "b 12f\n"
+ "5:" // Height 1: Partial accumulate: partial_1_12
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 12f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "b 12f\n"
+ "6:" // Height 1: Partial accumulate: partial_2_8
+ "tbz x13, #1, 7f\n"
+ "ldr d10, [x12], #0x8\n"
+ "mov x19, #0x28\n"
+ "tbz x13, #0, 12f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "b 12f\n"
+ "7:" // Height 1: Partial accumulate: partial_1_8
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 12f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "b 12f\n"
+ "8:" // Height 1: Partial accumulate: partial_4_0
+ "tbz x13, #2, 10f\n"
+ "ld1 { v8.4s }, [x12], #0x10\n"
+ "tbz x13, #1, 9f\n"
+ "ldr d9, [x12], #0x8\n"
+ "mov x19, #0x18\n"
+ "tbz x13, #0, 12f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "b 12f\n"
+ "9:" // Height 1: Partial accumulate: partial_1_4
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 12f\n"
+ "ldr s9, [x12, #0x0]\n"
+ "b 12f\n"
+ "10:" // Height 1: Partial accumulate: partial_2_0
+ "tbz x13, #1, 11f\n"
+ "ldr d8, [x12], #0x8\n"
+ "mov x19, #0x8\n"
+ "tbz x13, #0, 12f\n"
+ "ld1 { v8.s }[2], [x12]\n"
+ "b 12f\n"
+ "11:" // Height 1: Partial accumulate: partial_1_0
+ "ldr s8, [x12, #0x0]\n"
+ "mov x19, #0x0\n"
+ "12:" // Height 1: Partial accumulate: Done
+ "sub x12, x12, x19\n"
+ "b 15f\n"
+ "13:" // Height 1: full accumulate
+ "ldr q8, [x12, #0x0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "ldr q10, [x12, #0x20]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "b 15f\n"
+ "14:" // Height 1: no accumulate
+ "movi v8.16b, #0x0\n"
+ "movi v9.16b, #0x0\n"
+ "movi v10.16b, #0x0\n"
+ "movi v11.16b, #0x0\n"
+ "15:" // Height 1: setup done
+ "mov x27, #0x0\n"
+ "16:" // Height 1: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 17f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 18f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "b 18f\n"
+ "17:" // Height 1: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "18:" // Height 1: input setup done
+ "cmp x26, #0x4\n"
+ "blt 21f\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "cmp x26, #0x8\n"
+ "ldr q7, [x10, #0x0]\n"
+ "blt 20f\n"
+ "19:" // Height 1: Multiply loop: Main loop head
+ "fmla v8.4s, v6.4s, v0.s[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "fmla v9.4s, v7.4s, v0.s[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "fmla v10.4s, v6.4s, v0.s[0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "fmla v11.4s, v7.4s, v0.s[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "fmla v8.4s, v6.4s, v0.s[1]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "fmla v9.4s, v7.4s, v0.s[1]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "fmla v10.4s, v6.4s, v0.s[1]\n"
+ "ldr q6, [x11, #0x20]\n"
+ "fmla v11.4s, v7.4s, v0.s[1]\n"
+ "ldr q7, [x10, #0x20]\n"
+ "fmla v8.4s, v6.4s, v0.s[2]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "fmla v9.4s, v7.4s, v0.s[2]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "fmla v10.4s, v6.4s, v0.s[2]\n"
+ "ldr q6, [x11, #0x30]\n"
+ "fmla v11.4s, v7.4s, v0.s[2]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "fmla v8.4s, v6.4s, v0.s[3]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "fmla v9.4s, v7.4s, v0.s[3]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "sub x26, x26, #0x4\n"
+ "cmp x26, #0x8\n"
+ "fmla v10.4s, v6.4s, v0.s[3]\n"
+ "fmla v11.4s, v7.4s, v0.s[3]\n"
+ "add x25, x25, #0x10\n"
+ "ldr q0, [x25, #0x0]\n"
+ "add x11, x11, #0x40\n"
+ "ldr q6, [x11, #0x0]\n"
+ "add x10, x10, #0x40\n"
+ "ldr q7, [x10, #0x0]\n"
+ "add x9, x9, #0x40\n"
+ "add x28, x28, #0x40\n"
+ "bge 19b\n"
+ "20:" // Height 1: Multiply loop: Single iteration only
+ "fmla v8.4s, v6.4s, v0.s[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "fmla v9.4s, v7.4s, v0.s[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "fmla v10.4s, v6.4s, v0.s[0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "fmla v11.4s, v7.4s, v0.s[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "fmla v8.4s, v6.4s, v0.s[1]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "fmla v9.4s, v7.4s, v0.s[1]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "fmla v10.4s, v6.4s, v0.s[1]\n"
+ "ldr q6, [x11, #0x20]\n"
+ "fmla v11.4s, v7.4s, v0.s[1]\n"
+ "ldr q7, [x10, #0x20]\n"
+ "fmla v8.4s, v6.4s, v0.s[2]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "fmla v9.4s, v7.4s, v0.s[2]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "fmla v10.4s, v6.4s, v0.s[2]\n"
+ "ldr q6, [x11, #0x30]\n"
+ "fmla v11.4s, v7.4s, v0.s[2]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "fmla v8.4s, v6.4s, v0.s[3]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "fmla v9.4s, v7.4s, v0.s[3]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "sub x26, x26, #0x4\n"
+ "fmla v10.4s, v6.4s, v0.s[3]\n"
+ "fmla v11.4s, v7.4s, v0.s[3]\n"
+ "add x25, x25, #0x10\n"
+ "add x11, x11, #0x40\n"
+ "add x10, x10, #0x40\n"
+ "add x9, x9, #0x40\n"
+ "add x28, x28, #0x40\n"
+ "21:" // Height 1: Multiply loop: Main loop skip
+ "cbz x26, 23f\n"
+ "22:" // Height 1: Multiply loop: Odd block loop
+ "ldr s0, [x25], #0x4\n"
+ "ldr q6, [x11, #0x0]\n"
+ "fmla v8.4s, v6.4s, v0.s[0]\n"
+ "sub x26, x26, #0x1\n"
+ "ldr q7, [x10, #0x0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "fmla v9.4s, v7.4s, v0.s[0]\n"
+ "fmla v10.4s, v6.4s, v0.s[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "fmla v11.4s, v7.4s, v0.s[0]\n"
+ "add x11, x11, #0x10\n"
+ "add x10, x10, #0x10\n"
+ "add x9, x9, #0x10\n"
+ "add x28, x28, #0x10\n"
+ "cbnz x26, 22b\n"
+ "23:" // Height 1: Multiply loop: No odd multiplies
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 16b\n"
+ "tbz %x[flags], #1, 24f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v1.4s\n"
+ "fmin v9.4s, v9.4s, v1.4s\n"
+ "fmin v10.4s, v10.4s, v1.4s\n"
+ "fmin v11.4s, v11.4s, v1.4s\n"
+ "fmax v8.4s, v8.4s, v0.4s\n"
+ "fmax v9.4s, v9.4s, v0.4s\n"
+ "fmax v10.4s, v10.4s, v0.4s\n"
+ "fmax v11.4s, v11.4s, v0.4s\n"
+ "24:" // Height 1: No activation
+ "cmp x13, #0x10\n"
+ "bge 33f\n"
+ "tbz x13, #3, 28f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "st1 { v9.4s }, [x12], #0x10\n"
+ "tbz x13, #2, 26f\n"
+ "st1 { v10.4s }, [x12], #0x10\n"
+ "tbz x13, #1, 25f\n"
+ "str d11, [x12], #0x8\n"
+ "tbz x13, #0, 32f\n"
+ "st1 { v11.s }[2], [x12]\n"
+ "b 32f\n"
+ "25:" // Height 1: Partial direct writeback: partial_1_12
+ "tbz x13, #0, 32f\n"
+ "str s11, [x12, #0x0]\n"
+ "b 32f\n"
+ "26:" // Height 1: Partial direct writeback: partial_2_8
+ "tbz x13, #1, 27f\n"
+ "str d10, [x12], #0x8\n"
+ "tbz x13, #0, 32f\n"
+ "st1 { v10.s }[2], [x12]\n"
+ "b 32f\n"
+ "27:" // Height 1: Partial direct writeback: partial_1_8
+ "tbz x13, #0, 32f\n"
+ "str s10, [x12, #0x0]\n"
+ "b 32f\n"
+ "28:" // Height 1: Partial direct writeback: partial_4_0
+ "tbz x13, #2, 30f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "tbz x13, #1, 29f\n"
+ "str d9, [x12], #0x8\n"
+ "tbz x13, #0, 32f\n"
+ "st1 { v9.s }[2], [x12]\n"
+ "b 32f\n"
+ "29:" // Height 1: Partial direct writeback: partial_1_4
+ "tbz x13, #0, 32f\n"
+ "str s9, [x12, #0x0]\n"
+ "b 32f\n"
+ "30:" // Height 1: Partial direct writeback: partial_2_0
+ "tbz x13, #1, 31f\n"
+ "str d8, [x12], #0x8\n"
+ "tbz x13, #0, 32f\n"
+ "st1 { v8.s }[2], [x12]\n"
+ "b 32f\n"
+ "31:" // Height 1: Partial direct writeback: partial_1_0
+ "str s8, [x12, #0x0]\n"
+ "32:" // Height 1: Partial direct writeback: Done
+ "b 34f\n"
+ "33:" // Height 1: Full writeback
+ "str q8, [x12, #0x0]\n"
+ "str q9, [x12, #0x10]\n"
+ "str q10, [x12, #0x20]\n"
+ "str q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "34:" // Height 1: Writeback done
+ "subs x13, x13, #0x10\n"
+ "bgt 2b\n"
+ "b 206f\n"
+ "35:" // Height 2
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "36:" // Height 2: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0xc\n"
+ "bgt 37f\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
+ "bgt 37f\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
+ "bgt 37f\n"
+ "mov x10, x11\n"
+ "37:" // Height 2: B setup done
+ "cbz x14, 38f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
+ "add x14, x14, #0x40\n"
+ "b 49f\n"
+ "38:" // Height 2: no bias
+ "tbz %x[flags], #0, 48f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x13, #0x10\n"
+ "add x24, x12, x19, LSL #2\n"
+ "bge 47f\n"
+ "tbz x13, #3, 42f\n"
+ "ld1 { v8.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "tbz x13, #2, 40f\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "tbz x13, #1, 39f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x19, #0x38\n"
+ "tbz x13, #0, 46f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "b 46f\n"
+ "39:" // Height 2: Partial accumulate: partial_1_12
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 46f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "b 46f\n"
+ "40:" // Height 2: Partial accumulate: partial_2_8
+ "tbz x13, #1, 41f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "tbz x13, #0, 46f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "b 46f\n"
+ "41:" // Height 2: Partial accumulate: partial_1_8
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 46f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "b 46f\n"
+ "42:" // Height 2: Partial accumulate: partial_4_0
+ "tbz x13, #2, 44f\n"
+ "ld1 { v8.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "tbz x13, #1, 43f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x19, #0x18\n"
+ "tbz x13, #0, 46f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "b 46f\n"
+ "43:" // Height 2: Partial accumulate: partial_1_4
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 46f\n"
+ "ldr s9, [x12, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "b 46f\n"
+ "44:" // Height 2: Partial accumulate: partial_2_0
+ "tbz x13, #1, 45f\n"
+ "ldr d8, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "tbz x13, #0, 46f\n"
+ "ld1 { v8.s }[2], [x12]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "b 46f\n"
+ "45:" // Height 2: Partial accumulate: partial_1_0
+ "ldr s8, [x12, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
+ "46:" // Height 2: Partial accumulate: Done
+ "sub x12, x12, x19\n"
+ "b 49f\n"
+ "47:" // Height 2: full accumulate
+ "ldr q8, [x12, #0x0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "ldr q10, [x12, #0x20]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "b 49f\n"
+ "48:" // Height 2: no accumulate
+ "movi v8.16b, #0x0\n"
+ "movi v9.16b, #0x0\n"
+ "movi v10.16b, #0x0\n"
+ "movi v11.16b, #0x0\n"
+ "movi v12.16b, #0x0\n"
+ "movi v13.16b, #0x0\n"
+ "movi v14.16b, #0x0\n"
+ "movi v15.16b, #0x0\n"
+ "49:" // Height 2: setup done
+ "mov x27, #0x0\n"
+ "50:" // Height 2: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 51f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 52f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "b 52f\n"
+ "51:" // Height 2: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "52:" // Height 2: input setup done
+ "cmp x26, #0x4\n"
+ "blt 55f\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x8\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "blt 54f\n"
+ "53:" // Height 2: Multiply loop: Main loop head
+ "fmla v8.4s, v6.4s, v0.s[0]\n"
+ "fmla v12.4s, v6.4s, v1.s[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "sub x26, x26, #0x4\n"
+ "fmla v9.4s, v7.4s, v0.s[0]\n"
+ "fmla v13.4s, v7.4s, v1.s[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "cmp x26, #0x8\n"
+ "fmla v10.4s, v6.4s, v0.s[0]\n"
+ "fmla v14.4s, v6.4s, v1.s[0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "add x25, x25, #0x10\n"
+ "fmla v11.4s, v7.4s, v0.s[0]\n"
+ "fmla v15.4s, v7.4s, v1.s[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "add x24, x24, #0x10\n"
+ "fmla v8.4s, v6.4s, v0.s[1]\n"
+ "fmla v12.4s, v6.4s, v1.s[1]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "fmla v9.4s, v7.4s, v0.s[1]\n"
+ "fmla v13.4s, v7.4s, v1.s[1]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "fmla v10.4s, v6.4s, v0.s[1]\n"
+ "fmla v14.4s, v6.4s, v1.s[1]\n"
+ "ldr q6, [x11, #0x20]\n"
+ "fmla v11.4s, v7.4s, v0.s[1]\n"
+ "fmla v15.4s, v7.4s, v1.s[1]\n"
+ "ldr q7, [x10, #0x20]\n"
+ "fmla v8.4s, v6.4s, v0.s[2]\n"
+ "fmla v12.4s, v6.4s, v1.s[2]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "fmla v9.4s, v7.4s, v0.s[2]\n"
+ "fmla v13.4s, v7.4s, v1.s[2]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "fmla v10.4s, v6.4s, v0.s[2]\n"
+ "fmla v14.4s, v6.4s, v1.s[2]\n"
+ "ldr q6, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
+ "fmla v11.4s, v7.4s, v0.s[2]\n"
+ "fmla v15.4s, v7.4s, v1.s[2]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
+ "fmla v8.4s, v6.4s, v0.s[3]\n"
+ "fmla v12.4s, v6.4s, v1.s[3]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "fmla v9.4s, v7.4s, v0.s[3]\n"
+ "fmla v13.4s, v7.4s, v1.s[3]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "fmla v10.4s, v6.4s, v0.s[3]\n"
+ "fmla v14.4s, v6.4s, v1.s[3]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "fmla v11.4s, v7.4s, v0.s[3]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "fmla v15.4s, v7.4s, v1.s[3]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "bge 53b\n"
+ "54:" // Height 2: Multiply loop: Single iteration only
+ "fmla v8.4s, v6.4s, v0.s[0]\n"
+ "fmla v12.4s, v6.4s, v1.s[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "sub x26, x26, #0x4\n"
+ "fmla v9.4s, v7.4s, v0.s[0]\n"
+ "fmla v13.4s, v7.4s, v1.s[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x25, x25, #0x10\n"
+ "fmla v10.4s, v6.4s, v0.s[0]\n"
+ "fmla v14.4s, v6.4s, v1.s[0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "add x24, x24, #0x10\n"
+ "fmla v11.4s, v7.4s, v0.s[0]\n"
+ "fmla v15.4s, v7.4s, v1.s[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "fmla v8.4s, v6.4s, v0.s[1]\n"
+ "fmla v12.4s, v6.4s, v1.s[1]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "fmla v9.4s, v7.4s, v0.s[1]\n"
+ "fmla v13.4s, v7.4s, v1.s[1]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "fmla v10.4s, v6.4s, v0.s[1]\n"
+ "fmla v14.4s, v6.4s, v1.s[1]\n"
+ "ldr q6, [x11, #0x20]\n"
+ "fmla v11.4s, v7.4s, v0.s[1]\n"
+ "fmla v15.4s, v7.4s, v1.s[1]\n"
+ "ldr q7, [x10, #0x20]\n"
+ "fmla v8.4s, v6.4s, v0.s[2]\n"
+ "fmla v12.4s, v6.4s, v1.s[2]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "fmla v9.4s, v7.4s, v0.s[2]\n"
+ "fmla v13.4s, v7.4s, v1.s[2]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "fmla v10.4s, v6.4s, v0.s[2]\n"
+ "fmla v14.4s, v6.4s, v1.s[2]\n"
+ "ldr q6, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
+ "fmla v11.4s, v7.4s, v0.s[2]\n"
+ "fmla v15.4s, v7.4s, v1.s[2]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
+ "fmla v8.4s, v6.4s, v0.s[3]\n"
+ "fmla v12.4s, v6.4s, v1.s[3]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "fmla v9.4s, v7.4s, v0.s[3]\n"
+ "fmla v13.4s, v7.4s, v1.s[3]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "fmla v10.4s, v6.4s, v0.s[3]\n"
+ "fmla v14.4s, v6.4s, v1.s[3]\n"
+ "fmla v11.4s, v7.4s, v0.s[3]\n"
+ "fmla v15.4s, v7.4s, v1.s[3]\n"
+ "55:" // Height 2: Multiply loop: Main loop skip
+ "cbz x26, 57f\n"
+ "56:" // Height 2: Multiply loop: Odd block loop
+ "ldr s0, [x25], #0x4\n"
+ "ldr s1, [x24], #0x4\n"
+ "sub x26, x26, #0x1\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "fmla v8.4s, v6.4s, v0.s[0]\n"
+ "fmla v12.4s, v6.4s, v1.s[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "fmla v9.4s, v7.4s, v0.s[0]\n"
+ "fmla v13.4s, v7.4s, v1.s[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "fmla v10.4s, v6.4s, v0.s[0]\n"
+ "fmla v14.4s, v6.4s, v1.s[0]\n"
+ "add x11, x11, #0x10\n"
+ "add x10, x10, #0x10\n"
+ "fmla v11.4s, v7.4s, v0.s[0]\n"
+ "fmla v15.4s, v7.4s, v1.s[0]\n"
+ "add x9, x9, #0x10\n"
+ "add x28, x28, #0x10\n"
+ "cbnz x26, 56b\n"
+ "57:" // Height 2: Multiply loop: No odd multiplies
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 50b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "tbz %x[flags], #1, 58f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v1.4s\n"
+ "fmin v9.4s, v9.4s, v1.4s\n"
+ "fmin v10.4s, v10.4s, v1.4s\n"
+ "fmin v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v1.4s\n"
+ "fmin v13.4s, v13.4s, v1.4s\n"
+ "fmin v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v1.4s\n"
+ "fmax v8.4s, v8.4s, v0.4s\n"
+ "fmax v9.4s, v9.4s, v0.4s\n"
+ "fmax v10.4s, v10.4s, v0.4s\n"
+ "fmax v11.4s, v11.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v0.4s\n"
+ "fmax v13.4s, v13.4s, v0.4s\n"
+ "fmax v14.4s, v14.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v0.4s\n"
+ "58:" // Height 2: No activation
+ "cmp x13, #0x10\n"
+ "bge 67f\n"
+ "tbz x13, #3, 62f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "st1 { v9.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v13.4s }, [x24], #0x10\n"
+ "tbz x13, #2, 60f\n"
+ "st1 { v10.4s }, [x12], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "tbz x13, #1, 59f\n"
+ "str d11, [x12], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "tbz x13, #0, 66f\n"
+ "st1 { v11.s }[2], [x12]\n"
+ "st1 { v15.s }[2], [x24]\n"
+ "b 66f\n"
+ "59:" // Height 2: Partial direct writeback: partial_1_12
+ "tbz x13, #0, 66f\n"
+ "str s11, [x12, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
+ "b 66f\n"
+ "60:" // Height 2: Partial direct writeback: partial_2_8
+ "tbz x13, #1, 61f\n"
+ "str d10, [x12], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "tbz x13, #0, 66f\n"
+ "st1 { v10.s }[2], [x12]\n"
+ "st1 { v14.s }[2], [x24]\n"
+ "b 66f\n"
+ "61:" // Height 2: Partial direct writeback: partial_1_8
+ "tbz x13, #0, 66f\n"
+ "str s10, [x12, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
+ "b 66f\n"
+ "62:" // Height 2: Partial direct writeback: partial_4_0
+ "tbz x13, #2, 64f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "tbz x13, #1, 63f\n"
+ "str d9, [x12], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "tbz x13, #0, 66f\n"
+ "st1 { v9.s }[2], [x12]\n"
+ "st1 { v13.s }[2], [x24]\n"
+ "b 66f\n"
+ "63:" // Height 2: Partial direct writeback: partial_1_4
+ "tbz x13, #0, 66f\n"
+ "str s9, [x12, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
+ "b 66f\n"
+ "64:" // Height 2: Partial direct writeback: partial_2_0
+ "tbz x13, #1, 65f\n"
+ "str d8, [x12], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "tbz x13, #0, 66f\n"
+ "st1 { v8.s }[2], [x12]\n"
+ "st1 { v12.s }[2], [x24]\n"
+ "b 66f\n"
+ "65:" // Height 2: Partial direct writeback: partial_1_0
+ "str s8, [x12, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
+ "66:" // Height 2: Partial direct writeback: Done
+ "b 68f\n"
+ "67:" // Height 2: Full writeback
+ "str q8, [x12, #0x0]\n"
+ "str q9, [x12, #0x10]\n"
+ "str q10, [x12, #0x20]\n"
+ "str q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "68:" // Height 2: Writeback done
+ "subs x13, x13, #0x10\n"
+ "bgt 36b\n"
+ "b 206f\n"
+ "69:" // Height 3
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "70:" // Height 3: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0xc\n"
+ "bgt 71f\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
+ "bgt 71f\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
+ "bgt 71f\n"
+ "mov x10, x11\n"
+ "71:" // Height 3: B setup done
+ "cbz x14, 72f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
+ "mov v16.16b, v8.16b\n"
+ "mov v17.16b, v9.16b\n"
+ "add x14, x14, #0x40\n"
+ "mov v18.16b, v10.16b\n"
+ "mov v19.16b, v11.16b\n"
+ "b 83f\n"
+ "72:" // Height 3: no bias
+ "tbz %x[flags], #0, 82f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "cmp x13, #0x10\n"
+ "add x23, x24, x19, LSL #2\n"
+ "bge 81f\n"
+ "tbz x13, #3, 76f\n"
+ "ld1 { v8.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "tbz x13, #2, 74f\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "tbz x13, #1, 73f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d19, [x23], #0x8\n"
+ "tbz x13, #0, 80f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "b 80f\n"
+ "73:" // Height 3: Partial accumulate: partial_1_12
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 80f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "b 80f\n"
+ "74:" // Height 3: Partial accumulate: partial_2_8
+ "tbz x13, #1, 75f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d18, [x23], #0x8\n"
+ "tbz x13, #0, 80f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "b 80f\n"
+ "75:" // Height 3: Partial accumulate: partial_1_8
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 80f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "b 80f\n"
+ "76:" // Height 3: Partial accumulate: partial_4_0
+ "tbz x13, #2, 78f\n"
+ "ld1 { v8.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "tbz x13, #1, 77f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d17, [x23], #0x8\n"
+ "tbz x13, #0, 80f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "b 80f\n"
+ "77:" // Height 3: Partial accumulate: partial_1_4
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 80f\n"
+ "ldr s9, [x12, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
+ "b 80f\n"
+ "78:" // Height 3: Partial accumulate: partial_2_0
+ "tbz x13, #1, 79f\n"
+ "ldr d8, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "tbz x13, #0, 80f\n"
+ "ld1 { v8.s }[2], [x12]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v16.s }[2], [x23]\n"
+ "b 80f\n"
+ "79:" // Height 3: Partial accumulate: partial_1_0
+ "ldr s8, [x12, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s16, [x23, #0x0]\n"
+ "80:" // Height 3: Partial accumulate: Done
+ "sub x12, x12, x19\n"
+ "b 83f\n"
+ "81:" // Height 3: full accumulate
+ "ldr q8, [x12, #0x0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "ldr q10, [x12, #0x20]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
+ "b 83f\n"
+ "82:" // Height 3: no accumulate
+ "movi v8.16b, #0x0\n"
+ "movi v9.16b, #0x0\n"
+ "movi v10.16b, #0x0\n"
+ "movi v11.16b, #0x0\n"
+ "movi v12.16b, #0x0\n"
+ "movi v13.16b, #0x0\n"
+ "movi v14.16b, #0x0\n"
+ "movi v15.16b, #0x0\n"
+ "movi v16.16b, #0x0\n"
+ "movi v17.16b, #0x0\n"
+ "movi v18.16b, #0x0\n"
+ "movi v19.16b, #0x0\n"
+ "83:" // Height 3: setup done
+ "mov x27, #0x0\n"
+ "84:" // Height 3: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 85f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 86f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "b 86f\n"
+ "85:" // Height 3: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "86:" // Height 3: input setup done
+ "cmp x26, #0x4\n"
+ "blt 89f\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x8\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "blt 88f\n"
+ "87:" // Height 3: Multiply loop: Main loop head
+ "fmla v8.4s, v6.4s, v0.s[0]\n"
+ "fmla v12.4s, v6.4s, v1.s[0]\n"
+ "sub x26, x26, #0x4\n"
+ "cmp x26, #0x8\n"
+ "fmla v16.4s, v6.4s, v2.s[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "fmla v9.4s, v7.4s, v0.s[0]\n"
+ "add x25, x25, #0x10\n"
+ "fmla v13.4s, v7.4s, v1.s[0]\n"
+ "fmla v17.4s, v7.4s, v2.s[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x24, x24, #0x10\n"
+ "fmla v10.4s, v6.4s, v0.s[0]\n"
+ "fmla v14.4s, v6.4s, v1.s[0]\n"
+ "add x23, x23, #0x10\n"
+ "fmla v18.4s, v6.4s, v2.s[0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "fmla v11.4s, v7.4s, v0.s[0]\n"
+ "fmla v15.4s, v7.4s, v1.s[0]\n"
+ "fmla v19.4s, v7.4s, v2.s[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "fmla v8.4s, v6.4s, v0.s[1]\n"
+ "fmla v12.4s, v6.4s, v1.s[1]\n"
+ "fmla v16.4s, v6.4s, v2.s[1]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "fmla v9.4s, v7.4s, v0.s[1]\n"
+ "fmla v13.4s, v7.4s, v1.s[1]\n"
+ "fmla v17.4s, v7.4s, v2.s[1]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "fmla v10.4s, v6.4s, v0.s[1]\n"
+ "fmla v14.4s, v6.4s, v1.s[1]\n"
+ "fmla v18.4s, v6.4s, v2.s[1]\n"
+ "ldr q6, [x11, #0x20]\n"
+ "fmla v11.4s, v7.4s, v0.s[1]\n"
+ "fmla v15.4s, v7.4s, v1.s[1]\n"
+ "fmla v19.4s, v7.4s, v2.s[1]\n"
+ "ldr q7, [x10, #0x20]\n"
+ "fmla v8.4s, v6.4s, v0.s[2]\n"
+ "fmla v12.4s, v6.4s, v1.s[2]\n"
+ "fmla v16.4s, v6.4s, v2.s[2]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "fmla v9.4s, v7.4s, v0.s[2]\n"
+ "fmla v13.4s, v7.4s, v1.s[2]\n"
+ "fmla v17.4s, v7.4s, v2.s[2]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "fmla v10.4s, v6.4s, v0.s[2]\n"
+ "fmla v14.4s, v6.4s, v1.s[2]\n"
+ "fmla v18.4s, v6.4s, v2.s[2]\n"
+ "ldr q6, [x11, #0x30]\n"
+ "fmla v11.4s, v7.4s, v0.s[2]\n"
+ "add x11, x11, #0x40\n"
+ "fmla v15.4s, v7.4s, v1.s[2]\n"
+ "fmla v19.4s, v7.4s, v2.s[2]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
+ "fmla v8.4s, v6.4s, v0.s[3]\n"
+ "fmla v12.4s, v6.4s, v1.s[3]\n"
+ "fmla v16.4s, v6.4s, v2.s[3]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "fmla v9.4s, v7.4s, v0.s[3]\n"
+ "add x9, x9, #0x40\n"
+ "fmla v13.4s, v7.4s, v1.s[3]\n"
+ "fmla v17.4s, v7.4s, v2.s[3]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "fmla v10.4s, v6.4s, v0.s[3]\n"
+ "fmla v14.4s, v6.4s, v1.s[3]\n"
+ "fmla v18.4s, v6.4s, v2.s[3]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "fmla v11.4s, v7.4s, v0.s[3]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "fmla v15.4s, v7.4s, v1.s[3]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "fmla v19.4s, v7.4s, v2.s[3]\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "bge 87b\n"
+ "88:" // Height 3: Multiply loop: Single iteration only
+ "fmla v8.4s, v6.4s, v0.s[0]\n"
+ "fmla v12.4s, v6.4s, v1.s[0]\n"
+ "sub x26, x26, #0x4\n"
+ "add x25, x25, #0x10\n"
+ "fmla v16.4s, v6.4s, v2.s[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "fmla v9.4s, v7.4s, v0.s[0]\n"
+ "add x24, x24, #0x10\n"
+ "fmla v13.4s, v7.4s, v1.s[0]\n"
+ "fmla v17.4s, v7.4s, v2.s[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x23, x23, #0x10\n"
+ "fmla v10.4s, v6.4s, v0.s[0]\n"
+ "fmla v14.4s, v6.4s, v1.s[0]\n"
+ "fmla v18.4s, v6.4s, v2.s[0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "fmla v11.4s, v7.4s, v0.s[0]\n"
+ "fmla v15.4s, v7.4s, v1.s[0]\n"
+ "fmla v19.4s, v7.4s, v2.s[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "fmla v8.4s, v6.4s, v0.s[1]\n"
+ "fmla v12.4s, v6.4s, v1.s[1]\n"
+ "fmla v16.4s, v6.4s, v2.s[1]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "fmla v9.4s, v7.4s, v0.s[1]\n"
+ "fmla v13.4s, v7.4s, v1.s[1]\n"
+ "fmla v17.4s, v7.4s, v2.s[1]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "fmla v10.4s, v6.4s, v0.s[1]\n"
+ "fmla v14.4s, v6.4s, v1.s[1]\n"
+ "fmla v18.4s, v6.4s, v2.s[1]\n"
+ "ldr q6, [x11, #0x20]\n"
+ "fmla v11.4s, v7.4s, v0.s[1]\n"
+ "fmla v15.4s, v7.4s, v1.s[1]\n"
+ "fmla v19.4s, v7.4s, v2.s[1]\n"
+ "ldr q7, [x10, #0x20]\n"
+ "fmla v8.4s, v6.4s, v0.s[2]\n"
+ "fmla v12.4s, v6.4s, v1.s[2]\n"
+ "fmla v16.4s, v6.4s, v2.s[2]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "fmla v9.4s, v7.4s, v0.s[2]\n"
+ "fmla v13.4s, v7.4s, v1.s[2]\n"
+ "fmla v17.4s, v7.4s, v2.s[2]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "fmla v10.4s, v6.4s, v0.s[2]\n"
+ "fmla v14.4s, v6.4s, v1.s[2]\n"
+ "fmla v18.4s, v6.4s, v2.s[2]\n"
+ "ldr q6, [x11, #0x30]\n"
+ "fmla v11.4s, v7.4s, v0.s[2]\n"
+ "add x11, x11, #0x40\n"
+ "fmla v15.4s, v7.4s, v1.s[2]\n"
+ "fmla v19.4s, v7.4s, v2.s[2]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
+ "fmla v8.4s, v6.4s, v0.s[3]\n"
+ "fmla v12.4s, v6.4s, v1.s[3]\n"
+ "fmla v16.4s, v6.4s, v2.s[3]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "fmla v9.4s, v7.4s, v0.s[3]\n"
+ "add x9, x9, #0x40\n"
+ "fmla v13.4s, v7.4s, v1.s[3]\n"
+ "fmla v17.4s, v7.4s, v2.s[3]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "fmla v10.4s, v6.4s, v0.s[3]\n"
+ "fmla v14.4s, v6.4s, v1.s[3]\n"
+ "fmla v18.4s, v6.4s, v2.s[3]\n"
+ "fmla v11.4s, v7.4s, v0.s[3]\n"
+ "fmla v15.4s, v7.4s, v1.s[3]\n"
+ "fmla v19.4s, v7.4s, v2.s[3]\n"
+ "89:" // Height 3: Multiply loop: Main loop skip
+ "cbz x26, 91f\n"
+ "90:" // Height 3: Multiply loop: Odd block loop
+ "ldr s0, [x25], #0x4\n"
+ "ldr s1, [x24], #0x4\n"
+ "sub x26, x26, #0x1\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr q6, [x11, #0x0]\n"
+ "fmla v8.4s, v6.4s, v0.s[0]\n"
+ "fmla v12.4s, v6.4s, v1.s[0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "fmla v16.4s, v6.4s, v2.s[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "fmla v9.4s, v7.4s, v0.s[0]\n"
+ "fmla v13.4s, v7.4s, v1.s[0]\n"
+ "fmla v17.4s, v7.4s, v2.s[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x11, x11, #0x10\n"
+ "fmla v10.4s, v6.4s, v0.s[0]\n"
+ "fmla v14.4s, v6.4s, v1.s[0]\n"
+ "add x10, x10, #0x10\n"
+ "add x9, x9, #0x10\n"
+ "fmla v18.4s, v6.4s, v2.s[0]\n"
+ "fmla v11.4s, v7.4s, v0.s[0]\n"
+ "add x28, x28, #0x10\n"
+ "fmla v15.4s, v7.4s, v1.s[0]\n"
+ "fmla v19.4s, v7.4s, v2.s[0]\n"
+ "cbnz x26, 90b\n"
+ "91:" // Height 3: Multiply loop: No odd multiplies
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 84b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "tbz %x[flags], #1, 92f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v1.4s\n"
+ "fmin v9.4s, v9.4s, v1.4s\n"
+ "fmin v10.4s, v10.4s, v1.4s\n"
+ "fmin v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v1.4s\n"
+ "fmin v13.4s, v13.4s, v1.4s\n"
+ "fmin v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v1.4s\n"
+ "fmin v16.4s, v16.4s, v1.4s\n"
+ "fmin v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v1.4s\n"
+ "fmin v19.4s, v19.4s, v1.4s\n"
+ "fmax v8.4s, v8.4s, v0.4s\n"
+ "fmax v9.4s, v9.4s, v0.4s\n"
+ "fmax v10.4s, v10.4s, v0.4s\n"
+ "fmax v11.4s, v11.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v0.4s\n"
+ "fmax v13.4s, v13.4s, v0.4s\n"
+ "fmax v14.4s, v14.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v0.4s\n"
+ "fmax v16.4s, v16.4s, v0.4s\n"
+ "fmax v17.4s, v17.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v0.4s\n"
+ "fmax v19.4s, v19.4s, v0.4s\n"
+ "92:" // Height 3: No activation
+ "cmp x13, #0x10\n"
+ "bge 101f\n"
+ "tbz x13, #3, 96f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "st1 { v9.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v13.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v17.4s }, [x23], #0x10\n"
+ "tbz x13, #2, 94f\n"
+ "st1 { v10.4s }, [x12], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v18.4s }, [x23], #0x10\n"
+ "tbz x13, #1, 93f\n"
+ "str d11, [x12], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "tbz x13, #0, 100f\n"
+ "st1 { v11.s }[2], [x12]\n"
+ "st1 { v15.s }[2], [x24]\n"
+ "st1 { v19.s }[2], [x23]\n"
+ "b 100f\n"
+ "93:" // Height 3: Partial direct writeback: partial_1_12
+ "tbz x13, #0, 100f\n"
+ "str s11, [x12, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
+ "str s19, [x23, #0x0]\n"
+ "b 100f\n"
+ "94:" // Height 3: Partial direct writeback: partial_2_8
+ "tbz x13, #1, 95f\n"
+ "str d10, [x12], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "tbz x13, #0, 100f\n"
+ "st1 { v10.s }[2], [x12]\n"
+ "st1 { v14.s }[2], [x24]\n"
+ "st1 { v18.s }[2], [x23]\n"
+ "b 100f\n"
+ "95:" // Height 3: Partial direct writeback: partial_1_8
+ "tbz x13, #0, 100f\n"
+ "str s10, [x12, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
+ "str s18, [x23, #0x0]\n"
+ "b 100f\n"
+ "96:" // Height 3: Partial direct writeback: partial_4_0
+ "tbz x13, #2, 98f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "tbz x13, #1, 97f\n"
+ "str d9, [x12], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "tbz x13, #0, 100f\n"
+ "st1 { v9.s }[2], [x12]\n"
+ "st1 { v13.s }[2], [x24]\n"
+ "st1 { v17.s }[2], [x23]\n"
+ "b 100f\n"
+ "97:" // Height 3: Partial direct writeback: partial_1_4
+ "tbz x13, #0, 100f\n"
+ "str s9, [x12, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
+ "str s17, [x23, #0x0]\n"
+ "b 100f\n"
+ "98:" // Height 3: Partial direct writeback: partial_2_0
+ "tbz x13, #1, 99f\n"
+ "str d8, [x12], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "tbz x13, #0, 100f\n"
+ "st1 { v8.s }[2], [x12]\n"
+ "st1 { v12.s }[2], [x24]\n"
+ "st1 { v16.s }[2], [x23]\n"
+ "b 100f\n"
+ "99:" // Height 3: Partial direct writeback: partial_1_0
+ "str s8, [x12, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
+ "str s16, [x23, #0x0]\n"
+ "100:" // Height 3: Partial direct writeback: Done
+ "b 102f\n"
+ "101:" // Height 3: Full writeback
+ "str q8, [x12, #0x0]\n"
+ "str q9, [x12, #0x10]\n"
+ "str q10, [x12, #0x20]\n"
+ "str q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "102:" // Height 3: Writeback done
+ "subs x13, x13, #0x10\n"
+ "bgt 70b\n"
+ "b 206f\n"
+ "103:" // Height 4
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "104:" // Height 4: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0xc\n"
+ "bgt 105f\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
+ "bgt 105f\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
+ "bgt 105f\n"
+ "mov x10, x11\n"
+ "105:" // Height 4: B setup done
+ "cbz x14, 106f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
+ "mov v16.16b, v8.16b\n"
+ "mov v17.16b, v9.16b\n"
+ "add x14, x14, #0x40\n"
+ "mov v18.16b, v10.16b\n"
+ "mov v19.16b, v11.16b\n"
+ "mov v20.16b, v8.16b\n"
+ "mov v21.16b, v9.16b\n"
+ "mov v22.16b, v10.16b\n"
+ "mov v23.16b, v11.16b\n"
+ "b 117f\n"
+ "106:" // Height 4: no bias
+ "tbz %x[flags], #0, 116f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "cmp x13, #0x10\n"
+ "add x22, x23, x19, LSL #2\n"
+ "bge 115f\n"
+ "tbz x13, #3, 110f\n"
+ "ld1 { v8.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "tbz x13, #2, 108f\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
+ "tbz x13, #1, 107f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "tbz x13, #0, 114f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
+ "b 114f\n"
+ "107:" // Height 4: Partial accumulate: partial_1_12
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 114f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
+ "b 114f\n"
+ "108:" // Height 4: Partial accumulate: partial_2_8
+ "tbz x13, #1, 109f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "tbz x13, #0, 114f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
+ "b 114f\n"
+ "109:" // Height 4: Partial accumulate: partial_1_8
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 114f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
+ "b 114f\n"
+ "110:" // Height 4: Partial accumulate: partial_4_0
+ "tbz x13, #2, 112f\n"
+ "ld1 { v8.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "tbz x13, #1, 111f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "tbz x13, #0, 114f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
+ "b 114f\n"
+ "111:" // Height 4: Partial accumulate: partial_1_4
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 114f\n"
+ "ldr s9, [x12, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
+ "b 114f\n"
+ "112:" // Height 4: Partial accumulate: partial_2_0
+ "tbz x13, #1, 113f\n"
+ "ldr d8, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "tbz x13, #0, 114f\n"
+ "ld1 { v8.s }[2], [x12]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v16.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
+ "b 114f\n"
+ "113:" // Height 4: Partial accumulate: partial_1_0
+ "ldr s8, [x12, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s16, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
+ "114:" // Height 4: Partial accumulate: Done
+ "sub x12, x12, x19\n"
+ "b 117f\n"
+ "115:" // Height 4: full accumulate
+ "ldr q8, [x12, #0x0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "ldr q10, [x12, #0x20]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "b 117f\n"
+ "116:" // Height 4: no accumulate
+ "movi v8.16b, #0x0\n"
+ "movi v9.16b, #0x0\n"
+ "movi v10.16b, #0x0\n"
+ "movi v11.16b, #0x0\n"
+ "movi v12.16b, #0x0\n"
+ "movi v13.16b, #0x0\n"
+ "movi v14.16b, #0x0\n"
+ "movi v15.16b, #0x0\n"
+ "movi v16.16b, #0x0\n"
+ "movi v17.16b, #0x0\n"
+ "movi v18.16b, #0x0\n"
+ "movi v19.16b, #0x0\n"
+ "movi v20.16b, #0x0\n"
+ "movi v21.16b, #0x0\n"
+ "movi v22.16b, #0x0\n"
+ "movi v23.16b, #0x0\n"
+ "117:" // Height 4: setup done
+ "mov x27, #0x0\n"
+ "118:" // Height 4: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 119f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 120f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "b 120f\n"
+ "119:" // Height 4: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "120:" // Height 4: input setup done
+ "cmp x26, #0x4\n"
+ "blt 123f\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x8\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "blt 122f\n"
+ "121:" // Height 4: Multiply loop: Main loop head
+ "fmla v8.4s, v6.4s, v0.s[0]\n"
+ "fmla v12.4s, v6.4s, v1.s[0]\n"
+ "sub x26, x26, #0x4\n"
+ "cmp x26, #0x8\n"
+ "fmla v16.4s, v6.4s, v2.s[0]\n"
+ "fmla v20.4s, v6.4s, v3.s[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "add x25, x25, #0x10\n"
+ "fmla v9.4s, v7.4s, v0.s[0]\n"
+ "fmla v13.4s, v7.4s, v1.s[0]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "fmla v17.4s, v7.4s, v2.s[0]\n"
+ "fmla v21.4s, v7.4s, v3.s[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x22, x22, #0x10\n"
+ "fmla v10.4s, v6.4s, v0.s[0]\n"
+ "fmla v14.4s, v6.4s, v1.s[0]\n"
+ "fmla v18.4s, v6.4s, v2.s[0]\n"
+ "fmla v22.4s, v6.4s, v3.s[0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "fmla v11.4s, v7.4s, v0.s[0]\n"
+ "fmla v15.4s, v7.4s, v1.s[0]\n"
+ "fmla v19.4s, v7.4s, v2.s[0]\n"
+ "fmla v23.4s, v7.4s, v3.s[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "fmla v8.4s, v6.4s, v0.s[1]\n"
+ "fmla v12.4s, v6.4s, v1.s[1]\n"
+ "fmla v16.4s, v6.4s, v2.s[1]\n"
+ "fmla v20.4s, v6.4s, v3.s[1]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "fmla v9.4s, v7.4s, v0.s[1]\n"
+ "fmla v13.4s, v7.4s, v1.s[1]\n"
+ "fmla v17.4s, v7.4s, v2.s[1]\n"
+ "fmla v21.4s, v7.4s, v3.s[1]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "fmla v10.4s, v6.4s, v0.s[1]\n"
+ "fmla v14.4s, v6.4s, v1.s[1]\n"
+ "fmla v18.4s, v6.4s, v2.s[1]\n"
+ "fmla v22.4s, v6.4s, v3.s[1]\n"
+ "ldr q6, [x11, #0x20]\n"
+ "fmla v11.4s, v7.4s, v0.s[1]\n"
+ "fmla v15.4s, v7.4s, v1.s[1]\n"
+ "fmla v19.4s, v7.4s, v2.s[1]\n"
+ "fmla v23.4s, v7.4s, v3.s[1]\n"
+ "ldr q7, [x10, #0x20]\n"
+ "fmla v8.4s, v6.4s, v0.s[2]\n"
+ "fmla v12.4s, v6.4s, v1.s[2]\n"
+ "fmla v16.4s, v6.4s, v2.s[2]\n"
+ "fmla v20.4s, v6.4s, v3.s[2]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "fmla v9.4s, v7.4s, v0.s[2]\n"
+ "fmla v13.4s, v7.4s, v1.s[2]\n"
+ "fmla v17.4s, v7.4s, v2.s[2]\n"
+ "fmla v21.4s, v7.4s, v3.s[2]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "fmla v10.4s, v6.4s, v0.s[2]\n"
+ "fmla v14.4s, v6.4s, v1.s[2]\n"
+ "fmla v18.4s, v6.4s, v2.s[2]\n"
+ "fmla v22.4s, v6.4s, v3.s[2]\n"
+ "ldr q6, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
+ "fmla v11.4s, v7.4s, v0.s[2]\n"
+ "fmla v15.4s, v7.4s, v1.s[2]\n"
+ "fmla v19.4s, v7.4s, v2.s[2]\n"
+ "fmla v23.4s, v7.4s, v3.s[2]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
+ "fmla v8.4s, v6.4s, v0.s[3]\n"
+ "fmla v12.4s, v6.4s, v1.s[3]\n"
+ "fmla v16.4s, v6.4s, v2.s[3]\n"
+ "fmla v20.4s, v6.4s, v3.s[3]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "fmla v9.4s, v7.4s, v0.s[3]\n"
+ "fmla v13.4s, v7.4s, v1.s[3]\n"
+ "fmla v17.4s, v7.4s, v2.s[3]\n"
+ "fmla v21.4s, v7.4s, v3.s[3]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "fmla v10.4s, v6.4s, v0.s[3]\n"
+ "fmla v14.4s, v6.4s, v1.s[3]\n"
+ "fmla v18.4s, v6.4s, v2.s[3]\n"
+ "fmla v22.4s, v6.4s, v3.s[3]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "fmla v11.4s, v7.4s, v0.s[3]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "fmla v15.4s, v7.4s, v1.s[3]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "fmla v19.4s, v7.4s, v2.s[3]\n"
+ "ldr q2, [x23, #0x0]\n"
+ "fmla v23.4s, v7.4s, v3.s[3]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "bge 121b\n"
+ "122:" // Height 4: Multiply loop: Single iteration only
+ "fmla v8.4s, v6.4s, v0.s[0]\n"
+ "fmla v12.4s, v6.4s, v1.s[0]\n"
+ "sub x26, x26, #0x4\n"
+ "add x25, x25, #0x10\n"
+ "fmla v16.4s, v6.4s, v2.s[0]\n"
+ "fmla v20.4s, v6.4s, v3.s[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "add x24, x24, #0x10\n"
+ "fmla v9.4s, v7.4s, v0.s[0]\n"
+ "fmla v13.4s, v7.4s, v1.s[0]\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "fmla v17.4s, v7.4s, v2.s[0]\n"
+ "fmla v21.4s, v7.4s, v3.s[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "fmla v10.4s, v6.4s, v0.s[0]\n"
+ "fmla v14.4s, v6.4s, v1.s[0]\n"
+ "fmla v18.4s, v6.4s, v2.s[0]\n"
+ "fmla v22.4s, v6.4s, v3.s[0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "fmla v11.4s, v7.4s, v0.s[0]\n"
+ "fmla v15.4s, v7.4s, v1.s[0]\n"
+ "fmla v19.4s, v7.4s, v2.s[0]\n"
+ "fmla v23.4s, v7.4s, v3.s[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "fmla v8.4s, v6.4s, v0.s[1]\n"
+ "fmla v12.4s, v6.4s, v1.s[1]\n"
+ "fmla v16.4s, v6.4s, v2.s[1]\n"
+ "fmla v20.4s, v6.4s, v3.s[1]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "fmla v9.4s, v7.4s, v0.s[1]\n"
+ "fmla v13.4s, v7.4s, v1.s[1]\n"
+ "fmla v17.4s, v7.4s, v2.s[1]\n"
+ "fmla v21.4s, v7.4s, v3.s[1]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "fmla v10.4s, v6.4s, v0.s[1]\n"
+ "fmla v14.4s, v6.4s, v1.s[1]\n"
+ "fmla v18.4s, v6.4s, v2.s[1]\n"
+ "fmla v22.4s, v6.4s, v3.s[1]\n"
+ "ldr q6, [x11, #0x20]\n"
+ "fmla v11.4s, v7.4s, v0.s[1]\n"
+ "fmla v15.4s, v7.4s, v1.s[1]\n"
+ "fmla v19.4s, v7.4s, v2.s[1]\n"
+ "fmla v23.4s, v7.4s, v3.s[1]\n"
+ "ldr q7, [x10, #0x20]\n"
+ "fmla v8.4s, v6.4s, v0.s[2]\n"
+ "fmla v12.4s, v6.4s, v1.s[2]\n"
+ "fmla v16.4s, v6.4s, v2.s[2]\n"
+ "fmla v20.4s, v6.4s, v3.s[2]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "fmla v9.4s, v7.4s, v0.s[2]\n"
+ "fmla v13.4s, v7.4s, v1.s[2]\n"
+ "fmla v17.4s, v7.4s, v2.s[2]\n"
+ "fmla v21.4s, v7.4s, v3.s[2]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "fmla v10.4s, v6.4s, v0.s[2]\n"
+ "fmla v14.4s, v6.4s, v1.s[2]\n"
+ "fmla v18.4s, v6.4s, v2.s[2]\n"
+ "fmla v22.4s, v6.4s, v3.s[2]\n"
+ "ldr q6, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
+ "fmla v11.4s, v7.4s, v0.s[2]\n"
+ "fmla v15.4s, v7.4s, v1.s[2]\n"
+ "fmla v19.4s, v7.4s, v2.s[2]\n"
+ "fmla v23.4s, v7.4s, v3.s[2]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
+ "fmla v8.4s, v6.4s, v0.s[3]\n"
+ "fmla v12.4s, v6.4s, v1.s[3]\n"
+ "fmla v16.4s, v6.4s, v2.s[3]\n"
+ "fmla v20.4s, v6.4s, v3.s[3]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "fmla v9.4s, v7.4s, v0.s[3]\n"
+ "fmla v13.4s, v7.4s, v1.s[3]\n"
+ "fmla v17.4s, v7.4s, v2.s[3]\n"
+ "fmla v21.4s, v7.4s, v3.s[3]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "fmla v10.4s, v6.4s, v0.s[3]\n"
+ "fmla v14.4s, v6.4s, v1.s[3]\n"
+ "fmla v18.4s, v6.4s, v2.s[3]\n"
+ "fmla v22.4s, v6.4s, v3.s[3]\n"
+ "fmla v11.4s, v7.4s, v0.s[3]\n"
+ "fmla v15.4s, v7.4s, v1.s[3]\n"
+ "fmla v19.4s, v7.4s, v2.s[3]\n"
+ "fmla v23.4s, v7.4s, v3.s[3]\n"
+ "123:" // Height 4: Multiply loop: Main loop skip
+ "cbz x26, 125f\n"
+ "124:" // Height 4: Multiply loop: Odd block loop
+ "ldr s0, [x25], #0x4\n"
+ "ldr s1, [x24], #0x4\n"
+ "sub x26, x26, #0x1\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s3, [x22], #0x4\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "fmla v8.4s, v6.4s, v0.s[0]\n"
+ "fmla v12.4s, v6.4s, v1.s[0]\n"
+ "fmla v16.4s, v6.4s, v2.s[0]\n"
+ "fmla v20.4s, v6.4s, v3.s[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "add x11, x11, #0x10\n"
+ "fmla v9.4s, v7.4s, v0.s[0]\n"
+ "fmla v13.4s, v7.4s, v1.s[0]\n"
+ "add x10, x10, #0x10\n"
+ "add x9, x9, #0x10\n"
+ "fmla v17.4s, v7.4s, v2.s[0]\n"
+ "fmla v21.4s, v7.4s, v3.s[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x28, x28, #0x10\n"
+ "fmla v10.4s, v6.4s, v0.s[0]\n"
+ "fmla v14.4s, v6.4s, v1.s[0]\n"
+ "fmla v18.4s, v6.4s, v2.s[0]\n"
+ "fmla v22.4s, v6.4s, v3.s[0]\n"
+ "fmla v11.4s, v7.4s, v0.s[0]\n"
+ "fmla v15.4s, v7.4s, v1.s[0]\n"
+ "fmla v19.4s, v7.4s, v2.s[0]\n"
+ "fmla v23.4s, v7.4s, v3.s[0]\n"
+ "cbnz x26, 124b\n"
+ "125:" // Height 4: Multiply loop: No odd multiplies
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 118b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "tbz %x[flags], #1, 126f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v1.4s\n"
+ "fmin v9.4s, v9.4s, v1.4s\n"
+ "fmin v10.4s, v10.4s, v1.4s\n"
+ "fmin v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v1.4s\n"
+ "fmin v13.4s, v13.4s, v1.4s\n"
+ "fmin v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v1.4s\n"
+ "fmin v16.4s, v16.4s, v1.4s\n"
+ "fmin v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v1.4s\n"
+ "fmin v19.4s, v19.4s, v1.4s\n"
+ "fmin v20.4s, v20.4s, v1.4s\n"
+ "fmin v21.4s, v21.4s, v1.4s\n"
+ "fmin v22.4s, v22.4s, v1.4s\n"
+ "fmin v23.4s, v23.4s, v1.4s\n"
+ "fmax v8.4s, v8.4s, v0.4s\n"
+ "fmax v9.4s, v9.4s, v0.4s\n"
+ "fmax v10.4s, v10.4s, v0.4s\n"
+ "fmax v11.4s, v11.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v0.4s\n"
+ "fmax v13.4s, v13.4s, v0.4s\n"
+ "fmax v14.4s, v14.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v0.4s\n"
+ "fmax v16.4s, v16.4s, v0.4s\n"
+ "fmax v17.4s, v17.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v0.4s\n"
+ "fmax v19.4s, v19.4s, v0.4s\n"
+ "fmax v20.4s, v20.4s, v0.4s\n"
+ "fmax v21.4s, v21.4s, v0.4s\n"
+ "fmax v22.4s, v22.4s, v0.4s\n"
+ "fmax v23.4s, v23.4s, v0.4s\n"
+ "126:" // Height 4: No activation
+ "cmp x13, #0x10\n"
+ "bge 135f\n"
+ "tbz x13, #3, 130f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "st1 { v9.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v13.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v17.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v21.4s }, [x22], #0x10\n"
+ "tbz x13, #2, 128f\n"
+ "st1 { v10.4s }, [x12], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v18.4s }, [x23], #0x10\n"
+ "st1 { v22.4s }, [x22], #0x10\n"
+ "tbz x13, #1, 127f\n"
+ "str d11, [x12], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "str d23, [x22], #0x8\n"
+ "tbz x13, #0, 134f\n"
+ "st1 { v11.s }[2], [x12]\n"
+ "st1 { v15.s }[2], [x24]\n"
+ "st1 { v19.s }[2], [x23]\n"
+ "st1 { v23.s }[2], [x22]\n"
+ "b 134f\n"
+ "127:" // Height 4: Partial direct writeback: partial_1_12
+ "tbz x13, #0, 134f\n"
+ "str s11, [x12, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
+ "str s19, [x23, #0x0]\n"
+ "str s23, [x22, #0x0]\n"
+ "b 134f\n"
+ "128:" // Height 4: Partial direct writeback: partial_2_8
+ "tbz x13, #1, 129f\n"
+ "str d10, [x12], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "tbz x13, #0, 134f\n"
+ "st1 { v10.s }[2], [x12]\n"
+ "st1 { v14.s }[2], [x24]\n"
+ "st1 { v18.s }[2], [x23]\n"
+ "st1 { v22.s }[2], [x22]\n"
+ "b 134f\n"
+ "129:" // Height 4: Partial direct writeback: partial_1_8
+ "tbz x13, #0, 134f\n"
+ "str s10, [x12, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
+ "str s18, [x23, #0x0]\n"
+ "str s22, [x22, #0x0]\n"
+ "b 134f\n"
+ "130:" // Height 4: Partial direct writeback: partial_4_0
+ "tbz x13, #2, 132f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "tbz x13, #1, 131f\n"
+ "str d9, [x12], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "tbz x13, #0, 134f\n"
+ "st1 { v9.s }[2], [x12]\n"
+ "st1 { v13.s }[2], [x24]\n"
+ "st1 { v17.s }[2], [x23]\n"
+ "st1 { v21.s }[2], [x22]\n"
+ "b 134f\n"
+ "131:" // Height 4: Partial direct writeback: partial_1_4
+ "tbz x13, #0, 134f\n"
+ "str s9, [x12, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
+ "str s17, [x23, #0x0]\n"
+ "str s21, [x22, #0x0]\n"
+ "b 134f\n"
+ "132:" // Height 4: Partial direct writeback: partial_2_0
+ "tbz x13, #1, 133f\n"
+ "str d8, [x12], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "tbz x13, #0, 134f\n"
+ "st1 { v8.s }[2], [x12]\n"
+ "st1 { v12.s }[2], [x24]\n"
+ "st1 { v16.s }[2], [x23]\n"
+ "st1 { v20.s }[2], [x22]\n"
+ "b 134f\n"
+ "133:" // Height 4: Partial direct writeback: partial_1_0
+ "str s8, [x12, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
+ "str s16, [x23, #0x0]\n"
+ "str s20, [x22, #0x0]\n"
+ "134:" // Height 4: Partial direct writeback: Done
+ "b 136f\n"
+ "135:" // Height 4: Full writeback
+ "str q8, [x12, #0x0]\n"
+ "str q9, [x12, #0x10]\n"
+ "str q10, [x12, #0x20]\n"
+ "str q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q20, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q22, [x22, #0x20]\n"
+ "str q23, [x22, #0x30]\n"
+ "136:" // Height 4: Writeback done
+ "subs x13, x13, #0x10\n"
+ "bgt 104b\n"
+ "b 206f\n"
+ "137:" // Height 5
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "138:" // Height 5: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0xc\n"
+ "bgt 139f\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
+ "bgt 139f\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
+ "bgt 139f\n"
+ "mov x10, x11\n"
+ "139:" // Height 5: B setup done
+ "cbz x14, 140f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
+ "mov v16.16b, v8.16b\n"
+ "mov v17.16b, v9.16b\n"
+ "add x14, x14, #0x40\n"
+ "mov v18.16b, v10.16b\n"
+ "mov v19.16b, v11.16b\n"
+ "mov v20.16b, v8.16b\n"
+ "mov v21.16b, v9.16b\n"
+ "mov v22.16b, v10.16b\n"
+ "mov v23.16b, v11.16b\n"
+ "mov v24.16b, v8.16b\n"
+ "mov v25.16b, v9.16b\n"
+ "mov v26.16b, v10.16b\n"
+ "mov v27.16b, v11.16b\n"
+ "b 151f\n"
+ "140:" // Height 5: no bias
+ "tbz %x[flags], #0, 150f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "cmp x13, #0x10\n"
+ "add x21, x22, x19, LSL #2\n"
+ "bge 149f\n"
+ "tbz x13, #3, 144f\n"
+ "ld1 { v8.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v24.4s }, [x21], #0x10\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "tbz x13, #2, 142f\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v26.4s }, [x21], #0x10\n"
+ "tbz x13, #1, 141f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d27, [x21], #0x8\n"
+ "tbz x13, #0, 148f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
+ "ld1 { v27.s }[2], [x21]\n"
+ "b 148f\n"
+ "141:" // Height 5: Partial accumulate: partial_1_12
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 148f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
+ "ldr s27, [x21, #0x0]\n"
+ "b 148f\n"
+ "142:" // Height 5: Partial accumulate: partial_2_8
+ "tbz x13, #1, 143f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
+ "tbz x13, #0, 148f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
+ "ld1 { v26.s }[2], [x21]\n"
+ "b 148f\n"
+ "143:" // Height 5: Partial accumulate: partial_1_8
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 148f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
+ "ldr s26, [x21, #0x0]\n"
+ "b 148f\n"
+ "144:" // Height 5: Partial accumulate: partial_4_0
+ "tbz x13, #2, 146f\n"
+ "ld1 { v8.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v24.4s }, [x21], #0x10\n"
+ "tbz x13, #1, 145f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d25, [x21], #0x8\n"
+ "tbz x13, #0, 148f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x21]\n"
+ "b 148f\n"
+ "145:" // Height 5: Partial accumulate: partial_1_4
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 148f\n"
+ "ldr s9, [x12, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
+ "ldr s25, [x21, #0x0]\n"
+ "b 148f\n"
+ "146:" // Height 5: Partial accumulate: partial_2_0
+ "tbz x13, #1, 147f\n"
+ "ldr d8, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d24, [x21], #0x8\n"
+ "tbz x13, #0, 148f\n"
+ "ld1 { v8.s }[2], [x12]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v16.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
+ "ld1 { v24.s }[2], [x21]\n"
+ "b 148f\n"
+ "147:" // Height 5: Partial accumulate: partial_1_0
+ "ldr s8, [x12, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s16, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
+ "ldr s24, [x21, #0x0]\n"
+ "148:" // Height 5: Partial accumulate: Done
+ "sub x12, x12, x19\n"
+ "b 151f\n"
+ "149:" // Height 5: full accumulate
+ "ldr q8, [x12, #0x0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "ldr q10, [x12, #0x20]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "ldr q24, [x21, #0x0]\n"
+ "ldr q25, [x21, #0x10]\n"
+ "ldr q26, [x21, #0x20]\n"
+ "ldr q27, [x21, #0x30]\n"
+ "b 151f\n"
+ "150:" // Height 5: no accumulate
+ "movi v8.16b, #0x0\n"
+ "movi v9.16b, #0x0\n"
+ "movi v10.16b, #0x0\n"
+ "movi v11.16b, #0x0\n"
+ "movi v12.16b, #0x0\n"
+ "movi v13.16b, #0x0\n"
+ "movi v14.16b, #0x0\n"
+ "movi v15.16b, #0x0\n"
+ "movi v16.16b, #0x0\n"
+ "movi v17.16b, #0x0\n"
+ "movi v18.16b, #0x0\n"
+ "movi v19.16b, #0x0\n"
+ "movi v20.16b, #0x0\n"
+ "movi v21.16b, #0x0\n"
+ "movi v22.16b, #0x0\n"
+ "movi v23.16b, #0x0\n"
+ "movi v24.16b, #0x0\n"
+ "movi v25.16b, #0x0\n"
+ "movi v26.16b, #0x0\n"
+ "movi v27.16b, #0x0\n"
+ "151:" // Height 5: setup done
+ "mov x27, #0x0\n"
+ "152:" // Height 5: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 153f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 154f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "b 154f\n"
+ "153:" // Height 5: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "154:" // Height 5: input setup done
+ "cmp x26, #0x4\n"
+ "blt 157f\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x8\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "blt 156f\n"
+ "155:" // Height 5: Multiply loop: Main loop head
+ "fmla v8.4s, v6.4s, v0.s[0]\n"
+ "fmla v12.4s, v6.4s, v1.s[0]\n"
+ "sub x26, x26, #0x4\n"
+ "cmp x26, #0x8\n"
+ "fmla v16.4s, v6.4s, v2.s[0]\n"
+ "fmla v20.4s, v6.4s, v3.s[0]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "fmla v24.4s, v6.4s, v4.s[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "fmla v9.4s, v7.4s, v0.s[0]\n"
+ "add x23, x23, #0x10\n"
+ "fmla v13.4s, v7.4s, v1.s[0]\n"
+ "fmla v17.4s, v7.4s, v2.s[0]\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ "fmla v21.4s, v7.4s, v3.s[0]\n"
+ "fmla v25.4s, v7.4s, v4.s[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "fmla v10.4s, v6.4s, v0.s[0]\n"
+ "fmla v14.4s, v6.4s, v1.s[0]\n"
+ "fmla v18.4s, v6.4s, v2.s[0]\n"
+ "fmla v22.4s, v6.4s, v3.s[0]\n"
+ "fmla v26.4s, v6.4s, v4.s[0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "fmla v11.4s, v7.4s, v0.s[0]\n"
+ "fmla v15.4s, v7.4s, v1.s[0]\n"
+ "fmla v19.4s, v7.4s, v2.s[0]\n"
+ "fmla v23.4s, v7.4s, v3.s[0]\n"
+ "fmla v27.4s, v7.4s, v4.s[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "fmla v8.4s, v6.4s, v0.s[1]\n"
+ "fmla v12.4s, v6.4s, v1.s[1]\n"
+ "fmla v16.4s, v6.4s, v2.s[1]\n"
+ "fmla v20.4s, v6.4s, v3.s[1]\n"
+ "fmla v24.4s, v6.4s, v4.s[1]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "fmla v9.4s, v7.4s, v0.s[1]\n"
+ "fmla v13.4s, v7.4s, v1.s[1]\n"
+ "fmla v17.4s, v7.4s, v2.s[1]\n"
+ "fmla v21.4s, v7.4s, v3.s[1]\n"
+ "fmla v25.4s, v7.4s, v4.s[1]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "fmla v10.4s, v6.4s, v0.s[1]\n"
+ "fmla v14.4s, v6.4s, v1.s[1]\n"
+ "fmla v18.4s, v6.4s, v2.s[1]\n"
+ "fmla v22.4s, v6.4s, v3.s[1]\n"
+ "fmla v26.4s, v6.4s, v4.s[1]\n"
+ "ldr q6, [x11, #0x20]\n"
+ "fmla v11.4s, v7.4s, v0.s[1]\n"
+ "fmla v15.4s, v7.4s, v1.s[1]\n"
+ "fmla v19.4s, v7.4s, v2.s[1]\n"
+ "fmla v23.4s, v7.4s, v3.s[1]\n"
+ "fmla v27.4s, v7.4s, v4.s[1]\n"
+ "ldr q7, [x10, #0x20]\n"
+ "fmla v8.4s, v6.4s, v0.s[2]\n"
+ "fmla v12.4s, v6.4s, v1.s[2]\n"
+ "fmla v16.4s, v6.4s, v2.s[2]\n"
+ "fmla v20.4s, v6.4s, v3.s[2]\n"
+ "fmla v24.4s, v6.4s, v4.s[2]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "fmla v9.4s, v7.4s, v0.s[2]\n"
+ "fmla v13.4s, v7.4s, v1.s[2]\n"
+ "fmla v17.4s, v7.4s, v2.s[2]\n"
+ "fmla v21.4s, v7.4s, v3.s[2]\n"
+ "fmla v25.4s, v7.4s, v4.s[2]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "fmla v10.4s, v6.4s, v0.s[2]\n"
+ "fmla v14.4s, v6.4s, v1.s[2]\n"
+ "fmla v18.4s, v6.4s, v2.s[2]\n"
+ "fmla v22.4s, v6.4s, v3.s[2]\n"
+ "fmla v26.4s, v6.4s, v4.s[2]\n"
+ "ldr q6, [x11, #0x30]\n"
+ "fmla v11.4s, v7.4s, v0.s[2]\n"
+ "add x11, x11, #0x40\n"
+ "fmla v15.4s, v7.4s, v1.s[2]\n"
+ "fmla v19.4s, v7.4s, v2.s[2]\n"
+ "fmla v23.4s, v7.4s, v3.s[2]\n"
+ "fmla v27.4s, v7.4s, v4.s[2]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
+ "fmla v8.4s, v6.4s, v0.s[3]\n"
+ "fmla v12.4s, v6.4s, v1.s[3]\n"
+ "fmla v16.4s, v6.4s, v2.s[3]\n"
+ "fmla v20.4s, v6.4s, v3.s[3]\n"
+ "fmla v24.4s, v6.4s, v4.s[3]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "fmla v9.4s, v7.4s, v0.s[3]\n"
+ "add x9, x9, #0x40\n"
+ "fmla v13.4s, v7.4s, v1.s[3]\n"
+ "fmla v17.4s, v7.4s, v2.s[3]\n"
+ "fmla v21.4s, v7.4s, v3.s[3]\n"
+ "fmla v25.4s, v7.4s, v4.s[3]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "fmla v10.4s, v6.4s, v0.s[3]\n"
+ "fmla v14.4s, v6.4s, v1.s[3]\n"
+ "fmla v18.4s, v6.4s, v2.s[3]\n"
+ "fmla v22.4s, v6.4s, v3.s[3]\n"
+ "fmla v26.4s, v6.4s, v4.s[3]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "fmla v11.4s, v7.4s, v0.s[3]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "fmla v15.4s, v7.4s, v1.s[3]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "fmla v19.4s, v7.4s, v2.s[3]\n"
+ "ldr q2, [x23, #0x0]\n"
+ "fmla v23.4s, v7.4s, v3.s[3]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "fmla v27.4s, v7.4s, v4.s[3]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "bge 155b\n"
+ "156:" // Height 5: Multiply loop: Single iteration only
+ "fmla v8.4s, v6.4s, v0.s[0]\n"
+ "fmla v12.4s, v6.4s, v1.s[0]\n"
+ "sub x26, x26, #0x4\n"
+ "add x25, x25, #0x10\n"
+ "fmla v16.4s, v6.4s, v2.s[0]\n"
+ "fmla v20.4s, v6.4s, v3.s[0]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "fmla v24.4s, v6.4s, v4.s[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "fmla v9.4s, v7.4s, v0.s[0]\n"
+ "add x22, x22, #0x10\n"
+ "fmla v13.4s, v7.4s, v1.s[0]\n"
+ "fmla v17.4s, v7.4s, v2.s[0]\n"
+ "add x21, x21, #0x10\n"
+ "fmla v21.4s, v7.4s, v3.s[0]\n"
+ "fmla v25.4s, v7.4s, v4.s[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "fmla v10.4s, v6.4s, v0.s[0]\n"
+ "fmla v14.4s, v6.4s, v1.s[0]\n"
+ "fmla v18.4s, v6.4s, v2.s[0]\n"
+ "fmla v22.4s, v6.4s, v3.s[0]\n"
+ "fmla v26.4s, v6.4s, v4.s[0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "fmla v11.4s, v7.4s, v0.s[0]\n"
+ "fmla v15.4s, v7.4s, v1.s[0]\n"
+ "fmla v19.4s, v7.4s, v2.s[0]\n"
+ "fmla v23.4s, v7.4s, v3.s[0]\n"
+ "fmla v27.4s, v7.4s, v4.s[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "fmla v8.4s, v6.4s, v0.s[1]\n"
+ "fmla v12.4s, v6.4s, v1.s[1]\n"
+ "fmla v16.4s, v6.4s, v2.s[1]\n"
+ "fmla v20.4s, v6.4s, v3.s[1]\n"
+ "fmla v24.4s, v6.4s, v4.s[1]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "fmla v9.4s, v7.4s, v0.s[1]\n"
+ "fmla v13.4s, v7.4s, v1.s[1]\n"
+ "fmla v17.4s, v7.4s, v2.s[1]\n"
+ "fmla v21.4s, v7.4s, v3.s[1]\n"
+ "fmla v25.4s, v7.4s, v4.s[1]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "fmla v10.4s, v6.4s, v0.s[1]\n"
+ "fmla v14.4s, v6.4s, v1.s[1]\n"
+ "fmla v18.4s, v6.4s, v2.s[1]\n"
+ "fmla v22.4s, v6.4s, v3.s[1]\n"
+ "fmla v26.4s, v6.4s, v4.s[1]\n"
+ "ldr q6, [x11, #0x20]\n"
+ "fmla v11.4s, v7.4s, v0.s[1]\n"
+ "fmla v15.4s, v7.4s, v1.s[1]\n"
+ "fmla v19.4s, v7.4s, v2.s[1]\n"
+ "fmla v23.4s, v7.4s, v3.s[1]\n"
+ "fmla v27.4s, v7.4s, v4.s[1]\n"
+ "ldr q7, [x10, #0x20]\n"
+ "fmla v8.4s, v6.4s, v0.s[2]\n"
+ "fmla v12.4s, v6.4s, v1.s[2]\n"
+ "fmla v16.4s, v6.4s, v2.s[2]\n"
+ "fmla v20.4s, v6.4s, v3.s[2]\n"
+ "fmla v24.4s, v6.4s, v4.s[2]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "fmla v9.4s, v7.4s, v0.s[2]\n"
+ "fmla v13.4s, v7.4s, v1.s[2]\n"
+ "fmla v17.4s, v7.4s, v2.s[2]\n"
+ "fmla v21.4s, v7.4s, v3.s[2]\n"
+ "fmla v25.4s, v7.4s, v4.s[2]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "fmla v10.4s, v6.4s, v0.s[2]\n"
+ "fmla v14.4s, v6.4s, v1.s[2]\n"
+ "fmla v18.4s, v6.4s, v2.s[2]\n"
+ "fmla v22.4s, v6.4s, v3.s[2]\n"
+ "fmla v26.4s, v6.4s, v4.s[2]\n"
+ "ldr q6, [x11, #0x30]\n"
+ "fmla v11.4s, v7.4s, v0.s[2]\n"
+ "add x11, x11, #0x40\n"
+ "fmla v15.4s, v7.4s, v1.s[2]\n"
+ "fmla v19.4s, v7.4s, v2.s[2]\n"
+ "fmla v23.4s, v7.4s, v3.s[2]\n"
+ "fmla v27.4s, v7.4s, v4.s[2]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
+ "fmla v8.4s, v6.4s, v0.s[3]\n"
+ "fmla v12.4s, v6.4s, v1.s[3]\n"
+ "fmla v16.4s, v6.4s, v2.s[3]\n"
+ "fmla v20.4s, v6.4s, v3.s[3]\n"
+ "fmla v24.4s, v6.4s, v4.s[3]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "fmla v9.4s, v7.4s, v0.s[3]\n"
+ "add x9, x9, #0x40\n"
+ "fmla v13.4s, v7.4s, v1.s[3]\n"
+ "fmla v17.4s, v7.4s, v2.s[3]\n"
+ "fmla v21.4s, v7.4s, v3.s[3]\n"
+ "fmla v25.4s, v7.4s, v4.s[3]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "fmla v10.4s, v6.4s, v0.s[3]\n"
+ "fmla v14.4s, v6.4s, v1.s[3]\n"
+ "fmla v18.4s, v6.4s, v2.s[3]\n"
+ "fmla v22.4s, v6.4s, v3.s[3]\n"
+ "fmla v26.4s, v6.4s, v4.s[3]\n"
+ "fmla v11.4s, v7.4s, v0.s[3]\n"
+ "fmla v15.4s, v7.4s, v1.s[3]\n"
+ "fmla v19.4s, v7.4s, v2.s[3]\n"
+ "fmla v23.4s, v7.4s, v3.s[3]\n"
+ "fmla v27.4s, v7.4s, v4.s[3]\n"
+ "157:" // Height 5: Multiply loop: Main loop skip
+ "cbz x26, 159f\n"
+ "158:" // Height 5: Multiply loop: Odd block loop
+ "ldr s0, [x25], #0x4\n"
+ "ldr s1, [x24], #0x4\n"
+ "sub x26, x26, #0x1\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s3, [x22], #0x4\n"
+ "ldr s4, [x21], #0x4\n"
+ "ldr q6, [x11, #0x0]\n"
+ "fmla v8.4s, v6.4s, v0.s[0]\n"
+ "fmla v12.4s, v6.4s, v1.s[0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "fmla v16.4s, v6.4s, v2.s[0]\n"
+ "fmla v20.4s, v6.4s, v3.s[0]\n"
+ "add x11, x11, #0x10\n"
+ "fmla v24.4s, v6.4s, v4.s[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "fmla v9.4s, v7.4s, v0.s[0]\n"
+ "add x10, x10, #0x10\n"
+ "fmla v13.4s, v7.4s, v1.s[0]\n"
+ "fmla v17.4s, v7.4s, v2.s[0]\n"
+ "add x9, x9, #0x10\n"
+ "fmla v21.4s, v7.4s, v3.s[0]\n"
+ "fmla v25.4s, v7.4s, v4.s[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x28, x28, #0x10\n"
+ "fmla v10.4s, v6.4s, v0.s[0]\n"
+ "fmla v14.4s, v6.4s, v1.s[0]\n"
+ "fmla v18.4s, v6.4s, v2.s[0]\n"
+ "fmla v22.4s, v6.4s, v3.s[0]\n"
+ "fmla v26.4s, v6.4s, v4.s[0]\n"
+ "fmla v11.4s, v7.4s, v0.s[0]\n"
+ "fmla v15.4s, v7.4s, v1.s[0]\n"
+ "fmla v19.4s, v7.4s, v2.s[0]\n"
+ "fmla v23.4s, v7.4s, v3.s[0]\n"
+ "fmla v27.4s, v7.4s, v4.s[0]\n"
+ "cbnz x26, 158b\n"
+ "159:" // Height 5: Multiply loop: No odd multiplies
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 152b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "tbz %x[flags], #1, 160f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v1.4s\n"
+ "fmin v9.4s, v9.4s, v1.4s\n"
+ "fmin v10.4s, v10.4s, v1.4s\n"
+ "fmin v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v1.4s\n"
+ "fmin v13.4s, v13.4s, v1.4s\n"
+ "fmin v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v1.4s\n"
+ "fmin v16.4s, v16.4s, v1.4s\n"
+ "fmin v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v1.4s\n"
+ "fmin v19.4s, v19.4s, v1.4s\n"
+ "fmin v20.4s, v20.4s, v1.4s\n"
+ "fmin v21.4s, v21.4s, v1.4s\n"
+ "fmin v22.4s, v22.4s, v1.4s\n"
+ "fmin v23.4s, v23.4s, v1.4s\n"
+ "fmin v24.4s, v24.4s, v1.4s\n"
+ "fmin v25.4s, v25.4s, v1.4s\n"
+ "fmin v26.4s, v26.4s, v1.4s\n"
+ "fmin v27.4s, v27.4s, v1.4s\n"
+ "fmax v8.4s, v8.4s, v0.4s\n"
+ "fmax v9.4s, v9.4s, v0.4s\n"
+ "fmax v10.4s, v10.4s, v0.4s\n"
+ "fmax v11.4s, v11.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v0.4s\n"
+ "fmax v13.4s, v13.4s, v0.4s\n"
+ "fmax v14.4s, v14.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v0.4s\n"
+ "fmax v16.4s, v16.4s, v0.4s\n"
+ "fmax v17.4s, v17.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v0.4s\n"
+ "fmax v19.4s, v19.4s, v0.4s\n"
+ "fmax v20.4s, v20.4s, v0.4s\n"
+ "fmax v21.4s, v21.4s, v0.4s\n"
+ "fmax v22.4s, v22.4s, v0.4s\n"
+ "fmax v23.4s, v23.4s, v0.4s\n"
+ "fmax v24.4s, v24.4s, v0.4s\n"
+ "fmax v25.4s, v25.4s, v0.4s\n"
+ "fmax v26.4s, v26.4s, v0.4s\n"
+ "fmax v27.4s, v27.4s, v0.4s\n"
+ "160:" // Height 5: No activation
+ "cmp x13, #0x10\n"
+ "bge 169f\n"
+ "tbz x13, #3, 164f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "st1 { v9.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v13.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v17.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v21.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v25.4s }, [x21], #0x10\n"
+ "tbz x13, #2, 162f\n"
+ "st1 { v10.4s }, [x12], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v18.4s }, [x23], #0x10\n"
+ "st1 { v22.4s }, [x22], #0x10\n"
+ "st1 { v26.4s }, [x21], #0x10\n"
+ "tbz x13, #1, 161f\n"
+ "str d11, [x12], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "str d23, [x22], #0x8\n"
+ "str d27, [x21], #0x8\n"
+ "tbz x13, #0, 168f\n"
+ "st1 { v11.s }[2], [x12]\n"
+ "st1 { v15.s }[2], [x24]\n"
+ "st1 { v19.s }[2], [x23]\n"
+ "st1 { v23.s }[2], [x22]\n"
+ "st1 { v27.s }[2], [x21]\n"
+ "b 168f\n"
+ "161:" // Height 5: Partial direct writeback: partial_1_12
+ "tbz x13, #0, 168f\n"
+ "str s11, [x12, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
+ "str s19, [x23, #0x0]\n"
+ "str s23, [x22, #0x0]\n"
+ "str s27, [x21, #0x0]\n"
+ "b 168f\n"
+ "162:" // Height 5: Partial direct writeback: partial_2_8
+ "tbz x13, #1, 163f\n"
+ "str d10, [x12], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "str d26, [x21], #0x8\n"
+ "tbz x13, #0, 168f\n"
+ "st1 { v10.s }[2], [x12]\n"
+ "st1 { v14.s }[2], [x24]\n"
+ "st1 { v18.s }[2], [x23]\n"
+ "st1 { v22.s }[2], [x22]\n"
+ "st1 { v26.s }[2], [x21]\n"
+ "b 168f\n"
+ "163:" // Height 5: Partial direct writeback: partial_1_8
+ "tbz x13, #0, 168f\n"
+ "str s10, [x12, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
+ "str s18, [x23, #0x0]\n"
+ "str s22, [x22, #0x0]\n"
+ "str s26, [x21, #0x0]\n"
+ "b 168f\n"
+ "164:" // Height 5: Partial direct writeback: partial_4_0
+ "tbz x13, #2, 166f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "tbz x13, #1, 165f\n"
+ "str d9, [x12], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "str d25, [x21], #0x8\n"
+ "tbz x13, #0, 168f\n"
+ "st1 { v9.s }[2], [x12]\n"
+ "st1 { v13.s }[2], [x24]\n"
+ "st1 { v17.s }[2], [x23]\n"
+ "st1 { v21.s }[2], [x22]\n"
+ "st1 { v25.s }[2], [x21]\n"
+ "b 168f\n"
+ "165:" // Height 5: Partial direct writeback: partial_1_4
+ "tbz x13, #0, 168f\n"
+ "str s9, [x12, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
+ "str s17, [x23, #0x0]\n"
+ "str s21, [x22, #0x0]\n"
+ "str s25, [x21, #0x0]\n"
+ "b 168f\n"
+ "166:" // Height 5: Partial direct writeback: partial_2_0
+ "tbz x13, #1, 167f\n"
+ "str d8, [x12], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "str d24, [x21], #0x8\n"
+ "tbz x13, #0, 168f\n"
+ "st1 { v8.s }[2], [x12]\n"
+ "st1 { v12.s }[2], [x24]\n"
+ "st1 { v16.s }[2], [x23]\n"
+ "st1 { v20.s }[2], [x22]\n"
+ "st1 { v24.s }[2], [x21]\n"
+ "b 168f\n"
+ "167:" // Height 5: Partial direct writeback: partial_1_0
+ "str s8, [x12, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
+ "str s16, [x23, #0x0]\n"
+ "str s20, [x22, #0x0]\n"
+ "str s24, [x21, #0x0]\n"
+ "168:" // Height 5: Partial direct writeback: Done
+ "b 170f\n"
+ "169:" // Height 5: Full writeback
+ "str q8, [x12, #0x0]\n"
+ "str q9, [x12, #0x10]\n"
+ "str q10, [x12, #0x20]\n"
+ "str q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q20, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q22, [x22, #0x20]\n"
+ "str q23, [x22, #0x30]\n"
+ "str q24, [x21, #0x0]\n"
+ "str q25, [x21, #0x10]\n"
+ "str q26, [x21, #0x20]\n"
+ "str q27, [x21, #0x30]\n"
+ "170:" // Height 5: Writeback done
+ "subs x13, x13, #0x10\n"
+ "bgt 138b\n"
+ "b 206f\n"
+ "171:" // Height 6
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x20, #0x18\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x14, %x[bias]\n"
+ "mov x12, %x[output_ptr]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
+ "172:" // Height 6: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0xc\n"
+ "bgt 173f\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
+ "bgt 173f\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
+ "bgt 173f\n"
+ "mov x10, x11\n"
+ "173:" // Height 6: B setup done
+ "cbz x14, 174f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
+ "mov v16.16b, v8.16b\n"
+ "mov v17.16b, v9.16b\n"
+ "add x14, x14, #0x40\n"
+ "mov v18.16b, v10.16b\n"
+ "mov v19.16b, v11.16b\n"
+ "mov v20.16b, v8.16b\n"
+ "mov v21.16b, v9.16b\n"
+ "mov v22.16b, v10.16b\n"
+ "mov v23.16b, v11.16b\n"
+ "mov v24.16b, v8.16b\n"
+ "mov v25.16b, v9.16b\n"
+ "mov v26.16b, v10.16b\n"
+ "mov v27.16b, v11.16b\n"
+ "mov v28.16b, v8.16b\n"
+ "mov v29.16b, v9.16b\n"
+ "mov v30.16b, v10.16b\n"
+ "mov v31.16b, v11.16b\n"
+ "b 185f\n"
+ "174:" // Height 6: no bias
+ "tbz %x[flags], #0, 184f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "cmp x13, #0x10\n"
+ "add x20, x21, x19, LSL #2\n"
+ "bge 183f\n"
+ "tbz x13, #3, 178f\n"
+ "ld1 { v8.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v24.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x20], #0x10\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "ld1 { v29.4s }, [x20], #0x10\n"
+ "tbz x13, #2, 176f\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v26.4s }, [x21], #0x10\n"
+ "ld1 { v30.4s }, [x20], #0x10\n"
+ "tbz x13, #1, 175f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d27, [x21], #0x8\n"
+ "ldr d31, [x20], #0x8\n"
+ "tbz x13, #0, 182f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
+ "ld1 { v27.s }[2], [x21]\n"
+ "ld1 { v31.s }[2], [x20]\n"
+ "b 182f\n"
+ "175:" // Height 6: Partial accumulate: partial_1_12
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 182f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
+ "ldr s27, [x21, #0x0]\n"
+ "ldr s31, [x20, #0x0]\n"
+ "b 182f\n"
+ "176:" // Height 6: Partial accumulate: partial_2_8
+ "tbz x13, #1, 177f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
+ "ldr d30, [x20], #0x8\n"
+ "tbz x13, #0, 182f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
+ "ld1 { v26.s }[2], [x21]\n"
+ "ld1 { v30.s }[2], [x20]\n"
+ "b 182f\n"
+ "177:" // Height 6: Partial accumulate: partial_1_8
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 182f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
+ "ldr s26, [x21, #0x0]\n"
+ "ldr s30, [x20, #0x0]\n"
+ "b 182f\n"
+ "178:" // Height 6: Partial accumulate: partial_4_0
+ "tbz x13, #2, 180f\n"
+ "ld1 { v8.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v24.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x20], #0x10\n"
+ "tbz x13, #1, 179f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d25, [x21], #0x8\n"
+ "ldr d29, [x20], #0x8\n"
+ "tbz x13, #0, 182f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x21]\n"
+ "ld1 { v29.s }[2], [x20]\n"
+ "b 182f\n"
+ "179:" // Height 6: Partial accumulate: partial_1_4
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 182f\n"
+ "ldr s9, [x12, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
+ "ldr s25, [x21, #0x0]\n"
+ "ldr s29, [x20, #0x0]\n"
+ "b 182f\n"
+ "180:" // Height 6: Partial accumulate: partial_2_0
+ "tbz x13, #1, 181f\n"
+ "ldr d8, [x12], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d24, [x21], #0x8\n"
+ "ldr d28, [x20], #0x8\n"
+ "tbz x13, #0, 182f\n"
+ "ld1 { v8.s }[2], [x12]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v16.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
+ "ld1 { v24.s }[2], [x21]\n"
+ "ld1 { v28.s }[2], [x20]\n"
+ "b 182f\n"
+ "181:" // Height 6: Partial accumulate: partial_1_0
+ "ldr s8, [x12, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s16, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
+ "ldr s24, [x21, #0x0]\n"
+ "ldr s28, [x20, #0x0]\n"
+ "182:" // Height 6: Partial accumulate: Done
+ "sub x12, x12, x19\n"
+ "b 185f\n"
+ "183:" // Height 6: full accumulate
+ "ldr q8, [x12, #0x0]\n"
+ "ldr q9, [x12, #0x10]\n"
+ "ldr q10, [x12, #0x20]\n"
+ "ldr q11, [x12, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "ldr q24, [x21, #0x0]\n"
+ "ldr q25, [x21, #0x10]\n"
+ "ldr q26, [x21, #0x20]\n"
+ "ldr q27, [x21, #0x30]\n"
+ "ldr q28, [x20, #0x0]\n"
+ "ldr q29, [x20, #0x10]\n"
+ "ldr q30, [x20, #0x20]\n"
+ "ldr q31, [x20, #0x30]\n"
+ "b 185f\n"
+ "184:" // Height 6: no accumulate
+ "movi v8.16b, #0x0\n"
+ "movi v9.16b, #0x0\n"
+ "movi v10.16b, #0x0\n"
+ "movi v11.16b, #0x0\n"
+ "movi v12.16b, #0x0\n"
+ "movi v13.16b, #0x0\n"
+ "movi v14.16b, #0x0\n"
+ "movi v15.16b, #0x0\n"
+ "movi v16.16b, #0x0\n"
+ "movi v17.16b, #0x0\n"
+ "movi v18.16b, #0x0\n"
+ "movi v19.16b, #0x0\n"
+ "movi v20.16b, #0x0\n"
+ "movi v21.16b, #0x0\n"
+ "movi v22.16b, #0x0\n"
+ "movi v23.16b, #0x0\n"
+ "movi v24.16b, #0x0\n"
+ "movi v25.16b, #0x0\n"
+ "movi v26.16b, #0x0\n"
+ "movi v27.16b, #0x0\n"
+ "movi v28.16b, #0x0\n"
+ "movi v29.16b, #0x0\n"
+ "movi v30.16b, #0x0\n"
+ "movi v31.16b, #0x0\n"
+ "185:" // Height 6: setup done
+ "mov x27, #0x0\n"
+ "186:" // Height 6: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 187f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 188f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "add x20, x20, x19, LSL #2\n"
+ "b 188f\n"
+ "187:" // Height 6: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "188:" // Height 6: input setup done
+ "cmp x26, #0x4\n"
+ "blt 191f\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "cmp x26, #0x8\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "ldr q5, [x20, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "blt 190f\n"
+ "189:" // Height 6: Multiply loop: Main loop head
+ "fmla v8.4s, v6.4s, v0.s[0]\n"
+ "fmla v12.4s, v6.4s, v1.s[0]\n"
+ "sub x26, x26, #0x4\n"
+ "cmp x26, #0x8\n"
+ "fmla v16.4s, v6.4s, v2.s[0]\n"
+ "fmla v20.4s, v6.4s, v3.s[0]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "fmla v24.4s, v6.4s, v4.s[0]\n"
+ "fmla v28.4s, v6.4s, v5.s[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "add x23, x23, #0x10\n"
+ "fmla v9.4s, v7.4s, v0.s[0]\n"
+ "fmla v13.4s, v7.4s, v1.s[0]\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ "fmla v17.4s, v7.4s, v2.s[0]\n"
+ "fmla v21.4s, v7.4s, v3.s[0]\n"
+ "add x20, x20, #0x10\n"
+ "fmla v25.4s, v7.4s, v4.s[0]\n"
+ "fmla v29.4s, v7.4s, v5.s[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "fmla v10.4s, v6.4s, v0.s[0]\n"
+ "fmla v14.4s, v6.4s, v1.s[0]\n"
+ "fmla v18.4s, v6.4s, v2.s[0]\n"
+ "fmla v22.4s, v6.4s, v3.s[0]\n"
+ "fmla v26.4s, v6.4s, v4.s[0]\n"
+ "fmla v30.4s, v6.4s, v5.s[0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "fmla v11.4s, v7.4s, v0.s[0]\n"
+ "fmla v15.4s, v7.4s, v1.s[0]\n"
+ "fmla v19.4s, v7.4s, v2.s[0]\n"
+ "fmla v23.4s, v7.4s, v3.s[0]\n"
+ "fmla v27.4s, v7.4s, v4.s[0]\n"
+ "fmla v31.4s, v7.4s, v5.s[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "fmla v8.4s, v6.4s, v0.s[1]\n"
+ "fmla v12.4s, v6.4s, v1.s[1]\n"
+ "fmla v16.4s, v6.4s, v2.s[1]\n"
+ "fmla v20.4s, v6.4s, v3.s[1]\n"
+ "fmla v24.4s, v6.4s, v4.s[1]\n"
+ "fmla v28.4s, v6.4s, v5.s[1]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "fmla v9.4s, v7.4s, v0.s[1]\n"
+ "fmla v13.4s, v7.4s, v1.s[1]\n"
+ "fmla v17.4s, v7.4s, v2.s[1]\n"
+ "fmla v21.4s, v7.4s, v3.s[1]\n"
+ "fmla v25.4s, v7.4s, v4.s[1]\n"
+ "fmla v29.4s, v7.4s, v5.s[1]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "fmla v10.4s, v6.4s, v0.s[1]\n"
+ "fmla v14.4s, v6.4s, v1.s[1]\n"
+ "fmla v18.4s, v6.4s, v2.s[1]\n"
+ "fmla v22.4s, v6.4s, v3.s[1]\n"
+ "fmla v26.4s, v6.4s, v4.s[1]\n"
+ "fmla v30.4s, v6.4s, v5.s[1]\n"
+ "ldr q6, [x11, #0x20]\n"
+ "fmla v11.4s, v7.4s, v0.s[1]\n"
+ "fmla v15.4s, v7.4s, v1.s[1]\n"
+ "fmla v19.4s, v7.4s, v2.s[1]\n"
+ "fmla v23.4s, v7.4s, v3.s[1]\n"
+ "fmla v27.4s, v7.4s, v4.s[1]\n"
+ "fmla v31.4s, v7.4s, v5.s[1]\n"
+ "ldr q7, [x10, #0x20]\n"
+ "fmla v8.4s, v6.4s, v0.s[2]\n"
+ "fmla v12.4s, v6.4s, v1.s[2]\n"
+ "fmla v16.4s, v6.4s, v2.s[2]\n"
+ "fmla v20.4s, v6.4s, v3.s[2]\n"
+ "fmla v24.4s, v6.4s, v4.s[2]\n"
+ "fmla v28.4s, v6.4s, v5.s[2]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "fmla v9.4s, v7.4s, v0.s[2]\n"
+ "fmla v13.4s, v7.4s, v1.s[2]\n"
+ "fmla v17.4s, v7.4s, v2.s[2]\n"
+ "fmla v21.4s, v7.4s, v3.s[2]\n"
+ "fmla v25.4s, v7.4s, v4.s[2]\n"
+ "fmla v29.4s, v7.4s, v5.s[2]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "fmla v10.4s, v6.4s, v0.s[2]\n"
+ "fmla v14.4s, v6.4s, v1.s[2]\n"
+ "fmla v18.4s, v6.4s, v2.s[2]\n"
+ "fmla v22.4s, v6.4s, v3.s[2]\n"
+ "fmla v26.4s, v6.4s, v4.s[2]\n"
+ "fmla v30.4s, v6.4s, v5.s[2]\n"
+ "ldr q6, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
+ "fmla v11.4s, v7.4s, v0.s[2]\n"
+ "fmla v15.4s, v7.4s, v1.s[2]\n"
+ "fmla v19.4s, v7.4s, v2.s[2]\n"
+ "fmla v23.4s, v7.4s, v3.s[2]\n"
+ "fmla v27.4s, v7.4s, v4.s[2]\n"
+ "fmla v31.4s, v7.4s, v5.s[2]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
+ "fmla v8.4s, v6.4s, v0.s[3]\n"
+ "fmla v12.4s, v6.4s, v1.s[3]\n"
+ "fmla v16.4s, v6.4s, v2.s[3]\n"
+ "fmla v20.4s, v6.4s, v3.s[3]\n"
+ "fmla v24.4s, v6.4s, v4.s[3]\n"
+ "fmla v28.4s, v6.4s, v5.s[3]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "fmla v9.4s, v7.4s, v0.s[3]\n"
+ "fmla v13.4s, v7.4s, v1.s[3]\n"
+ "fmla v17.4s, v7.4s, v2.s[3]\n"
+ "fmla v21.4s, v7.4s, v3.s[3]\n"
+ "fmla v25.4s, v7.4s, v4.s[3]\n"
+ "fmla v29.4s, v7.4s, v5.s[3]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "fmla v10.4s, v6.4s, v0.s[3]\n"
+ "fmla v14.4s, v6.4s, v1.s[3]\n"
+ "fmla v18.4s, v6.4s, v2.s[3]\n"
+ "fmla v22.4s, v6.4s, v3.s[3]\n"
+ "fmla v26.4s, v6.4s, v4.s[3]\n"
+ "fmla v30.4s, v6.4s, v5.s[3]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "fmla v11.4s, v7.4s, v0.s[3]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "fmla v15.4s, v7.4s, v1.s[3]\n"
+ "ldr q1, [x24, #0x0]\n"
+ "fmla v19.4s, v7.4s, v2.s[3]\n"
+ "ldr q2, [x23, #0x0]\n"
+ "fmla v23.4s, v7.4s, v3.s[3]\n"
+ "ldr q3, [x22, #0x0]\n"
+ "fmla v27.4s, v7.4s, v4.s[3]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "fmla v31.4s, v7.4s, v5.s[3]\n"
+ "ldr q5, [x20, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "bge 189b\n"
+ "190:" // Height 6: Multiply loop: Single iteration only
+ "fmla v8.4s, v6.4s, v0.s[0]\n"
+ "fmla v12.4s, v6.4s, v1.s[0]\n"
+ "sub x26, x26, #0x4\n"
+ "add x25, x25, #0x10\n"
+ "fmla v16.4s, v6.4s, v2.s[0]\n"
+ "fmla v20.4s, v6.4s, v3.s[0]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "fmla v24.4s, v6.4s, v4.s[0]\n"
+ "fmla v28.4s, v6.4s, v5.s[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "add x22, x22, #0x10\n"
+ "fmla v9.4s, v7.4s, v0.s[0]\n"
+ "fmla v13.4s, v7.4s, v1.s[0]\n"
+ "add x21, x21, #0x10\n"
+ "add x20, x20, #0x10\n"
+ "fmla v17.4s, v7.4s, v2.s[0]\n"
+ "fmla v21.4s, v7.4s, v3.s[0]\n"
+ "fmla v25.4s, v7.4s, v4.s[0]\n"
+ "fmla v29.4s, v7.4s, v5.s[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "fmla v10.4s, v6.4s, v0.s[0]\n"
+ "fmla v14.4s, v6.4s, v1.s[0]\n"
+ "fmla v18.4s, v6.4s, v2.s[0]\n"
+ "fmla v22.4s, v6.4s, v3.s[0]\n"
+ "fmla v26.4s, v6.4s, v4.s[0]\n"
+ "fmla v30.4s, v6.4s, v5.s[0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "fmla v11.4s, v7.4s, v0.s[0]\n"
+ "fmla v15.4s, v7.4s, v1.s[0]\n"
+ "fmla v19.4s, v7.4s, v2.s[0]\n"
+ "fmla v23.4s, v7.4s, v3.s[0]\n"
+ "fmla v27.4s, v7.4s, v4.s[0]\n"
+ "fmla v31.4s, v7.4s, v5.s[0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "fmla v8.4s, v6.4s, v0.s[1]\n"
+ "fmla v12.4s, v6.4s, v1.s[1]\n"
+ "fmla v16.4s, v6.4s, v2.s[1]\n"
+ "fmla v20.4s, v6.4s, v3.s[1]\n"
+ "fmla v24.4s, v6.4s, v4.s[1]\n"
+ "fmla v28.4s, v6.4s, v5.s[1]\n"
+ "ldr q6, [x9, #0x10]\n"
+ "fmla v9.4s, v7.4s, v0.s[1]\n"
+ "fmla v13.4s, v7.4s, v1.s[1]\n"
+ "fmla v17.4s, v7.4s, v2.s[1]\n"
+ "fmla v21.4s, v7.4s, v3.s[1]\n"
+ "fmla v25.4s, v7.4s, v4.s[1]\n"
+ "fmla v29.4s, v7.4s, v5.s[1]\n"
+ "ldr q7, [x28, #0x10]\n"
+ "fmla v10.4s, v6.4s, v0.s[1]\n"
+ "fmla v14.4s, v6.4s, v1.s[1]\n"
+ "fmla v18.4s, v6.4s, v2.s[1]\n"
+ "fmla v22.4s, v6.4s, v3.s[1]\n"
+ "fmla v26.4s, v6.4s, v4.s[1]\n"
+ "fmla v30.4s, v6.4s, v5.s[1]\n"
+ "ldr q6, [x11, #0x20]\n"
+ "fmla v11.4s, v7.4s, v0.s[1]\n"
+ "fmla v15.4s, v7.4s, v1.s[1]\n"
+ "fmla v19.4s, v7.4s, v2.s[1]\n"
+ "fmla v23.4s, v7.4s, v3.s[1]\n"
+ "fmla v27.4s, v7.4s, v4.s[1]\n"
+ "fmla v31.4s, v7.4s, v5.s[1]\n"
+ "ldr q7, [x10, #0x20]\n"
+ "fmla v8.4s, v6.4s, v0.s[2]\n"
+ "fmla v12.4s, v6.4s, v1.s[2]\n"
+ "fmla v16.4s, v6.4s, v2.s[2]\n"
+ "fmla v20.4s, v6.4s, v3.s[2]\n"
+ "fmla v24.4s, v6.4s, v4.s[2]\n"
+ "fmla v28.4s, v6.4s, v5.s[2]\n"
+ "ldr q6, [x9, #0x20]\n"
+ "fmla v9.4s, v7.4s, v0.s[2]\n"
+ "fmla v13.4s, v7.4s, v1.s[2]\n"
+ "fmla v17.4s, v7.4s, v2.s[2]\n"
+ "fmla v21.4s, v7.4s, v3.s[2]\n"
+ "fmla v25.4s, v7.4s, v4.s[2]\n"
+ "fmla v29.4s, v7.4s, v5.s[2]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "fmla v10.4s, v6.4s, v0.s[2]\n"
+ "fmla v14.4s, v6.4s, v1.s[2]\n"
+ "fmla v18.4s, v6.4s, v2.s[2]\n"
+ "fmla v22.4s, v6.4s, v3.s[2]\n"
+ "fmla v26.4s, v6.4s, v4.s[2]\n"
+ "fmla v30.4s, v6.4s, v5.s[2]\n"
+ "ldr q6, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
+ "fmla v11.4s, v7.4s, v0.s[2]\n"
+ "fmla v15.4s, v7.4s, v1.s[2]\n"
+ "fmla v19.4s, v7.4s, v2.s[2]\n"
+ "fmla v23.4s, v7.4s, v3.s[2]\n"
+ "fmla v27.4s, v7.4s, v4.s[2]\n"
+ "fmla v31.4s, v7.4s, v5.s[2]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
+ "fmla v8.4s, v6.4s, v0.s[3]\n"
+ "fmla v12.4s, v6.4s, v1.s[3]\n"
+ "fmla v16.4s, v6.4s, v2.s[3]\n"
+ "fmla v20.4s, v6.4s, v3.s[3]\n"
+ "fmla v24.4s, v6.4s, v4.s[3]\n"
+ "fmla v28.4s, v6.4s, v5.s[3]\n"
+ "ldr q6, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "fmla v9.4s, v7.4s, v0.s[3]\n"
+ "fmla v13.4s, v7.4s, v1.s[3]\n"
+ "fmla v17.4s, v7.4s, v2.s[3]\n"
+ "fmla v21.4s, v7.4s, v3.s[3]\n"
+ "fmla v25.4s, v7.4s, v4.s[3]\n"
+ "fmla v29.4s, v7.4s, v5.s[3]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ "fmla v10.4s, v6.4s, v0.s[3]\n"
+ "fmla v14.4s, v6.4s, v1.s[3]\n"
+ "fmla v18.4s, v6.4s, v2.s[3]\n"
+ "fmla v22.4s, v6.4s, v3.s[3]\n"
+ "fmla v26.4s, v6.4s, v4.s[3]\n"
+ "fmla v30.4s, v6.4s, v5.s[3]\n"
+ "fmla v11.4s, v7.4s, v0.s[3]\n"
+ "fmla v15.4s, v7.4s, v1.s[3]\n"
+ "fmla v19.4s, v7.4s, v2.s[3]\n"
+ "fmla v23.4s, v7.4s, v3.s[3]\n"
+ "fmla v27.4s, v7.4s, v4.s[3]\n"
+ "fmla v31.4s, v7.4s, v5.s[3]\n"
+ "191:" // Height 6: Multiply loop: Main loop skip
+ "cbz x26, 193f\n"
+ "192:" // Height 6: Multiply loop: Odd block loop
+ "ldr s0, [x25], #0x4\n"
+ "ldr s1, [x24], #0x4\n"
+ "sub x26, x26, #0x1\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s3, [x22], #0x4\n"
+ "ldr s4, [x21], #0x4\n"
+ "ldr s5, [x20], #0x4\n"
+ "ldr q6, [x11, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "fmla v8.4s, v6.4s, v0.s[0]\n"
+ "fmla v12.4s, v6.4s, v1.s[0]\n"
+ "fmla v16.4s, v6.4s, v2.s[0]\n"
+ "fmla v20.4s, v6.4s, v3.s[0]\n"
+ "add x11, x11, #0x10\n"
+ "add x10, x10, #0x10\n"
+ "fmla v24.4s, v6.4s, v4.s[0]\n"
+ "fmla v28.4s, v6.4s, v5.s[0]\n"
+ "ldr q6, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
+ "fmla v9.4s, v7.4s, v0.s[0]\n"
+ "fmla v13.4s, v7.4s, v1.s[0]\n"
+ "fmla v17.4s, v7.4s, v2.s[0]\n"
+ "fmla v21.4s, v7.4s, v3.s[0]\n"
+ "fmla v25.4s, v7.4s, v4.s[0]\n"
+ "fmla v29.4s, v7.4s, v5.s[0]\n"
+ "ldr q7, [x28, #0x0]\n"
+ "add x28, x28, #0x10\n"
+ "fmla v10.4s, v6.4s, v0.s[0]\n"
+ "fmla v14.4s, v6.4s, v1.s[0]\n"
+ "fmla v18.4s, v6.4s, v2.s[0]\n"
+ "fmla v22.4s, v6.4s, v3.s[0]\n"
+ "fmla v26.4s, v6.4s, v4.s[0]\n"
+ "fmla v30.4s, v6.4s, v5.s[0]\n"
+ "fmla v11.4s, v7.4s, v0.s[0]\n"
+ "fmla v15.4s, v7.4s, v1.s[0]\n"
+ "fmla v19.4s, v7.4s, v2.s[0]\n"
+ "fmla v23.4s, v7.4s, v3.s[0]\n"
+ "fmla v27.4s, v7.4s, v4.s[0]\n"
+ "fmla v31.4s, v7.4s, v5.s[0]\n"
+ "cbnz x26, 192b\n"
+ "193:" // Height 6: Multiply loop: No odd multiplies
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 186b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "tbz %x[flags], #1, 194f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v1.4s\n"
+ "fmin v9.4s, v9.4s, v1.4s\n"
+ "fmin v10.4s, v10.4s, v1.4s\n"
+ "fmin v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v1.4s\n"
+ "fmin v13.4s, v13.4s, v1.4s\n"
+ "fmin v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v1.4s\n"
+ "fmin v16.4s, v16.4s, v1.4s\n"
+ "fmin v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v1.4s\n"
+ "fmin v19.4s, v19.4s, v1.4s\n"
+ "fmin v20.4s, v20.4s, v1.4s\n"
+ "fmin v21.4s, v21.4s, v1.4s\n"
+ "fmin v22.4s, v22.4s, v1.4s\n"
+ "fmin v23.4s, v23.4s, v1.4s\n"
+ "fmin v24.4s, v24.4s, v1.4s\n"
+ "fmin v25.4s, v25.4s, v1.4s\n"
+ "fmin v26.4s, v26.4s, v1.4s\n"
+ "fmin v27.4s, v27.4s, v1.4s\n"
+ "fmin v28.4s, v28.4s, v1.4s\n"
+ "fmin v29.4s, v29.4s, v1.4s\n"
+ "fmin v30.4s, v30.4s, v1.4s\n"
+ "fmin v31.4s, v31.4s, v1.4s\n"
+ "fmax v8.4s, v8.4s, v0.4s\n"
+ "fmax v9.4s, v9.4s, v0.4s\n"
+ "fmax v10.4s, v10.4s, v0.4s\n"
+ "fmax v11.4s, v11.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v0.4s\n"
+ "fmax v13.4s, v13.4s, v0.4s\n"
+ "fmax v14.4s, v14.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v0.4s\n"
+ "fmax v16.4s, v16.4s, v0.4s\n"
+ "fmax v17.4s, v17.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v0.4s\n"
+ "fmax v19.4s, v19.4s, v0.4s\n"
+ "fmax v20.4s, v20.4s, v0.4s\n"
+ "fmax v21.4s, v21.4s, v0.4s\n"
+ "fmax v22.4s, v22.4s, v0.4s\n"
+ "fmax v23.4s, v23.4s, v0.4s\n"
+ "fmax v24.4s, v24.4s, v0.4s\n"
+ "fmax v25.4s, v25.4s, v0.4s\n"
+ "fmax v26.4s, v26.4s, v0.4s\n"
+ "fmax v27.4s, v27.4s, v0.4s\n"
+ "fmax v28.4s, v28.4s, v0.4s\n"
+ "fmax v29.4s, v29.4s, v0.4s\n"
+ "fmax v30.4s, v30.4s, v0.4s\n"
+ "fmax v31.4s, v31.4s, v0.4s\n"
+ "194:" // Height 6: No activation
+ "cmp x13, #0x10\n"
+ "bge 203f\n"
+ "tbz x13, #3, 198f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "st1 { v9.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v13.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v17.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v21.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v25.4s }, [x21], #0x10\n"
+ "st1 { v28.4s }, [x20], #0x10\n"
+ "st1 { v29.4s }, [x20], #0x10\n"
+ "tbz x13, #2, 196f\n"
+ "st1 { v10.4s }, [x12], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v18.4s }, [x23], #0x10\n"
+ "st1 { v22.4s }, [x22], #0x10\n"
+ "st1 { v26.4s }, [x21], #0x10\n"
+ "st1 { v30.4s }, [x20], #0x10\n"
+ "tbz x13, #1, 195f\n"
+ "str d11, [x12], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "str d23, [x22], #0x8\n"
+ "str d27, [x21], #0x8\n"
+ "str d31, [x20], #0x8\n"
+ "tbz x13, #0, 202f\n"
+ "st1 { v11.s }[2], [x12]\n"
+ "st1 { v15.s }[2], [x24]\n"
+ "st1 { v19.s }[2], [x23]\n"
+ "st1 { v23.s }[2], [x22]\n"
+ "st1 { v27.s }[2], [x21]\n"
+ "st1 { v31.s }[2], [x20]\n"
+ "b 202f\n"
+ "195:" // Height 6: Partial direct writeback: partial_1_12
+ "tbz x13, #0, 202f\n"
+ "str s11, [x12, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
+ "str s19, [x23, #0x0]\n"
+ "str s23, [x22, #0x0]\n"
+ "str s27, [x21, #0x0]\n"
+ "str s31, [x20, #0x0]\n"
+ "b 202f\n"
+ "196:" // Height 6: Partial direct writeback: partial_2_8
+ "tbz x13, #1, 197f\n"
+ "str d10, [x12], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "str d26, [x21], #0x8\n"
+ "str d30, [x20], #0x8\n"
+ "tbz x13, #0, 202f\n"
+ "st1 { v10.s }[2], [x12]\n"
+ "st1 { v14.s }[2], [x24]\n"
+ "st1 { v18.s }[2], [x23]\n"
+ "st1 { v22.s }[2], [x22]\n"
+ "st1 { v26.s }[2], [x21]\n"
+ "st1 { v30.s }[2], [x20]\n"
+ "b 202f\n"
+ "197:" // Height 6: Partial direct writeback: partial_1_8
+ "tbz x13, #0, 202f\n"
+ "str s10, [x12, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
+ "str s18, [x23, #0x0]\n"
+ "str s22, [x22, #0x0]\n"
+ "str s26, [x21, #0x0]\n"
+ "str s30, [x20, #0x0]\n"
+ "b 202f\n"
+ "198:" // Height 6: Partial direct writeback: partial_4_0
+ "tbz x13, #2, 200f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v28.4s }, [x20], #0x10\n"
+ "tbz x13, #1, 199f\n"
+ "str d9, [x12], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "str d25, [x21], #0x8\n"
+ "str d29, [x20], #0x8\n"
+ "tbz x13, #0, 202f\n"
+ "st1 { v9.s }[2], [x12]\n"
+ "st1 { v13.s }[2], [x24]\n"
+ "st1 { v17.s }[2], [x23]\n"
+ "st1 { v21.s }[2], [x22]\n"
+ "st1 { v25.s }[2], [x21]\n"
+ "st1 { v29.s }[2], [x20]\n"
+ "b 202f\n"
+ "199:" // Height 6: Partial direct writeback: partial_1_4
+ "tbz x13, #0, 202f\n"
+ "str s9, [x12, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
+ "str s17, [x23, #0x0]\n"
+ "str s21, [x22, #0x0]\n"
+ "str s25, [x21, #0x0]\n"
+ "str s29, [x20, #0x0]\n"
+ "b 202f\n"
+ "200:" // Height 6: Partial direct writeback: partial_2_0
+ "tbz x13, #1, 201f\n"
+ "str d8, [x12], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "str d24, [x21], #0x8\n"
+ "str d28, [x20], #0x8\n"
+ "tbz x13, #0, 202f\n"
+ "st1 { v8.s }[2], [x12]\n"
+ "st1 { v12.s }[2], [x24]\n"
+ "st1 { v16.s }[2], [x23]\n"
+ "st1 { v20.s }[2], [x22]\n"
+ "st1 { v24.s }[2], [x21]\n"
+ "st1 { v28.s }[2], [x20]\n"
+ "b 202f\n"
+ "201:" // Height 6: Partial direct writeback: partial_1_0
+ "str s8, [x12, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
+ "str s16, [x23, #0x0]\n"
+ "str s20, [x22, #0x0]\n"
+ "str s24, [x21, #0x0]\n"
+ "str s28, [x20, #0x0]\n"
+ "202:" // Height 6: Partial direct writeback: Done
+ "b 204f\n"
+ "203:" // Height 6: Full writeback
+ "str q8, [x12, #0x0]\n"
+ "str q9, [x12, #0x10]\n"
+ "str q10, [x12, #0x20]\n"
+ "str q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q20, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q22, [x22, #0x20]\n"
+ "str q23, [x22, #0x30]\n"
+ "str q24, [x21, #0x0]\n"
+ "str q25, [x21, #0x10]\n"
+ "str q26, [x21, #0x20]\n"
+ "str q27, [x21, #0x30]\n"
+ "str q28, [x20, #0x0]\n"
+ "str q29, [x20, #0x10]\n"
+ "str q30, [x20, #0x20]\n"
+ "str q31, [x20, #0x30]\n"
+ "204:" // Height 6: Writeback done
+ "subs x13, x13, #0x10\n"
+ "bgt 172b\n"
+ "subs %x[M], %x[M], #0x6\n"
+ "beq 206f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 205f\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "b 1b\n"
+ "205:" // Update direct input
+ "mov x19, #0x18\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
+ "b 1b\n"
+ "206:" // Exit
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
+ : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ );
+}
+
+} // namespace arm_gemm
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24.hpp
new file mode 100644
index 0000000000..af2c1e5ae0
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24.hpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#pragma once
+#ifdef __aarch64__
+
+#include "../std_transforms_fixed.hpp"
+#include "../bfloat.hpp"
+#include "../kernel_weight_format.hpp"
+#include "../performance_parameters.hpp"
+
+#define ARGLIST \
+ unsigned int, const unsigned int *, \
+ IndirectInputArg<float>, \
+ size_t, size_t, \
+ const bfloat16 *, \
+ size_t, \
+ IndirectOutputArg<float>, \
+ const float *, Activation, bool
+
+namespace arm_gemm
+{
+// Actual kernel implementations
+void a64_ffhybrid_fp32bf16fp32_mmla_4x24( ARGLIST );
+
+class cls_a64_ffhybrid_fp32bf16fp32_mmla_4x24
+{
+public:
+ typedef float lhs_operand_type;
+ typedef bfloat16 rhs_operand_type;
+ typedef float result_type;
+
+ typedef void (*kern_type)( ARGLIST );
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 4;
+ }
+ static unsigned int stripe_width()
+ {
+ return 4;
+ }
+
+ static KernelWeightFormat kernel_weight_format()
+ {
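+        // VL256_BL64_BF16: weights pre-packed in 256-bit blocks with 64-bit sub-blocks, converted to bf16 (naming per kernel_weight_format.hpp in this patch).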
+ return KernelWeightFormat::VL256_BL64_BF16;
+ }
+
+ static unsigned int out_width()
+ {
+ return 24;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 4;
+ }
+
+ static constexpr bool supports_accumulate()
+ {
+ return true;
+ }
+
+ StdTransformsFixed<rhs_operand_type, result_type, 4, 24, 4> transforms = {};
+ template<typename T>
+ static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
+ {
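+        // The single value is an estimated MACs-per-cycle figure used when ranking candidate kernels; the default here is assumed to apply to all CPU models.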
+ if (std::is_same<T, float>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 28.48 };
+ }
+ }
+
+ return { 1.0 };
+ }
+
+ // Default to the generic kernel
+ kern_type kernel=a64_ffhybrid_fp32bf16fp32_mmla_4x24;
+ cls_a64_ffhybrid_fp32bf16fp32_mmla_4x24(const CPUInfo *)
+ {
+ }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24/generic.cpp
new file mode 100644
index 0000000000..245e653a43
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24/generic.cpp
@@ -0,0 +1,2561 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __aarch64__
+
+#include "arm_gemm.hpp"
+#include "../../utils.hpp"
+#include "../../bfloat.hpp"
+
+#include <cassert>
+#include <limits>
+
+namespace arm_gemm {
+
+void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
+ unsigned int num_strings, const unsigned int *string_lengths, IndirectInputArg<float> A_arg,
+ size_t M, size_t N, const bfloat16 *B_ptr, size_t B_stride, IndirectOutputArg<float> output_arg,
+ const float *bias, Activation act, bool accumulate
+)
+{
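+    // KernelArgs is read by the assembly via %[args_ptr]; each field is located with the offsetof() operands listed at the end of the asm block.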
+ struct KernelArgs {
+ float maxval = static_cast<float>(std::numeric_limits<float>::infinity());
+ float minval = - static_cast<float>(std::numeric_limits<float>::infinity());
+ unsigned int num_strings = {};
+ const unsigned int *string_lengths = {};
+ size_t N = {};
+ const bfloat16 *B_ptr = {};
+ const bfloat16 *cur_B_ptr = {};
+ size_t B_stride = {};
+ size_t output_offset = {};
+ size_t input_initial_col = {};
+ size_t input_offset = {};
+ } ka;
+
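+    // flags bits, as tested with tbz in the assembly: bit 0 = accumulate, bit 1 = apply min/max clamp, bit 2 = indirect output, bit 3 = indirect input.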
+ unsigned long flags=0;
+ void *output_ptr;
+ void *input_ptr;
+
+ if (output_arg.is_indirect) {
+ output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_offset=output_arg.indirect.offset;
+ flags |= 0x4;
+ } else {
+ output_ptr=(void *)(output_arg.direct.base);
+ ka.output_offset=output_arg.direct.stride;
+ }
+
+ if (A_arg.is_indirect) {
+ input_ptr=(void *)(A_arg.indirect.ptr);
+ ka.input_offset=A_arg.indirect.start_row;
+ ka.input_initial_col=A_arg.indirect.start_col;
+ flags |= 0x8;
+ } else {
+ assert(num_strings==1);
+ input_ptr=(void *)(A_arg.direct.base);
+ ka.input_offset=A_arg.direct.stride;
+ }
+ if (accumulate) {
+ flags |= 0x1;
+ }
+ ka.num_strings = num_strings;
+ ka.string_lengths = string_lengths;
+ ka.N = N;
+ ka.B_ptr = B_ptr;
+ ka.B_stride = B_stride;
+ switch(act.type) {
+ default:
+ case Activation::Type::None:
+ break;
+ case Activation::Type::BoundedReLU:
+ ka.maxval = static_cast<float>(act.param1);
+ /* fall through */
+ case Activation::Type::ReLU:
+ ka.minval = 0;
+ flags |= 0x2;
+ break;
+ }
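+    // The assembly dispatches on M: separate blocks handle heights 4, 3, 2 and 1. Each block splits the 24-wide N dimension across six B panel pointers (x11/x10/x9/x28/x27/x26), accumulates with BFMMLA, then applies the optional clamp and writes back (full or partial).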
+ __asm__ __volatile__(
+ "1:" // Row loop
+ "cmp %x[M], #0x4\n"
+ "bge 133f\n"
+ "cmp %x[M], #0x2\n"
+ "bgt 89f\n"
+ "beq 45f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "2:" // Height 1: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x27, x28, x19, LSL #1\n"
+ "add x26, x27, x19, LSL #1\n"
+ "add x19, x26, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0x14\n"
+ "bgt 3f\n"
+ "cmp x13, #0x10\n"
+ "mov x26, x11\n"
+ "bgt 3f\n"
+ "cmp x13, #0xc\n"
+ "mov x27, x11\n"
+ "bgt 3f\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
+ "bgt 3f\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
+ "bgt 3f\n"
+ "mov x10, x11\n"
+ "3:" // Height 1: B setup done
+ "cbz x14, 4f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
+ "zip2 v14.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
+ "zip2 v15.2d, v9.2d, v9.2d\n"
+ "zip1 v9.2d, v9.2d, v9.2d\n"
+ "ldr q12, [x14, #0x40]\n"
+ "ldr q13, [x14, #0x50]\n"
+ "zip2 v16.2d, v10.2d, v10.2d\n"
+ "zip1 v10.2d, v10.2d, v10.2d\n"
+ "zip2 v17.2d, v11.2d, v11.2d\n"
+ "zip1 v11.2d, v11.2d, v11.2d\n"
+ "add x14, x14, #0x60\n"
+ "zip2 v18.2d, v12.2d, v12.2d\n"
+ "zip1 v12.2d, v12.2d, v12.2d\n"
+ "zip2 v19.2d, v13.2d, v13.2d\n"
+ "zip1 v13.2d, v13.2d, v13.2d\n"
+ "b 20f\n"
+ "4:" // Height 1: no bias
+ "tbz %x[flags], #0, 19f\n"
+ "cmp x13, #0x18\n"
+ "bge 17f\n"
+ "tbz x13, #4, 8f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v11.4s }, [x12], #0x10\n"
+ "ld1 { v12.4s }, [x12], #0x10\n"
+ "tbz x13, #2, 6f\n"
+ "ld1 { v13.4s }, [x12], #0x10\n"
+ "tbz x13, #1, 5f\n"
+ "ldr d20, [x12], #0x8\n"
+ "mov x19, #0x58\n"
+ "tbz x13, #0, 16f\n"
+ "ld1 { v20.s }[2], [x12]\n"
+ "b 16f\n"
+ "5:" // Height 1: Partial accumulate: partial_1_20
+ "mov x19, #0x50\n"
+ "tbz x13, #0, 16f\n"
+ "ldr s20, [x12, #0x0]\n"
+ "b 16f\n"
+ "6:" // Height 1: Partial accumulate: partial_2_16
+ "tbz x13, #1, 7f\n"
+ "ldr d13, [x12], #0x8\n"
+ "mov x19, #0x48\n"
+ "tbz x13, #0, 16f\n"
+ "ld1 { v13.s }[2], [x12]\n"
+ "b 16f\n"
+ "7:" // Height 1: Partial accumulate: partial_1_16
+ "mov x19, #0x40\n"
+ "tbz x13, #0, 16f\n"
+ "ldr s13, [x12, #0x0]\n"
+ "b 16f\n"
+ "8:" // Height 1: Partial accumulate: partial_8_0
+ "tbz x13, #3, 12f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "tbz x13, #2, 10f\n"
+ "ld1 { v11.4s }, [x12], #0x10\n"
+ "tbz x13, #1, 9f\n"
+ "ldr d12, [x12], #0x8\n"
+ "mov x19, #0x38\n"
+ "tbz x13, #0, 16f\n"
+ "ld1 { v12.s }[2], [x12]\n"
+ "b 16f\n"
+ "9:" // Height 1: Partial accumulate: partial_1_12
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 16f\n"
+ "ldr s12, [x12, #0x0]\n"
+ "b 16f\n"
+ "10:" // Height 1: Partial accumulate: partial_2_8
+ "tbz x13, #1, 11f\n"
+ "ldr d11, [x12], #0x8\n"
+ "mov x19, #0x28\n"
+ "tbz x13, #0, 16f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "b 16f\n"
+ "11:" // Height 1: Partial accumulate: partial_1_8
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 16f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "b 16f\n"
+ "12:" // Height 1: Partial accumulate: partial_4_0
+ "tbz x13, #2, 14f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "tbz x13, #1, 13f\n"
+ "ldr d10, [x12], #0x8\n"
+ "mov x19, #0x18\n"
+ "tbz x13, #0, 16f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "b 16f\n"
+ "13:" // Height 1: Partial accumulate: partial_1_4
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 16f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "b 16f\n"
+ "14:" // Height 1: Partial accumulate: partial_2_0
+ "tbz x13, #1, 15f\n"
+ "ldr d9, [x12], #0x8\n"
+ "mov x19, #0x8\n"
+ "tbz x13, #0, 16f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "b 16f\n"
+ "15:" // Height 1: Partial accumulate: partial_1_0
+ "ldr s9, [x12, #0x0]\n"
+ "mov x19, #0x0\n"
+ "16:" // Height 1: Partial accumulate: Done
+ "sub x12, x12, x19\n"
+ "b 18f\n"
+ "17:" // Height 1: full accumulate
+ "ldr q9, [x12, #0x0]\n"
+ "ldr q10, [x12, #0x10]\n"
+ "ldr q11, [x12, #0x20]\n"
+ "ldr q12, [x12, #0x30]\n"
+ "ldr q13, [x12, #0x40]\n"
+ "ldr q20, [x12, #0x50]\n"
+ "18:" // Height 1: MMLA fixup
+ "zip1 v8.2d, v9.2d, v14.2d\n"
+ "zip2 v14.2d, v9.2d, v14.2d\n"
+ "zip1 v9.2d, v10.2d, v15.2d\n"
+ "zip2 v15.2d, v10.2d, v15.2d\n"
+ "zip1 v10.2d, v11.2d, v16.2d\n"
+ "zip2 v16.2d, v11.2d, v16.2d\n"
+ "zip1 v11.2d, v12.2d, v17.2d\n"
+ "zip2 v17.2d, v12.2d, v17.2d\n"
+ "zip1 v12.2d, v13.2d, v18.2d\n"
+ "zip2 v18.2d, v13.2d, v18.2d\n"
+ "zip1 v13.2d, v20.2d, v19.2d\n"
+ "zip2 v19.2d, v20.2d, v19.2d\n"
+ "b 20f\n"
+ "19:" // Height 1: no accumulate
+ "movi v8.16b, #0x0\n"
+ "movi v9.16b, #0x0\n"
+ "movi v10.16b, #0x0\n"
+ "movi v11.16b, #0x0\n"
+ "movi v12.16b, #0x0\n"
+ "movi v13.16b, #0x0\n"
+ "movi v14.16b, #0x0\n"
+ "movi v15.16b, #0x0\n"
+ "movi v16.16b, #0x0\n"
+ "movi v17.16b, #0x0\n"
+ "movi v18.16b, #0x0\n"
+ "movi v19.16b, #0x0\n"
+ "20:" // Height 1: setup done
+ "mov x25, #0x0\n"
+ "21:" // Height 1: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w24, [x19, x25, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 22f\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "cbnz x25, 23f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19, LSL #2\n"
+ "b 23f\n"
+ "22:" // Height 1: setup direct input
+ "mov x23, %x[input_ptr]\n"
+ "23:" // Height 1: input setup done
+ "cmp x24, #0x4\n"
+ "blt 26f\n"
+ "ld1 { v0.4s }, [x23], #0x10\n"
+ "ldr q4, [x11, #0x0]\n"
+ "cmp x24, #0x8\n"
+ "ldr q5, [x11, #0x10]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "blt 25f\n"
+ "24:" // Height 1: Multiply loop: Main loop head
+ ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
+ "ldr q4, [x9, #0x0]\n"
+ ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
+ "ldr q5, [x9, #0x10]\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x28, #0x0]\n"
+ ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x28, #0x10]\n"
+ ".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
+ "ldr q4, [x27, #0x0]\n"
+ ".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
+ "ldr q5, [x27, #0x10]\n"
+ ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x26, #0x0]\n"
+ ".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x26, #0x10]\n"
+ "sub x24, x24, #0x4\n"
+ "cmp x24, #0x8\n"
+ ".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
+ "add x11, x11, #0x20\n"
+ "ldr q4, [x11, #0x0]\n"
+ "add x10, x10, #0x20\n"
+ ".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
+ "ldr q5, [x11, #0x10]\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x10, #0x0]\n"
+ ".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
+ "ld1 { v0.4s }, [x23], #0x10\n"
+ "ldr q7, [x10, #0x10]\n"
+ "add x9, x9, #0x20\n"
+ "add x28, x28, #0x20\n"
+ "add x27, x27, #0x20\n"
+ "add x26, x26, #0x20\n"
+ "bge 24b\n"
+ "25:" // Height 1: Multiply loop: Single iteration only
+ ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
+ "ldr q4, [x9, #0x0]\n"
+ ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
+ "ldr q5, [x9, #0x10]\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x28, #0x0]\n"
+ ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x28, #0x10]\n"
+ ".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
+ "ldr q4, [x27, #0x0]\n"
+ ".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
+ "ldr q5, [x27, #0x10]\n"
+ ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x26, #0x0]\n"
+ ".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x26, #0x10]\n"
+ "sub x24, x24, #0x4\n"
+ ".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
+ ".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
+ "add x11, x11, #0x20\n"
+ "add x10, x10, #0x20\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
+ "add x9, x9, #0x20\n"
+ "add x28, x28, #0x20\n"
+ "add x27, x27, #0x20\n"
+ "add x26, x26, #0x20\n"
+ "26:" // Height 1: Multiply loop: Main loop skip
+ "cbz x24, 29f\n"
+ "cbz x24, 29f\n"
+ "tbz x24, #1, 27f\n"
+ "ldr d0, [x23], #0x8\n"
+ "tbz x24, #0, 28f\n"
+ "ld1 { v0.s }[2], [x23]\n"
+ "b 28f\n"
+ "27:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
+ "ldr s0, [x23, #0x0]\n"
+ "28:" // Height 1: Multiply loop: Ragged operand read: Done
+ "ldr q4, [x11, #0x0]\n"
+ "ldr q5, [x11, #0x10]\n"
+ ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ "ldr q4, [x9, #0x0]\n"
+ "ldr q5, [x9, #0x10]\n"
+ ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
+ "ldr q6, [x28, #0x0]\n"
+ "ldr q7, [x28, #0x10]\n"
+ ".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
+ "ldr q4, [x27, #0x0]\n"
+ "ldr q5, [x27, #0x10]\n"
+ ".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
+ "ldr q6, [x26, #0x0]\n"
+ "ldr q7, [x26, #0x10]\n"
+ ".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
+ "add x11, x11, #0x20\n"
+ "add x10, x10, #0x20\n"
+ "add x9, x9, #0x20\n"
+ "add x28, x28, #0x20\n"
+ "add x27, x27, #0x20\n"
+ "add x26, x26, #0x20\n"
+ "29:" // Height 1: Multiply loop: No odd multiplies
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
+ "bne 21b\n"
+ "uzp1 v8.2d, v8.2d, v14.2d\n"
+ "uzp1 v9.2d, v9.2d, v15.2d\n"
+ "uzp1 v10.2d, v10.2d, v16.2d\n"
+ "uzp1 v11.2d, v11.2d, v17.2d\n"
+ "uzp1 v12.2d, v12.2d, v18.2d\n"
+ "uzp1 v13.2d, v13.2d, v19.2d\n"
+ "tbz %x[flags], #1, 30f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v8.4s, v8.4s, v1.4s\n"
+ "fmin v9.4s, v9.4s, v1.4s\n"
+ "fmin v10.4s, v10.4s, v1.4s\n"
+ "fmin v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v1.4s\n"
+ "fmin v13.4s, v13.4s, v1.4s\n"
+ "fmax v8.4s, v8.4s, v0.4s\n"
+ "fmax v9.4s, v9.4s, v0.4s\n"
+ "fmax v10.4s, v10.4s, v0.4s\n"
+ "fmax v11.4s, v11.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v0.4s\n"
+ "fmax v13.4s, v13.4s, v0.4s\n"
+ "30:" // Height 1: No activation
+ "cmp x13, #0x18\n"
+ "bge 43f\n"
+ "tbz x13, #4, 34f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "st1 { v9.4s }, [x12], #0x10\n"
+ "st1 { v10.4s }, [x12], #0x10\n"
+ "st1 { v11.4s }, [x12], #0x10\n"
+ "tbz x13, #2, 32f\n"
+ "st1 { v12.4s }, [x12], #0x10\n"
+ "tbz x13, #1, 31f\n"
+ "str d13, [x12], #0x8\n"
+ "tbz x13, #0, 42f\n"
+ "st1 { v13.s }[2], [x12]\n"
+ "b 42f\n"
+ "31:" // Height 1: Partial direct writeback: partial_1_20
+ "tbz x13, #0, 42f\n"
+ "str s13, [x12, #0x0]\n"
+ "b 42f\n"
+ "32:" // Height 1: Partial direct writeback: partial_2_16
+ "tbz x13, #1, 33f\n"
+ "str d12, [x12], #0x8\n"
+ "tbz x13, #0, 42f\n"
+ "st1 { v12.s }[2], [x12]\n"
+ "b 42f\n"
+ "33:" // Height 1: Partial direct writeback: partial_1_16
+ "tbz x13, #0, 42f\n"
+ "str s12, [x12, #0x0]\n"
+ "b 42f\n"
+ "34:" // Height 1: Partial direct writeback: partial_8_0
+ "tbz x13, #3, 38f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "st1 { v9.4s }, [x12], #0x10\n"
+ "tbz x13, #2, 36f\n"
+ "st1 { v10.4s }, [x12], #0x10\n"
+ "tbz x13, #1, 35f\n"
+ "str d11, [x12], #0x8\n"
+ "tbz x13, #0, 42f\n"
+ "st1 { v11.s }[2], [x12]\n"
+ "b 42f\n"
+ "35:" // Height 1: Partial direct writeback: partial_1_12
+ "tbz x13, #0, 42f\n"
+ "str s11, [x12, #0x0]\n"
+ "b 42f\n"
+ "36:" // Height 1: Partial direct writeback: partial_2_8
+ "tbz x13, #1, 37f\n"
+ "str d10, [x12], #0x8\n"
+ "tbz x13, #0, 42f\n"
+ "st1 { v10.s }[2], [x12]\n"
+ "b 42f\n"
+ "37:" // Height 1: Partial direct writeback: partial_1_8
+ "tbz x13, #0, 42f\n"
+ "str s10, [x12, #0x0]\n"
+ "b 42f\n"
+ "38:" // Height 1: Partial direct writeback: partial_4_0
+ "tbz x13, #2, 40f\n"
+ "st1 { v8.4s }, [x12], #0x10\n"
+ "tbz x13, #1, 39f\n"
+ "str d9, [x12], #0x8\n"
+ "tbz x13, #0, 42f\n"
+ "st1 { v9.s }[2], [x12]\n"
+ "b 42f\n"
+ "39:" // Height 1: Partial direct writeback: partial_1_4
+ "tbz x13, #0, 42f\n"
+ "str s9, [x12, #0x0]\n"
+ "b 42f\n"
+ "40:" // Height 1: Partial direct writeback: partial_2_0
+ "tbz x13, #1, 41f\n"
+ "str d8, [x12], #0x8\n"
+ "tbz x13, #0, 42f\n"
+ "st1 { v8.s }[2], [x12]\n"
+ "b 42f\n"
+ "41:" // Height 1: Partial direct writeback: partial_1_0
+ "str s8, [x12, #0x0]\n"
+ "42:" // Height 1: Partial direct writeback: Done
+ "b 44f\n"
+ "43:" // Height 1: Full writeback
+ "str q8, [x12, #0x0]\n"
+ "str q9, [x12, #0x10]\n"
+ "str q10, [x12, #0x20]\n"
+ "str q11, [x12, #0x30]\n"
+ "str q12, [x12, #0x40]\n"
+ "str q13, [x12, #0x50]\n"
+ "add x12, x12, #0x60\n"
+ "44:" // Height 1: Writeback done
+ "subs x13, x13, #0x18\n"
+ "bgt 2b\n"
+ "b 178f\n"
+ "45:" // Height 2
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "46:" // Height 2: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x27, x28, x19, LSL #1\n"
+ "add x26, x27, x19, LSL #1\n"
+ "add x19, x26, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0x14\n"
+ "bgt 47f\n"
+ "cmp x13, #0x10\n"
+ "mov x26, x11\n"
+ "bgt 47f\n"
+ "cmp x13, #0xc\n"
+ "mov x27, x11\n"
+ "bgt 47f\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
+ "bgt 47f\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
+ "bgt 47f\n"
+ "mov x10, x11\n"
+ "47:" // Height 2: B setup done
+ "cbz x14, 48f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
+ "zip2 v14.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
+ "zip2 v15.2d, v9.2d, v9.2d\n"
+ "zip1 v9.2d, v9.2d, v9.2d\n"
+ "ldr q12, [x14, #0x40]\n"
+ "ldr q13, [x14, #0x50]\n"
+ "zip2 v16.2d, v10.2d, v10.2d\n"
+ "zip1 v10.2d, v10.2d, v10.2d\n"
+ "zip2 v17.2d, v11.2d, v11.2d\n"
+ "zip1 v11.2d, v11.2d, v11.2d\n"
+ "add x14, x14, #0x60\n"
+ "zip2 v18.2d, v12.2d, v12.2d\n"
+ "zip1 v12.2d, v12.2d, v12.2d\n"
+ "zip2 v19.2d, v13.2d, v13.2d\n"
+ "zip1 v13.2d, v13.2d, v13.2d\n"
+ "b 64f\n"
+ "48:" // Height 2: no bias
+ "tbz %x[flags], #0, 63f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x13, #0x18\n"
+ "add x22, x12, x19, LSL #2\n"
+ "bge 61f\n"
+ "tbz x13, #4, 52f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v15.4s }, [x22], #0x10\n"
+ "ld1 { v11.4s }, [x12], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v12.4s }, [x12], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "tbz x13, #2, 50f\n"
+ "ld1 { v13.4s }, [x12], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "tbz x13, #1, 49f\n"
+ "ldr d20, [x12], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "mov x19, #0x58\n"
+ "tbz x13, #0, 60f\n"
+ "ld1 { v20.s }[2], [x12]\n"
+ "ld1 { v19.s }[2], [x22]\n"
+ "b 60f\n"
+ "49:" // Height 2: Partial accumulate: partial_1_20
+ "mov x19, #0x50\n"
+ "tbz x13, #0, 60f\n"
+ "ldr s20, [x12, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
+ "b 60f\n"
+ "50:" // Height 2: Partial accumulate: partial_2_16
+ "tbz x13, #1, 51f\n"
+ "ldr d13, [x12], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
+ "mov x19, #0x48\n"
+ "tbz x13, #0, 60f\n"
+ "ld1 { v13.s }[2], [x12]\n"
+ "ld1 { v18.s }[2], [x22]\n"
+ "b 60f\n"
+ "51:" // Height 2: Partial accumulate: partial_1_16
+ "mov x19, #0x40\n"
+ "tbz x13, #0, 60f\n"
+ "ldr s13, [x12, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
+ "b 60f\n"
+ "52:" // Height 2: Partial accumulate: partial_8_0
+ "tbz x13, #3, 56f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v15.4s }, [x22], #0x10\n"
+ "tbz x13, #2, 54f\n"
+ "ld1 { v11.4s }, [x12], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "tbz x13, #1, 53f\n"
+ "ldr d12, [x12], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "mov x19, #0x38\n"
+ "tbz x13, #0, 60f\n"
+ "ld1 { v12.s }[2], [x12]\n"
+ "ld1 { v17.s }[2], [x22]\n"
+ "b 60f\n"
+ "53:" // Height 2: Partial accumulate: partial_1_12
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 60f\n"
+ "ldr s12, [x12, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
+ "b 60f\n"
+ "54:" // Height 2: Partial accumulate: partial_2_8
+ "tbz x13, #1, 55f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "mov x19, #0x28\n"
+ "tbz x13, #0, 60f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "ld1 { v16.s }[2], [x22]\n"
+ "b 60f\n"
+ "55:" // Height 2: Partial accumulate: partial_1_8
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 60f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "ldr s16, [x22, #0x0]\n"
+ "b 60f\n"
+ "56:" // Height 2: Partial accumulate: partial_4_0
+ "tbz x13, #2, 58f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "tbz x13, #1, 57f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d15, [x22], #0x8\n"
+ "mov x19, #0x18\n"
+ "tbz x13, #0, 60f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "ld1 { v15.s }[2], [x22]\n"
+ "b 60f\n"
+ "57:" // Height 2: Partial accumulate: partial_1_4
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 60f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "ldr s15, [x22, #0x0]\n"
+ "b 60f\n"
+ "58:" // Height 2: Partial accumulate: partial_2_0
+ "tbz x13, #1, 59f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d14, [x22], #0x8\n"
+ "mov x19, #0x8\n"
+ "tbz x13, #0, 60f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "ld1 { v14.s }[2], [x22]\n"
+ "b 60f\n"
+ "59:" // Height 2: Partial accumulate: partial_1_0
+ "ldr s9, [x12, #0x0]\n"
+ "ldr s14, [x22, #0x0]\n"
+ "mov x19, #0x0\n"
+ "60:" // Height 2: Partial accumulate: Done
+ "sub x12, x12, x19\n"
+ "b 62f\n"
+ "61:" // Height 2: full accumulate
+ "ldr q9, [x12, #0x0]\n"
+ "ldr q10, [x12, #0x10]\n"
+ "ldr q11, [x12, #0x20]\n"
+ "ldr q12, [x12, #0x30]\n"
+ "ldr q13, [x12, #0x40]\n"
+ "ldr q20, [x12, #0x50]\n"
+ "ldr q14, [x22, #0x0]\n"
+ "ldr q15, [x22, #0x10]\n"
+ "ldr q16, [x22, #0x20]\n"
+ "ldr q17, [x22, #0x30]\n"
+ "ldr q18, [x22, #0x40]\n"
+ "ldr q19, [x22, #0x50]\n"
+ "62:" // Height 2: MMLA fixup
+ "zip1 v8.2d, v9.2d, v14.2d\n"
+ "zip2 v14.2d, v9.2d, v14.2d\n"
+ "zip1 v9.2d, v10.2d, v15.2d\n"
+ "zip2 v15.2d, v10.2d, v15.2d\n"
+ "zip1 v10.2d, v11.2d, v16.2d\n"
+ "zip2 v16.2d, v11.2d, v16.2d\n"
+ "zip1 v11.2d, v12.2d, v17.2d\n"
+ "zip2 v17.2d, v12.2d, v17.2d\n"
+ "zip1 v12.2d, v13.2d, v18.2d\n"
+ "zip2 v18.2d, v13.2d, v18.2d\n"
+ "zip1 v13.2d, v20.2d, v19.2d\n"
+ "zip2 v19.2d, v20.2d, v19.2d\n"
+ "b 64f\n"
+ "63:" // Height 2: no accumulate
+ "movi v8.16b, #0x0\n"
+ "movi v9.16b, #0x0\n"
+ "movi v10.16b, #0x0\n"
+ "movi v11.16b, #0x0\n"
+ "movi v12.16b, #0x0\n"
+ "movi v13.16b, #0x0\n"
+ "movi v14.16b, #0x0\n"
+ "movi v15.16b, #0x0\n"
+ "movi v16.16b, #0x0\n"
+ "movi v17.16b, #0x0\n"
+ "movi v18.16b, #0x0\n"
+ "movi v19.16b, #0x0\n"
+ "64:" // Height 2: setup done
+ "mov x25, #0x0\n"
+ "65:" // Height 2: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w24, [x19, x25, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 66f\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "cbnz x25, 67f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "b 67f\n"
+ "66:" // Height 2: setup direct input
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "67:" // Height 2: input setup done
+ "cmp x24, #0x4\n"
+ "blt 70f\n"
+ "ld1 { v0.4s }, [x23], #0x10\n"
+ "ld1 { v1.4s }, [x22], #0x10\n"
+ "cmp x24, #0x8\n"
+ "ldr q4, [x11, #0x0]\n"
+ "ldr q5, [x11, #0x10]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "blt 69f\n"
+ "68:" // Height 2: Multiply loop: Main loop head
+ ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ "ld1 { v1.4s }, [x22], #0x10\n"
+ ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
+ "ldr q4, [x9, #0x0]\n"
+ ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
+ "ldr q5, [x9, #0x10]\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x28, #0x0]\n"
+ ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x28, #0x10]\n"
+ ".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
+ "ldr q4, [x27, #0x0]\n"
+ ".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
+ "ldr q5, [x27, #0x10]\n"
+ ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x26, #0x0]\n"
+ ".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x26, #0x10]\n"
+ "sub x24, x24, #0x4\n"
+ "cmp x24, #0x8\n"
+ "add x11, x11, #0x20\n"
+ ".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
+ "ldr q4, [x11, #0x0]\n"
+ "add x10, x10, #0x20\n"
+ ".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
+ "ldr q5, [x11, #0x10]\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x10, #0x0]\n"
+ ".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
+ "ld1 { v0.4s }, [x23], #0x10\n"
+ "add x9, x9, #0x20\n"
+ "ldr q7, [x10, #0x10]\n"
+ "add x28, x28, #0x20\n"
+ "add x27, x27, #0x20\n"
+ "add x26, x26, #0x20\n"
+ "bge 68b\n"
+ "69:" // Height 2: Multiply loop: Single iteration only
+ ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
+ "ldr q4, [x9, #0x0]\n"
+ ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
+ "ldr q5, [x9, #0x10]\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x28, #0x0]\n"
+ ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x28, #0x10]\n"
+ ".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
+ "ldr q4, [x27, #0x0]\n"
+ ".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
+ "ldr q5, [x27, #0x10]\n"
+ ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
+ "ldr q6, [x26, #0x0]\n"
+ ".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
+ "ldr q7, [x26, #0x10]\n"
+ "sub x24, x24, #0x4\n"
+ ".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
+ ".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
+ "add x11, x11, #0x20\n"
+ "add x10, x10, #0x20\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
+ "add x9, x9, #0x20\n"
+ "add x28, x28, #0x20\n"
+ "add x27, x27, #0x20\n"
+ "add x26, x26, #0x20\n"
+ "70:" // Height 2: Multiply loop: Main loop skip
+ "cbz x24, 73f\n"
+ "cbz x24, 73f\n"
+ "tbz x24, #1, 71f\n"
+ "ldr d0, [x23], #0x8\n"
+ "ldr d1, [x22], #0x8\n"
+ "tbz x24, #0, 72f\n"
+ "ld1 { v0.s }[2], [x23]\n"
+ "ld1 { v1.s }[2], [x22]\n"
+ "b 72f\n"
+ "71:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
+ "ldr s0, [x23, #0x0]\n"
+ "ldr s1, [x22, #0x0]\n"
+ "72:" // Height 2: Multiply loop: Ragged operand read: Done
+ "ldr q4, [x11, #0x0]\n"
+ "ldr q5, [x11, #0x10]\n"
+ ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
+ ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
+ "ldr q4, [x9, #0x0]\n"
+ "ldr q5, [x9, #0x10]\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
+ "ldr q6, [x28, #0x0]\n"
+ "ldr q7, [x28, #0x10]\n"
+ ".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
+ ".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
+ "ldr q4, [x27, #0x0]\n"
+ "ldr q5, [x27, #0x10]\n"
+ ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
+ "ldr q6, [x26, #0x0]\n"
+ "ldr q7, [x26, #0x10]\n"
+ ".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
+ ".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
+ "add x11, x11, #0x20\n"
+ "add x10, x10, #0x20\n"
+ "add x9, x9, #0x20\n"
+ "add x28, x28, #0x20\n"
+ "add x27, x27, #0x20\n"
+ "add x26, x26, #0x20\n"
+ "73:" // Height 2: Multiply loop: No odd multiplies
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
+ "bne 65b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "uzp1 v4.2d, v8.2d, v14.2d\n"
+ "uzp2 v8.2d, v8.2d, v14.2d\n"
+ "add x22, x12, x19, LSL #2\n"
+ "uzp1 v14.2d, v9.2d, v15.2d\n"
+ "uzp2 v9.2d, v9.2d, v15.2d\n"
+ "uzp1 v15.2d, v10.2d, v16.2d\n"
+ "uzp2 v10.2d, v10.2d, v16.2d\n"
+ "uzp1 v16.2d, v11.2d, v17.2d\n"
+ "uzp2 v11.2d, v11.2d, v17.2d\n"
+ "uzp1 v17.2d, v12.2d, v18.2d\n"
+ "uzp2 v12.2d, v12.2d, v18.2d\n"
+ "uzp1 v18.2d, v13.2d, v19.2d\n"
+ "uzp2 v13.2d, v13.2d, v19.2d\n"
+ "tbz %x[flags], #1, 74f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v4.4s, v4.4s, v1.4s\n"
+ "fmin v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v1.4s\n"
+ "fmin v16.4s, v16.4s, v1.4s\n"
+ "fmin v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v1.4s\n"
+ "fmin v8.4s, v8.4s, v1.4s\n"
+ "fmin v9.4s, v9.4s, v1.4s\n"
+ "fmin v10.4s, v10.4s, v1.4s\n"
+ "fmin v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v1.4s\n"
+ "fmin v13.4s, v13.4s, v1.4s\n"
+ "fmax v4.4s, v4.4s, v0.4s\n"
+ "fmax v14.4s, v14.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v0.4s\n"
+ "fmax v16.4s, v16.4s, v0.4s\n"
+ "fmax v17.4s, v17.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v0.4s\n"
+ "fmax v9.4s, v9.4s, v0.4s\n"
+ "fmax v10.4s, v10.4s, v0.4s\n"
+ "fmax v11.4s, v11.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v0.4s\n"
+ "fmax v13.4s, v13.4s, v0.4s\n"
+ "74:" // Height 2: No activation
+ "cmp x13, #0x18\n"
+ "bge 87f\n"
+ "tbz x13, #4, 78f\n"
+ "st1 { v4.4s }, [x12], #0x10\n"
+ "st1 { v14.4s }, [x12], #0x10\n"
+ "st1 { v15.4s }, [x12], #0x10\n"
+ "st1 { v16.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x22], #0x10\n"
+ "st1 { v9.4s }, [x22], #0x10\n"
+ "st1 { v10.4s }, [x22], #0x10\n"
+ "st1 { v11.4s }, [x22], #0x10\n"
+ "tbz x13, #2, 76f\n"
+ "st1 { v17.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x22], #0x10\n"
+ "tbz x13, #1, 75f\n"
+ "str d18, [x12], #0x8\n"
+ "str d13, [x22], #0x8\n"
+ "tbz x13, #0, 86f\n"
+ "st1 { v18.s }[2], [x12]\n"
+ "st1 { v13.s }[2], [x22]\n"
+ "b 86f\n"
+ "75:" // Height 2: Partial direct writeback: partial_1_20
+ "tbz x13, #0, 86f\n"
+ "str s18, [x12, #0x0]\n"
+ "str s13, [x22, #0x0]\n"
+ "b 86f\n"
+ "76:" // Height 2: Partial direct writeback: partial_2_16
+ "tbz x13, #1, 77f\n"
+ "str d17, [x12], #0x8\n"
+ "str d12, [x22], #0x8\n"
+ "tbz x13, #0, 86f\n"
+ "st1 { v17.s }[2], [x12]\n"
+ "st1 { v12.s }[2], [x22]\n"
+ "b 86f\n"
+ "77:" // Height 2: Partial direct writeback: partial_1_16
+ "tbz x13, #0, 86f\n"
+ "str s17, [x12, #0x0]\n"
+ "str s12, [x22, #0x0]\n"
+ "b 86f\n"
+ "78:" // Height 2: Partial direct writeback: partial_8_0
+ "tbz x13, #3, 82f\n"
+ "st1 { v4.4s }, [x12], #0x10\n"
+ "st1 { v14.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x22], #0x10\n"
+ "st1 { v9.4s }, [x22], #0x10\n"
+ "tbz x13, #2, 80f\n"
+ "st1 { v15.4s }, [x12], #0x10\n"
+ "st1 { v10.4s }, [x22], #0x10\n"
+ "tbz x13, #1, 79f\n"
+ "str d16, [x12], #0x8\n"
+ "str d11, [x22], #0x8\n"
+ "tbz x13, #0, 86f\n"
+ "st1 { v16.s }[2], [x12]\n"
+ "st1 { v11.s }[2], [x22]\n"
+ "b 86f\n"
+ "79:" // Height 2: Partial direct writeback: partial_1_12
+ "tbz x13, #0, 86f\n"
+ "str s16, [x12, #0x0]\n"
+ "str s11, [x22, #0x0]\n"
+ "b 86f\n"
+ "80:" // Height 2: Partial direct writeback: partial_2_8
+ "tbz x13, #1, 81f\n"
+ "str d15, [x12], #0x8\n"
+ "str d10, [x22], #0x8\n"
+ "tbz x13, #0, 86f\n"
+ "st1 { v15.s }[2], [x12]\n"
+ "st1 { v10.s }[2], [x22]\n"
+ "b 86f\n"
+ "81:" // Height 2: Partial direct writeback: partial_1_8
+ "tbz x13, #0, 86f\n"
+ "str s15, [x12, #0x0]\n"
+ "str s10, [x22, #0x0]\n"
+ "b 86f\n"
+ "82:" // Height 2: Partial direct writeback: partial_4_0
+ "tbz x13, #2, 84f\n"
+ "st1 { v4.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x22], #0x10\n"
+ "tbz x13, #1, 83f\n"
+ "str d14, [x12], #0x8\n"
+ "str d9, [x22], #0x8\n"
+ "tbz x13, #0, 86f\n"
+ "st1 { v14.s }[2], [x12]\n"
+ "st1 { v9.s }[2], [x22]\n"
+ "b 86f\n"
+ "83:" // Height 2: Partial direct writeback: partial_1_4
+ "tbz x13, #0, 86f\n"
+ "str s14, [x12, #0x0]\n"
+ "str s9, [x22, #0x0]\n"
+ "b 86f\n"
+ "84:" // Height 2: Partial direct writeback: partial_2_0
+ "tbz x13, #1, 85f\n"
+ "str d4, [x12], #0x8\n"
+ "str d8, [x22], #0x8\n"
+ "tbz x13, #0, 86f\n"
+ "st1 { v4.s }[2], [x12]\n"
+ "st1 { v8.s }[2], [x22]\n"
+ "b 86f\n"
+ "85:" // Height 2: Partial direct writeback: partial_1_0
+ "str s4, [x12, #0x0]\n"
+ "str s8, [x22, #0x0]\n"
+ "86:" // Height 2: Partial direct writeback: Done
+ "b 88f\n"
+ "87:" // Height 2: Full writeback
+ "str q4, [x12, #0x0]\n"
+ "str q14, [x12, #0x10]\n"
+ "str q15, [x12, #0x20]\n"
+ "str q16, [x12, #0x30]\n"
+ "str q17, [x12, #0x40]\n"
+ "str q18, [x12, #0x50]\n"
+ "add x12, x12, #0x60\n"
+ "str q8, [x22, #0x0]\n"
+ "str q9, [x22, #0x10]\n"
+ "str q10, [x22, #0x20]\n"
+ "str q11, [x22, #0x30]\n"
+ "str q12, [x22, #0x40]\n"
+ "str q13, [x22, #0x50]\n"
+ "88:" // Height 2: Writeback done
+ "subs x13, x13, #0x18\n"
+ "bgt 46b\n"
+ "b 178f\n"
+ "89:" // Height 3
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "90:" // Height 3: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x27, x28, x19, LSL #1\n"
+ "add x26, x27, x19, LSL #1\n"
+ "add x19, x26, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x13, #0x14\n"
+ "bgt 91f\n"
+ "cmp x13, #0x10\n"
+ "mov x26, x11\n"
+ "bgt 91f\n"
+ "cmp x13, #0xc\n"
+ "mov x27, x11\n"
+ "bgt 91f\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
+ "bgt 91f\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
+ "bgt 91f\n"
+ "mov x10, x11\n"
+ "91:" // Height 3: B setup done
+ "cbz x14, 92f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
+ "zip2 v14.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
+ "zip2 v15.2d, v9.2d, v9.2d\n"
+ "zip1 v9.2d, v9.2d, v9.2d\n"
+ "ldr q12, [x14, #0x40]\n"
+ "ldr q13, [x14, #0x50]\n"
+ "zip2 v16.2d, v10.2d, v10.2d\n"
+ "zip1 v10.2d, v10.2d, v10.2d\n"
+ "zip2 v17.2d, v11.2d, v11.2d\n"
+ "zip1 v11.2d, v11.2d, v11.2d\n"
+ "add x14, x14, #0x60\n"
+ "zip2 v18.2d, v12.2d, v12.2d\n"
+ "zip1 v12.2d, v12.2d, v12.2d\n"
+ "zip2 v19.2d, v13.2d, v13.2d\n"
+ "zip1 v13.2d, v13.2d, v13.2d\n"
+ "mov v20.16b, v8.16b\n"
+ "mov v26.16b, v14.16b\n"
+ "mov v21.16b, v9.16b\n"
+ "mov v27.16b, v15.16b\n"
+ "mov v22.16b, v10.16b\n"
+ "mov v28.16b, v16.16b\n"
+ "mov v23.16b, v11.16b\n"
+ "mov v29.16b, v17.16b\n"
+ "mov v24.16b, v12.16b\n"
+ "mov v30.16b, v18.16b\n"
+ "mov v25.16b, v13.16b\n"
+ "mov v31.16b, v19.16b\n"
+ "b 108f\n"
+ "92:" // Height 3: no bias
+ "tbz %x[flags], #0, 107f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x12, x19, LSL #2\n"
+ "cmp x13, #0x18\n"
+ "add x21, x22, x19, LSL #2\n"
+ "bge 105f\n"
+ "tbz x13, #4, 96f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v15.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "ld1 { v11.4s }, [x12], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v23.4s }, [x21], #0x10\n"
+ "ld1 { v12.4s }, [x12], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v24.4s }, [x21], #0x10\n"
+ "tbz x13, #2, 94f\n"
+ "ld1 { v13.4s }, [x12], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "tbz x13, #1, 93f\n"
+ "ldr d20, [x12], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "mov x19, #0x58\n"
+ "ldr d4, [x21], #0x8\n"
+ "tbz x13, #0, 104f\n"
+ "ld1 { v20.s }[2], [x12]\n"
+ "ld1 { v19.s }[2], [x22]\n"
+ "ld1 { v4.s }[2], [x21]\n"
+ "b 104f\n"
+ "93:" // Height 3: Partial accumulate: partial_1_20
+ "mov x19, #0x50\n"
+ "tbz x13, #0, 104f\n"
+ "ldr s20, [x12, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
+ "ldr s4, [x21, #0x0]\n"
+ "b 104f\n"
+ "94:" // Height 3: Partial accumulate: partial_2_16
+ "tbz x13, #1, 95f\n"
+ "ldr d13, [x12], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
+ "mov x19, #0x48\n"
+ "ldr d25, [x21], #0x8\n"
+ "tbz x13, #0, 104f\n"
+ "ld1 { v13.s }[2], [x12]\n"
+ "ld1 { v18.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x21]\n"
+ "b 104f\n"
+ "95:" // Height 3: Partial accumulate: partial_1_16
+ "mov x19, #0x40\n"
+ "tbz x13, #0, 104f\n"
+ "ldr s13, [x12, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
+ "ldr s25, [x21, #0x0]\n"
+ "b 104f\n"
+ "96:" // Height 3: Partial accumulate: partial_8_0
+ "tbz x13, #3, 100f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v15.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "tbz x13, #2, 98f\n"
+ "ld1 { v11.4s }, [x12], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v23.4s }, [x21], #0x10\n"
+ "tbz x13, #1, 97f\n"
+ "ldr d12, [x12], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d24, [x21], #0x8\n"
+ "tbz x13, #0, 104f\n"
+ "ld1 { v12.s }[2], [x12]\n"
+ "ld1 { v17.s }[2], [x22]\n"
+ "ld1 { v24.s }[2], [x21]\n"
+ "b 104f\n"
+ "97:" // Height 3: Partial accumulate: partial_1_12
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 104f\n"
+ "ldr s12, [x12, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
+ "ldr s24, [x21, #0x0]\n"
+ "b 104f\n"
+ "98:" // Height 3: Partial accumulate: partial_2_8
+ "tbz x13, #1, 99f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d23, [x21], #0x8\n"
+ "tbz x13, #0, 104f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "ld1 { v16.s }[2], [x22]\n"
+ "ld1 { v23.s }[2], [x21]\n"
+ "b 104f\n"
+ "99:" // Height 3: Partial accumulate: partial_1_8
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 104f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "ldr s16, [x22, #0x0]\n"
+ "ldr s23, [x21, #0x0]\n"
+ "b 104f\n"
+ "100:" // Height 3: Partial accumulate: partial_4_0
+ "tbz x13, #2, 102f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "tbz x13, #1, 101f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d15, [x22], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d22, [x21], #0x8\n"
+ "tbz x13, #0, 104f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "ld1 { v15.s }[2], [x22]\n"
+ "ld1 { v22.s }[2], [x21]\n"
+ "b 104f\n"
+ "101:" // Height 3: Partial accumulate: partial_1_4
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 104f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "ldr s15, [x22, #0x0]\n"
+ "ldr s22, [x21, #0x0]\n"
+ "b 104f\n"
+ "102:" // Height 3: Partial accumulate: partial_2_0
+ "tbz x13, #1, 103f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d14, [x22], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d21, [x21], #0x8\n"
+ "tbz x13, #0, 104f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "ld1 { v14.s }[2], [x22]\n"
+ "ld1 { v21.s }[2], [x21]\n"
+ "b 104f\n"
+ "103:" // Height 3: Partial accumulate: partial_1_0
+ "ldr s9, [x12, #0x0]\n"
+ "ldr s14, [x22, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s21, [x21, #0x0]\n"
+ "104:" // Height 3: Partial accumulate: Done
+ "sub x12, x12, x19\n"
+ "b 106f\n"
+ "105:" // Height 3: full accumulate
+ "ldr q9, [x12, #0x0]\n"
+ "ldr q10, [x12, #0x10]\n"
+ "ldr q11, [x12, #0x20]\n"
+ "ldr q12, [x12, #0x30]\n"
+ "ldr q13, [x12, #0x40]\n"
+ "ldr q20, [x12, #0x50]\n"
+ "ldr q14, [x22, #0x0]\n"
+ "ldr q15, [x22, #0x10]\n"
+ "ldr q16, [x22, #0x20]\n"
+ "ldr q17, [x22, #0x30]\n"
+ "ldr q18, [x22, #0x40]\n"
+ "ldr q19, [x22, #0x50]\n"
+ "ldr q21, [x21, #0x0]\n"
+ "ldr q22, [x21, #0x10]\n"
+ "ldr q23, [x21, #0x20]\n"
+ "ldr q24, [x21, #0x30]\n"
+ "ldr q25, [x21, #0x40]\n"
+ "ldr q4, [x21, #0x50]\n"
+ "106:" // Height 3: MMLA fixup
+ "zip1 v8.2d, v9.2d, v14.2d\n"
+ "zip2 v14.2d, v9.2d, v14.2d\n"
+ "zip1 v9.2d, v10.2d, v15.2d\n"
+ "zip2 v15.2d, v10.2d, v15.2d\n"
+ "zip1 v10.2d, v11.2d, v16.2d\n"
+ "zip2 v16.2d, v11.2d, v16.2d\n"
+ "zip1 v11.2d, v12.2d, v17.2d\n"
+ "zip2 v17.2d, v12.2d, v17.2d\n"
+ "zip1 v12.2d, v13.2d, v18.2d\n"
+ "zip2 v18.2d, v13.2d, v18.2d\n"
+ "zip1 v13.2d, v20.2d, v19.2d\n"
+ "zip2 v19.2d, v20.2d, v19.2d\n"
+ "zip1 v20.2d, v21.2d, v26.2d\n"
+ "zip2 v26.2d, v21.2d, v26.2d\n"
+ "zip1 v21.2d, v22.2d, v27.2d\n"
+ "zip2 v27.2d, v22.2d, v27.2d\n"
+ "zip1 v22.2d, v23.2d, v28.2d\n"
+ "zip2 v28.2d, v23.2d, v28.2d\n"
+ "zip1 v23.2d, v24.2d, v29.2d\n"
+ "zip2 v29.2d, v24.2d, v29.2d\n"
+ "zip1 v24.2d, v25.2d, v30.2d\n"
+ "zip2 v30.2d, v25.2d, v30.2d\n"
+ "zip1 v25.2d, v4.2d, v31.2d\n"
+ "zip2 v31.2d, v4.2d, v31.2d\n"
+ "b 108f\n"
+ "107:" // Height 3: no accumulate
+ "movi v8.16b, #0x0\n"
+ "movi v9.16b, #0x0\n"
+ "movi v10.16b, #0x0\n"
+ "movi v11.16b, #0x0\n"
+ "movi v12.16b, #0x0\n"
+ "movi v13.16b, #0x0\n"
+ "movi v14.16b, #0x0\n"
+ "movi v15.16b, #0x0\n"
+ "movi v16.16b, #0x0\n"
+ "movi v17.16b, #0x0\n"
+ "movi v18.16b, #0x0\n"
+ "movi v19.16b, #0x0\n"
+ "movi v20.16b, #0x0\n"
+ "movi v21.16b, #0x0\n"
+ "movi v22.16b, #0x0\n"
+ "movi v23.16b, #0x0\n"
+ "movi v24.16b, #0x0\n"
+ "movi v25.16b, #0x0\n"
+ "movi v26.16b, #0x0\n"
+ "movi v27.16b, #0x0\n"
+ "movi v28.16b, #0x0\n"
+ "movi v29.16b, #0x0\n"
+ "movi v30.16b, #0x0\n"
+ "movi v31.16b, #0x0\n"
+ "108:" // Height 3: setup done
+ "mov x25, #0x0\n"
+ "109:" // Height 3: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w24, [x19, x25, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 110f\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "cbnz x25, 111f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "b 111f\n"
+ "110:" // Height 3: setup direct input
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "111:" // Height 3: input setup done
+ "cmp x24, #0x4\n"
+ "blt 114f\n"
+ "ld1 { v0.4s }, [x23], #0x10\n"
+ "ld1 { v1.4s }, [x22], #0x10\n"
+ "cmp x24, #0x8\n"
+ "ld1 { v2.4s }, [x21], #0x10\n"
+ "ldr q4, [x11, #0x0]\n"
+ "ldr q5, [x11, #0x10]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "blt 113f\n"
+ "112:" // Height 3: Multiply loop: Main loop head
+ ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ "ld1 { v1.4s }, [x22], #0x10\n"
+ ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
+ ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ ".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
+ "ldr q4, [x9, #0x0]\n"
+ ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
+ "ldr q5, [x9, #0x10]\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ "sub x24, x24, #0x4\n"
+ ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x28, #0x0]\n"
+ ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
+ "cmp x24, #0x8\n"
+ ".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x28, #0x10]\n"
+ ".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
+ "add x11, x11, #0x20\n"
+ ".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
+ "ldr q4, [x27, #0x0]\n"
+ ".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
+ "add x10, x10, #0x20\n"
+ ".inst 0x6e45ec5c // bfmmla v28.4s, v2.8h, v5.8h\n"
+ "ldr q5, [x27, #0x10]\n"
+ ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
+ "add x9, x9, #0x20\n"
+ ".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x26, #0x0]\n"
+ ".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
+ "add x28, x28, #0x20\n"
+ ".inst 0x6e47ec5d // bfmmla v29.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x26, #0x10]\n"
+ "add x27, x27, #0x20\n"
+ ".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
+ "add x26, x26, #0x20\n"
+ ".inst 0x6e44ec58 // bfmmla v24.4s, v2.8h, v4.8h\n"
+ "ldr q4, [x11, #0x0]\n"
+ ".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e45ec5e // bfmmla v30.4s, v2.8h, v5.8h\n"
+ "ldr q5, [x11, #0x10]\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec59 // bfmmla v25.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x10, #0x0]\n"
+ ".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
+ "ld1 { v0.4s }, [x23], #0x10\n"
+ ".inst 0x6e47ec5f // bfmmla v31.4s, v2.8h, v7.8h\n"
+ "ld1 { v2.4s }, [x21], #0x10\n"
+ "ldr q7, [x10, #0x10]\n"
+ "bge 112b\n"
+ "113:" // Height 3: Multiply loop: Single iteration only
+ ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
+ "sub x24, x24, #0x4\n"
+ ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ ".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
+ "ldr q4, [x9, #0x0]\n"
+ ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
+ "ldr q5, [x9, #0x10]\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ "add x11, x11, #0x20\n"
+ ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x28, #0x0]\n"
+ ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
+ "add x10, x10, #0x20\n"
+ ".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x28, #0x10]\n"
+ ".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
+ "add x9, x9, #0x20\n"
+ ".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
+ "ldr q4, [x27, #0x0]\n"
+ ".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
+ "add x28, x28, #0x20\n"
+ ".inst 0x6e45ec5c // bfmmla v28.4s, v2.8h, v5.8h\n"
+ "ldr q5, [x27, #0x10]\n"
+ ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
+ "add x27, x27, #0x20\n"
+ ".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x26, #0x0]\n"
+ ".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec5d // bfmmla v29.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x26, #0x10]\n"
+ "add x26, x26, #0x20\n"
+ ".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
+ ".inst 0x6e44ec58 // bfmmla v24.4s, v2.8h, v4.8h\n"
+ ".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e45ec5e // bfmmla v30.4s, v2.8h, v5.8h\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec59 // bfmmla v25.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec5f // bfmmla v31.4s, v2.8h, v7.8h\n"
+ "114:" // Height 3: Multiply loop: Main loop skip
+ "cbz x24, 117f\n"
+ "tbz x24, #1, 115f\n"
+ "ldr d0, [x23], #0x8\n"
+ "ldr d1, [x22], #0x8\n"
+ "ldr d2, [x21], #0x8\n"
+ "tbz x24, #0, 116f\n"
+ "ld1 { v0.s }[2], [x23]\n"
+ "ld1 { v1.s }[2], [x22]\n"
+ "ld1 { v2.s }[2], [x21]\n"
+ "b 116f\n"
+ "115:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
+ "ldr s0, [x23, #0x0]\n"
+ "ldr s1, [x22, #0x0]\n"
+ "ldr s2, [x21, #0x0]\n"
+ "116:" // Height 3: Multiply loop: Ragged operand read: Done
+ "ldr q4, [x11, #0x0]\n"
+ "ldr q5, [x11, #0x10]\n"
+ ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
+ ".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
+ "ldr q4, [x9, #0x0]\n"
+ ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
+ "add x11, x11, #0x20\n"
+ ".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
+ "ldr q5, [x9, #0x10]\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ "add x10, x10, #0x20\n"
+ ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x28, #0x0]\n"
+ ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
+ "add x9, x9, #0x20\n"
+ ".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x28, #0x10]\n"
+ ".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
+ "add x28, x28, #0x20\n"
+ ".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
+ "ldr q4, [x27, #0x0]\n"
+ ".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e45ec5c // bfmmla v28.4s, v2.8h, v5.8h\n"
+ "ldr q5, [x27, #0x10]\n"
+ ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
+ "add x27, x27, #0x20\n"
+ ".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x26, #0x0]\n"
+ ".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec5d // bfmmla v29.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x26, #0x10]\n"
+ ".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
+ "add x26, x26, #0x20\n"
+ ".inst 0x6e44ec58 // bfmmla v24.4s, v2.8h, v4.8h\n"
+ ".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e45ec5e // bfmmla v30.4s, v2.8h, v5.8h\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec59 // bfmmla v25.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec5f // bfmmla v31.4s, v2.8h, v7.8h\n"
+ "117:" // Height 3: Multiply loop: No odd multiplies
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
+ "bne 109b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x12, x19, LSL #2\n"
+ "uzp1 v4.2d, v8.2d, v14.2d\n"
+ "uzp2 v8.2d, v8.2d, v14.2d\n"
+ "uzp1 v14.2d, v9.2d, v15.2d\n"
+ "uzp2 v9.2d, v9.2d, v15.2d\n"
+ "add x21, x22, x19, LSL #2\n"
+ "uzp1 v15.2d, v10.2d, v16.2d\n"
+ "uzp2 v10.2d, v10.2d, v16.2d\n"
+ "uzp1 v16.2d, v11.2d, v17.2d\n"
+ "uzp2 v11.2d, v11.2d, v17.2d\n"
+ "uzp1 v17.2d, v12.2d, v18.2d\n"
+ "uzp2 v12.2d, v12.2d, v18.2d\n"
+ "uzp1 v18.2d, v13.2d, v19.2d\n"
+ "uzp2 v13.2d, v13.2d, v19.2d\n"
+ "uzp1 v20.2d, v20.2d, v26.2d\n"
+ "uzp1 v21.2d, v21.2d, v27.2d\n"
+ "uzp1 v22.2d, v22.2d, v28.2d\n"
+ "uzp1 v23.2d, v23.2d, v29.2d\n"
+ "uzp1 v24.2d, v24.2d, v30.2d\n"
+ "uzp1 v25.2d, v25.2d, v31.2d\n"
+ "tbz %x[flags], #1, 118f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v4.4s, v4.4s, v1.4s\n"
+ "fmin v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v1.4s\n"
+ "fmin v16.4s, v16.4s, v1.4s\n"
+ "fmin v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v1.4s\n"
+ "fmin v8.4s, v8.4s, v1.4s\n"
+ "fmin v9.4s, v9.4s, v1.4s\n"
+ "fmin v10.4s, v10.4s, v1.4s\n"
+ "fmin v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v1.4s\n"
+ "fmin v13.4s, v13.4s, v1.4s\n"
+ "fmin v20.4s, v20.4s, v1.4s\n"
+ "fmin v21.4s, v21.4s, v1.4s\n"
+ "fmin v22.4s, v22.4s, v1.4s\n"
+ "fmin v23.4s, v23.4s, v1.4s\n"
+ "fmin v24.4s, v24.4s, v1.4s\n"
+ "fmin v25.4s, v25.4s, v1.4s\n"
+ "fmax v4.4s, v4.4s, v0.4s\n"
+ "fmax v14.4s, v14.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v0.4s\n"
+ "fmax v16.4s, v16.4s, v0.4s\n"
+ "fmax v17.4s, v17.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v0.4s\n"
+ "fmax v9.4s, v9.4s, v0.4s\n"
+ "fmax v10.4s, v10.4s, v0.4s\n"
+ "fmax v11.4s, v11.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v0.4s\n"
+ "fmax v13.4s, v13.4s, v0.4s\n"
+ "fmax v20.4s, v20.4s, v0.4s\n"
+ "fmax v21.4s, v21.4s, v0.4s\n"
+ "fmax v22.4s, v22.4s, v0.4s\n"
+ "fmax v23.4s, v23.4s, v0.4s\n"
+ "fmax v24.4s, v24.4s, v0.4s\n"
+ "fmax v25.4s, v25.4s, v0.4s\n"
+ "118:" // Height 3: No activation
+ "cmp x13, #0x18\n"
+ "bge 131f\n"
+ "tbz x13, #4, 122f\n"
+ "st1 { v4.4s }, [x12], #0x10\n"
+ "st1 { v14.4s }, [x12], #0x10\n"
+ "st1 { v15.4s }, [x12], #0x10\n"
+ "st1 { v16.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x22], #0x10\n"
+ "st1 { v9.4s }, [x22], #0x10\n"
+ "st1 { v10.4s }, [x22], #0x10\n"
+ "st1 { v11.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v21.4s }, [x21], #0x10\n"
+ "st1 { v22.4s }, [x21], #0x10\n"
+ "st1 { v23.4s }, [x21], #0x10\n"
+ "tbz x13, #2, 120f\n"
+ "st1 { v17.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "tbz x13, #1, 119f\n"
+ "str d18, [x12], #0x8\n"
+ "str d13, [x22], #0x8\n"
+ "str d25, [x21], #0x8\n"
+ "tbz x13, #0, 130f\n"
+ "st1 { v18.s }[2], [x12]\n"
+ "st1 { v13.s }[2], [x22]\n"
+ "st1 { v25.s }[2], [x21]\n"
+ "b 130f\n"
+ "119:" // Height 3: Partial direct writeback: partial_1_20
+ "tbz x13, #0, 130f\n"
+ "str s18, [x12, #0x0]\n"
+ "str s13, [x22, #0x0]\n"
+ "str s25, [x21, #0x0]\n"
+ "b 130f\n"
+ "120:" // Height 3: Partial direct writeback: partial_2_16
+ "tbz x13, #1, 121f\n"
+ "str d17, [x12], #0x8\n"
+ "str d12, [x22], #0x8\n"
+ "str d24, [x21], #0x8\n"
+ "tbz x13, #0, 130f\n"
+ "st1 { v17.s }[2], [x12]\n"
+ "st1 { v12.s }[2], [x22]\n"
+ "st1 { v24.s }[2], [x21]\n"
+ "b 130f\n"
+ "121:" // Height 3: Partial direct writeback: partial_1_16
+ "tbz x13, #0, 130f\n"
+ "str s17, [x12, #0x0]\n"
+ "str s12, [x22, #0x0]\n"
+ "str s24, [x21, #0x0]\n"
+ "b 130f\n"
+ "122:" // Height 3: Partial direct writeback: partial_8_0
+ "tbz x13, #3, 126f\n"
+ "st1 { v4.4s }, [x12], #0x10\n"
+ "st1 { v14.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x22], #0x10\n"
+ "st1 { v9.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v21.4s }, [x21], #0x10\n"
+ "tbz x13, #2, 124f\n"
+ "st1 { v15.4s }, [x12], #0x10\n"
+ "st1 { v10.4s }, [x22], #0x10\n"
+ "st1 { v22.4s }, [x21], #0x10\n"
+ "tbz x13, #1, 123f\n"
+ "str d16, [x12], #0x8\n"
+ "str d11, [x22], #0x8\n"
+ "str d23, [x21], #0x8\n"
+ "tbz x13, #0, 130f\n"
+ "st1 { v16.s }[2], [x12]\n"
+ "st1 { v11.s }[2], [x22]\n"
+ "st1 { v23.s }[2], [x21]\n"
+ "b 130f\n"
+ "123:" // Height 3: Partial direct writeback: partial_1_12
+ "tbz x13, #0, 130f\n"
+ "str s16, [x12, #0x0]\n"
+ "str s11, [x22, #0x0]\n"
+ "str s23, [x21, #0x0]\n"
+ "b 130f\n"
+ "124:" // Height 3: Partial direct writeback: partial_2_8
+ "tbz x13, #1, 125f\n"
+ "str d15, [x12], #0x8\n"
+ "str d10, [x22], #0x8\n"
+ "str d22, [x21], #0x8\n"
+ "tbz x13, #0, 130f\n"
+ "st1 { v15.s }[2], [x12]\n"
+ "st1 { v10.s }[2], [x22]\n"
+ "st1 { v22.s }[2], [x21]\n"
+ "b 130f\n"
+ "125:" // Height 3: Partial direct writeback: partial_1_8
+ "tbz x13, #0, 130f\n"
+ "str s15, [x12, #0x0]\n"
+ "str s10, [x22, #0x0]\n"
+ "str s22, [x21, #0x0]\n"
+ "b 130f\n"
+ "126:" // Height 3: Partial direct writeback: partial_4_0
+ "tbz x13, #2, 128f\n"
+ "st1 { v4.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x22], #0x10\n"
+ "st1 { v20.4s }, [x21], #0x10\n"
+ "tbz x13, #1, 127f\n"
+ "str d14, [x12], #0x8\n"
+ "str d9, [x22], #0x8\n"
+ "str d21, [x21], #0x8\n"
+ "tbz x13, #0, 130f\n"
+ "st1 { v14.s }[2], [x12]\n"
+ "st1 { v9.s }[2], [x22]\n"
+ "st1 { v21.s }[2], [x21]\n"
+ "b 130f\n"
+ "127:" // Height 3: Partial direct writeback: partial_1_4
+ "tbz x13, #0, 130f\n"
+ "str s14, [x12, #0x0]\n"
+ "str s9, [x22, #0x0]\n"
+ "str s21, [x21, #0x0]\n"
+ "b 130f\n"
+ "128:" // Height 3: Partial direct writeback: partial_2_0
+ "tbz x13, #1, 129f\n"
+ "str d4, [x12], #0x8\n"
+ "str d8, [x22], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "tbz x13, #0, 130f\n"
+ "st1 { v4.s }[2], [x12]\n"
+ "st1 { v8.s }[2], [x22]\n"
+ "st1 { v20.s }[2], [x21]\n"
+ "b 130f\n"
+ "129:" // Height 3: Partial direct writeback: partial_1_0
+ "str s4, [x12, #0x0]\n"
+ "str s8, [x22, #0x0]\n"
+ "str s20, [x21, #0x0]\n"
+ "130:" // Height 3: Partial direct writeback: Done
+ "b 132f\n"
+ "131:" // Height 3: Full writeback
+ "str q4, [x12, #0x0]\n"
+ "str q14, [x12, #0x10]\n"
+ "str q15, [x12, #0x20]\n"
+ "str q16, [x12, #0x30]\n"
+ "str q17, [x12, #0x40]\n"
+ "str q18, [x12, #0x50]\n"
+ "add x12, x12, #0x60\n"
+ "str q8, [x22, #0x0]\n"
+ "str q9, [x22, #0x10]\n"
+ "str q10, [x22, #0x20]\n"
+ "str q11, [x22, #0x30]\n"
+ "str q12, [x22, #0x40]\n"
+ "str q13, [x22, #0x50]\n"
+ "str q20, [x21, #0x0]\n"
+ "str q21, [x21, #0x10]\n"
+ "str q22, [x21, #0x20]\n"
+ "str q23, [x21, #0x30]\n"
+ "str q24, [x21, #0x40]\n"
+ "str q25, [x21, #0x50]\n"
+ "132:" // Height 3: Writeback done
+ "subs x13, x13, #0x18\n"
+ "bgt 90b\n"
+ "b 178f\n"
+ "133:" // Height 4
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x20, #0x10\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x14, %x[bias]\n"
+ "mov x12, %x[output_ptr]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
+ "134:" // Height 4: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x27, x28, x19, LSL #1\n"
+ "add x26, x27, x19, LSL #1\n"
+ "add x19, x26, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
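+      // Fewer than 24 output columns left: alias the trailing B column-block
+      // pointers back to the first block so their loads still read valid
+      // weights; results for the surplus columns are never written back.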
+ "cmp x13, #0x14\n"
+ "bgt 135f\n"
+ "cmp x13, #0x10\n"
+ "mov x26, x11\n"
+ "bgt 135f\n"
+ "cmp x13, #0xc\n"
+ "mov x27, x11\n"
+ "bgt 135f\n"
+ "cmp x13, #0x8\n"
+ "mov x28, x11\n"
+ "bgt 135f\n"
+ "cmp x13, #0x4\n"
+ "mov x9, x11\n"
+ "bgt 135f\n"
+ "mov x10, x11\n"
+ "135:" // Height 4: B setup done
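+      // Bias path: zip1/zip2 replicate each pair of bias values across both
+      // rows of a 2x2 BFMMLA accumulator tile; v20-v31 copy the same seed for
+      // rows 2-3.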
+ "cbz x14, 136f\n"
+ "ldr q8, [x14, #0x0]\n"
+ "ldr q9, [x14, #0x10]\n"
+ "zip2 v14.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
+ "ldr q10, [x14, #0x20]\n"
+ "ldr q11, [x14, #0x30]\n"
+ "zip2 v15.2d, v9.2d, v9.2d\n"
+ "zip1 v9.2d, v9.2d, v9.2d\n"
+ "ldr q12, [x14, #0x40]\n"
+ "ldr q13, [x14, #0x50]\n"
+ "zip2 v16.2d, v10.2d, v10.2d\n"
+ "zip1 v10.2d, v10.2d, v10.2d\n"
+ "zip2 v17.2d, v11.2d, v11.2d\n"
+ "zip1 v11.2d, v11.2d, v11.2d\n"
+ "add x14, x14, #0x60\n"
+ "zip2 v18.2d, v12.2d, v12.2d\n"
+ "zip1 v12.2d, v12.2d, v12.2d\n"
+ "zip2 v19.2d, v13.2d, v13.2d\n"
+ "zip1 v13.2d, v13.2d, v13.2d\n"
+ "mov v20.16b, v8.16b\n"
+ "mov v26.16b, v14.16b\n"
+ "mov v21.16b, v9.16b\n"
+ "mov v27.16b, v15.16b\n"
+ "mov v22.16b, v10.16b\n"
+ "mov v28.16b, v16.16b\n"
+ "mov v23.16b, v11.16b\n"
+ "mov v29.16b, v17.16b\n"
+ "mov v24.16b, v12.16b\n"
+ "mov v30.16b, v18.16b\n"
+ "mov v25.16b, v13.16b\n"
+ "mov v31.16b, v19.16b\n"
+ "b 152f\n"
+ "136:" // Height 4: no bias
+ "tbz %x[flags], #0, 151f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x12, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "cmp x13, #0x18\n"
+ "add x20, x21, x19, LSL #2\n"
+ "bge 149f\n"
+ "tbz x13, #4, 140f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v26.4s }, [x20], #0x10\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v15.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "ld1 { v27.4s }, [x20], #0x10\n"
+ "ld1 { v11.4s }, [x12], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v23.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x20], #0x10\n"
+ "ld1 { v12.4s }, [x12], #0x10\n"
+ "ld1 { v17.4s }, [x22], #0x10\n"
+ "ld1 { v24.4s }, [x21], #0x10\n"
+ "ld1 { v29.4s }, [x20], #0x10\n"
+ "tbz x13, #2, 138f\n"
+ "ld1 { v13.4s }, [x12], #0x10\n"
+ "ld1 { v18.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "ld1 { v30.4s }, [x20], #0x10\n"
+ "tbz x13, #1, 137f\n"
+ "ldr d20, [x12], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
+ "mov x19, #0x58\n"
+ "ldr d4, [x21], #0x8\n"
+ "ldr d31, [x20], #0x8\n"
+ "tbz x13, #0, 148f\n"
+ "ld1 { v20.s }[2], [x12]\n"
+ "ld1 { v19.s }[2], [x22]\n"
+ "ld1 { v4.s }[2], [x21]\n"
+ "ld1 { v31.s }[2], [x20]\n"
+ "b 148f\n"
+ "137:" // Height 4: Partial accumulate: partial_1_20
+ "mov x19, #0x50\n"
+ "tbz x13, #0, 148f\n"
+ "ldr s20, [x12, #0x0]\n"
+ "ldr s19, [x22, #0x0]\n"
+ "ldr s4, [x21, #0x0]\n"
+ "ldr s31, [x20, #0x0]\n"
+ "b 148f\n"
+ "138:" // Height 4: Partial accumulate: partial_2_16
+ "tbz x13, #1, 139f\n"
+ "ldr d13, [x12], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
+ "mov x19, #0x48\n"
+ "ldr d25, [x21], #0x8\n"
+ "ldr d30, [x20], #0x8\n"
+ "tbz x13, #0, 148f\n"
+ "ld1 { v13.s }[2], [x12]\n"
+ "ld1 { v18.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x21]\n"
+ "ld1 { v30.s }[2], [x20]\n"
+ "b 148f\n"
+ "139:" // Height 4: Partial accumulate: partial_1_16
+ "mov x19, #0x40\n"
+ "tbz x13, #0, 148f\n"
+ "ldr s13, [x12, #0x0]\n"
+ "ldr s18, [x22, #0x0]\n"
+ "ldr s25, [x21, #0x0]\n"
+ "ldr s30, [x20, #0x0]\n"
+ "b 148f\n"
+ "140:" // Height 4: Partial accumulate: partial_8_0
+ "tbz x13, #3, 144f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v26.4s }, [x20], #0x10\n"
+ "ld1 { v10.4s }, [x12], #0x10\n"
+ "ld1 { v15.4s }, [x22], #0x10\n"
+ "ld1 { v22.4s }, [x21], #0x10\n"
+ "ld1 { v27.4s }, [x20], #0x10\n"
+ "tbz x13, #2, 142f\n"
+ "ld1 { v11.4s }, [x12], #0x10\n"
+ "ld1 { v16.4s }, [x22], #0x10\n"
+ "ld1 { v23.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x20], #0x10\n"
+ "tbz x13, #1, 141f\n"
+ "ldr d12, [x12], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "mov x19, #0x38\n"
+ "ldr d24, [x21], #0x8\n"
+ "ldr d29, [x20], #0x8\n"
+ "tbz x13, #0, 148f\n"
+ "ld1 { v12.s }[2], [x12]\n"
+ "ld1 { v17.s }[2], [x22]\n"
+ "ld1 { v24.s }[2], [x21]\n"
+ "ld1 { v29.s }[2], [x20]\n"
+ "b 148f\n"
+ "141:" // Height 4: Partial accumulate: partial_1_12
+ "mov x19, #0x30\n"
+ "tbz x13, #0, 148f\n"
+ "ldr s12, [x12, #0x0]\n"
+ "ldr s17, [x22, #0x0]\n"
+ "ldr s24, [x21, #0x0]\n"
+ "ldr s29, [x20, #0x0]\n"
+ "b 148f\n"
+ "142:" // Height 4: Partial accumulate: partial_2_8
+ "tbz x13, #1, 143f\n"
+ "ldr d11, [x12], #0x8\n"
+ "ldr d16, [x22], #0x8\n"
+ "mov x19, #0x28\n"
+ "ldr d23, [x21], #0x8\n"
+ "ldr d28, [x20], #0x8\n"
+ "tbz x13, #0, 148f\n"
+ "ld1 { v11.s }[2], [x12]\n"
+ "ld1 { v16.s }[2], [x22]\n"
+ "ld1 { v23.s }[2], [x21]\n"
+ "ld1 { v28.s }[2], [x20]\n"
+ "b 148f\n"
+ "143:" // Height 4: Partial accumulate: partial_1_8
+ "mov x19, #0x20\n"
+ "tbz x13, #0, 148f\n"
+ "ldr s11, [x12, #0x0]\n"
+ "ldr s16, [x22, #0x0]\n"
+ "ldr s23, [x21, #0x0]\n"
+ "ldr s28, [x20, #0x0]\n"
+ "b 148f\n"
+ "144:" // Height 4: Partial accumulate: partial_4_0
+ "tbz x13, #2, 146f\n"
+ "ld1 { v9.4s }, [x12], #0x10\n"
+ "ld1 { v14.4s }, [x22], #0x10\n"
+ "ld1 { v21.4s }, [x21], #0x10\n"
+ "ld1 { v26.4s }, [x20], #0x10\n"
+ "tbz x13, #1, 145f\n"
+ "ldr d10, [x12], #0x8\n"
+ "ldr d15, [x22], #0x8\n"
+ "mov x19, #0x18\n"
+ "ldr d22, [x21], #0x8\n"
+ "ldr d27, [x20], #0x8\n"
+ "tbz x13, #0, 148f\n"
+ "ld1 { v10.s }[2], [x12]\n"
+ "ld1 { v15.s }[2], [x22]\n"
+ "ld1 { v22.s }[2], [x21]\n"
+ "ld1 { v27.s }[2], [x20]\n"
+ "b 148f\n"
+ "145:" // Height 4: Partial accumulate: partial_1_4
+ "mov x19, #0x10\n"
+ "tbz x13, #0, 148f\n"
+ "ldr s10, [x12, #0x0]\n"
+ "ldr s15, [x22, #0x0]\n"
+ "ldr s22, [x21, #0x0]\n"
+ "ldr s27, [x20, #0x0]\n"
+ "b 148f\n"
+ "146:" // Height 4: Partial accumulate: partial_2_0
+ "tbz x13, #1, 147f\n"
+ "ldr d9, [x12], #0x8\n"
+ "ldr d14, [x22], #0x8\n"
+ "mov x19, #0x8\n"
+ "ldr d21, [x21], #0x8\n"
+ "ldr d26, [x20], #0x8\n"
+ "tbz x13, #0, 148f\n"
+ "ld1 { v9.s }[2], [x12]\n"
+ "ld1 { v14.s }[2], [x22]\n"
+ "ld1 { v21.s }[2], [x21]\n"
+ "ld1 { v26.s }[2], [x20]\n"
+ "b 148f\n"
+ "147:" // Height 4: Partial accumulate: partial_1_0
+ "ldr s9, [x12, #0x0]\n"
+ "ldr s14, [x22, #0x0]\n"
+ "mov x19, #0x0\n"
+ "ldr s21, [x21, #0x0]\n"
+ "ldr s26, [x20, #0x0]\n"
+ "148:" // Height 4: Partial accumulate: Done
+ "sub x12, x12, x19\n"
+ "b 150f\n"
+ "149:" // Height 4: full accumulate
+ "ldr q9, [x12, #0x0]\n"
+ "ldr q10, [x12, #0x10]\n"
+ "ldr q11, [x12, #0x20]\n"
+ "ldr q12, [x12, #0x30]\n"
+ "ldr q13, [x12, #0x40]\n"
+ "ldr q20, [x12, #0x50]\n"
+ "ldr q14, [x22, #0x0]\n"
+ "ldr q15, [x22, #0x10]\n"
+ "ldr q16, [x22, #0x20]\n"
+ "ldr q17, [x22, #0x30]\n"
+ "ldr q18, [x22, #0x40]\n"
+ "ldr q19, [x22, #0x50]\n"
+ "ldr q21, [x21, #0x0]\n"
+ "ldr q22, [x21, #0x10]\n"
+ "ldr q23, [x21, #0x20]\n"
+ "ldr q24, [x21, #0x30]\n"
+ "ldr q25, [x21, #0x40]\n"
+ "ldr q4, [x21, #0x50]\n"
+ "ldr q26, [x20, #0x0]\n"
+ "ldr q27, [x20, #0x10]\n"
+ "ldr q28, [x20, #0x20]\n"
+ "ldr q29, [x20, #0x30]\n"
+ "ldr q30, [x20, #0x40]\n"
+ "ldr q31, [x20, #0x50]\n"
+ "150:" // Height 4: MMLA fixup
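+      // Interleave the row-major values just loaded from C into the paired
+      // 2x2 tile layout that BFMMLA accumulates into.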
+ "zip1 v8.2d, v9.2d, v14.2d\n"
+ "zip2 v14.2d, v9.2d, v14.2d\n"
+ "zip1 v9.2d, v10.2d, v15.2d\n"
+ "zip2 v15.2d, v10.2d, v15.2d\n"
+ "zip1 v10.2d, v11.2d, v16.2d\n"
+ "zip2 v16.2d, v11.2d, v16.2d\n"
+ "zip1 v11.2d, v12.2d, v17.2d\n"
+ "zip2 v17.2d, v12.2d, v17.2d\n"
+ "zip1 v12.2d, v13.2d, v18.2d\n"
+ "zip2 v18.2d, v13.2d, v18.2d\n"
+ "zip1 v13.2d, v20.2d, v19.2d\n"
+ "zip2 v19.2d, v20.2d, v19.2d\n"
+ "zip1 v20.2d, v21.2d, v26.2d\n"
+ "zip2 v26.2d, v21.2d, v26.2d\n"
+ "zip1 v21.2d, v22.2d, v27.2d\n"
+ "zip2 v27.2d, v22.2d, v27.2d\n"
+ "zip1 v22.2d, v23.2d, v28.2d\n"
+ "zip2 v28.2d, v23.2d, v28.2d\n"
+ "zip1 v23.2d, v24.2d, v29.2d\n"
+ "zip2 v29.2d, v24.2d, v29.2d\n"
+ "zip1 v24.2d, v25.2d, v30.2d\n"
+ "zip2 v30.2d, v25.2d, v30.2d\n"
+ "zip1 v25.2d, v4.2d, v31.2d\n"
+ "zip2 v31.2d, v4.2d, v31.2d\n"
+ "b 152f\n"
+ "151:" // Height 4: no accumulate
+ "movi v8.16b, #0x0\n"
+ "movi v9.16b, #0x0\n"
+ "movi v10.16b, #0x0\n"
+ "movi v11.16b, #0x0\n"
+ "movi v12.16b, #0x0\n"
+ "movi v13.16b, #0x0\n"
+ "movi v14.16b, #0x0\n"
+ "movi v15.16b, #0x0\n"
+ "movi v16.16b, #0x0\n"
+ "movi v17.16b, #0x0\n"
+ "movi v18.16b, #0x0\n"
+ "movi v19.16b, #0x0\n"
+ "movi v20.16b, #0x0\n"
+ "movi v21.16b, #0x0\n"
+ "movi v22.16b, #0x0\n"
+ "movi v23.16b, #0x0\n"
+ "movi v24.16b, #0x0\n"
+ "movi v25.16b, #0x0\n"
+ "movi v26.16b, #0x0\n"
+ "movi v27.16b, #0x0\n"
+ "movi v28.16b, #0x0\n"
+ "movi v29.16b, #0x0\n"
+ "movi v30.16b, #0x0\n"
+ "movi v31.16b, #0x0\n"
+ "152:" // Height 4: setup done
+ "mov x25, #0x0\n"
+ "153:" // Height 4: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w24, [x19, x25, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 154f\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "ldr x20, [x20, #0x18]\n"
+ "cbnz x25, 155f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "add x20, x20, x19, LSL #2\n"
+ "b 155f\n"
+ "154:" // Height 4: setup direct input
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "155:" // Height 4: input setup done
+ "cmp x24, #0x4\n"
+ "blt 158f\n"
+ "ld1 { v0.4s }, [x23], #0x10\n"
+ "ld1 { v2.4s }, [x21], #0x10\n"
+ "cmp x24, #0x8\n"
+ "ld1 { v1.4s }, [x22], #0x10\n"
+ "ld1 { v3.4s }, [x20], #0x10\n"
+ "ldr q4, [x11, #0x0]\n"
+ "ldr q5, [x11, #0x10]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "blt 157f\n"
+ "156:" // Height 4: Multiply loop: Main loop head
+ ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ "sub x24, x24, #0x4\n"
+ "cmp x24, #0x8\n"
+ ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ "ld1 { v1.4s }, [x22], #0x10\n"
+ ".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
+ "ld1 { v3.4s }, [x20], #0x10\n"
+ ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
+ ".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
+ "ldr q4, [x9, #0x0]\n"
+ "add x11, x11, #0x20\n"
+ ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
+ "ldr q5, [x9, #0x10]\n"
+ "add x10, x10, #0x20\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x28, #0x0]\n"
+ "add x9, x9, #0x20\n"
+ ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x28, #0x10]\n"
+ "add x28, x28, #0x20\n"
+ ".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
+ ".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
+ "ldr q4, [x27, #0x0]\n"
+ ".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e45ec5c // bfmmla v28.4s, v2.8h, v5.8h\n"
+ "ldr q5, [x27, #0x10]\n"
+ "add x27, x27, #0x20\n"
+ ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x26, #0x0]\n"
+ ".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec5d // bfmmla v29.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x26, #0x10]\n"
+ "add x26, x26, #0x20\n"
+ ".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
+ ".inst 0x6e44ec58 // bfmmla v24.4s, v2.8h, v4.8h\n"
+ "ldr q4, [x11, #0x0]\n"
+ ".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e45ec5e // bfmmla v30.4s, v2.8h, v5.8h\n"
+ "ldr q5, [x11, #0x10]\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec59 // bfmmla v25.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x10, #0x0]\n"
+ ".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
+ "ld1 { v0.4s }, [x23], #0x10\n"
+ ".inst 0x6e47ec5f // bfmmla v31.4s, v2.8h, v7.8h\n"
+ "ld1 { v2.4s }, [x21], #0x10\n"
+ "ldr q7, [x10, #0x10]\n"
+ "bge 156b\n"
+ "157:" // Height 4: Multiply loop: Single iteration only
+ ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ "sub x24, x24, #0x4\n"
+ "add x11, x11, #0x20\n"
+ ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ ".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
+ ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
+ "add x10, x10, #0x20\n"
+ ".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
+ "ldr q4, [x9, #0x0]\n"
+ ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
+ "ldr q5, [x9, #0x10]\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ "add x9, x9, #0x20\n"
+ ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x28, #0x0]\n"
+ ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x28, #0x10]\n"
+ ".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
+ "add x28, x28, #0x20\n"
+ ".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
+ "ldr q4, [x27, #0x0]\n"
+ ".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e45ec5c // bfmmla v28.4s, v2.8h, v5.8h\n"
+ "ldr q5, [x27, #0x10]\n"
+ ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
+ "add x27, x27, #0x20\n"
+ ".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x26, #0x0]\n"
+ ".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec5d // bfmmla v29.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x26, #0x10]\n"
+ "add x26, x26, #0x20\n"
+ ".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
+ ".inst 0x6e44ec58 // bfmmla v24.4s, v2.8h, v4.8h\n"
+ ".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e45ec5e // bfmmla v30.4s, v2.8h, v5.8h\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec59 // bfmmla v25.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec5f // bfmmla v31.4s, v2.8h, v7.8h\n"
+ "158:" // Height 4: Multiply loop: Main loop skip
+ "cbz x24, 161f\n"
+ "tbz x24, #1, 159f\n"
+ "ldr d0, [x23], #0x8\n"
+ "ldr d1, [x22], #0x8\n"
+ "ldr d2, [x21], #0x8\n"
+ "ldr d3, [x20], #0x8\n"
+ "tbz x24, #0, 160f\n"
+ "ld1 { v0.s }[2], [x23]\n"
+ "ld1 { v1.s }[2], [x22]\n"
+ "ld1 { v2.s }[2], [x21]\n"
+ "ld1 { v3.s }[2], [x20]\n"
+ "b 160f\n"
+ "159:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
+ "ldr s0, [x23, #0x0]\n"
+ "ldr s1, [x22, #0x0]\n"
+ "ldr s2, [x21, #0x0]\n"
+ "ldr s3, [x20, #0x0]\n"
+ "160:" // Height 4: Multiply loop: Ragged operand read: Done
+ "ldr q4, [x11, #0x0]\n"
+ "ldr q5, [x11, #0x10]\n"
+ ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ ".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
+ ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
+ ".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
+ "ldr q4, [x9, #0x0]\n"
+ "add x11, x11, #0x20\n"
+ ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
+ "ldr q5, [x9, #0x10]\n"
+ "add x10, x10, #0x20\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x28, #0x0]\n"
+ "add x9, x9, #0x20\n"
+ ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x28, #0x10]\n"
+ "add x28, x28, #0x20\n"
+ ".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
+ ".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
+ "ldr q4, [x27, #0x0]\n"
+ ".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e45ec5c // bfmmla v28.4s, v2.8h, v5.8h\n"
+ "ldr q5, [x27, #0x10]\n"
+ "add x27, x27, #0x20\n"
+ ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x26, #0x0]\n"
+ ".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec5d // bfmmla v29.4s, v2.8h, v7.8h\n"
+ "ldr q7, [x26, #0x10]\n"
+ "add x26, x26, #0x20\n"
+ ".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
+ ".inst 0x6e44ec58 // bfmmla v24.4s, v2.8h, v4.8h\n"
+ ".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e45ec5e // bfmmla v30.4s, v2.8h, v5.8h\n"
+ ".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec59 // bfmmla v25.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e47ec13 // bfmmla v19.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec5f // bfmmla v31.4s, v2.8h, v7.8h\n"
+ "161:" // Height 4: Multiply loop: No odd multiplies
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
+ "bne 153b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x12, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
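+      // Undo the MMLA interleaving: uzp1 recovers the first row of each tile
+      // pair, uzp2 the second, restoring row-major data for writeback.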
+ "uzp1 v4.2d, v8.2d, v14.2d\n"
+ "uzp2 v8.2d, v8.2d, v14.2d\n"
+ "uzp1 v14.2d, v9.2d, v15.2d\n"
+ "add x20, x21, x19, LSL #2\n"
+ "uzp2 v9.2d, v9.2d, v15.2d\n"
+ "uzp1 v15.2d, v10.2d, v16.2d\n"
+ "uzp2 v10.2d, v10.2d, v16.2d\n"
+ "uzp1 v16.2d, v11.2d, v17.2d\n"
+ "uzp2 v11.2d, v11.2d, v17.2d\n"
+ "uzp1 v17.2d, v12.2d, v18.2d\n"
+ "uzp2 v12.2d, v12.2d, v18.2d\n"
+ "uzp1 v18.2d, v13.2d, v19.2d\n"
+ "uzp2 v13.2d, v13.2d, v19.2d\n"
+ "uzp1 v19.2d, v20.2d, v26.2d\n"
+ "uzp2 v20.2d, v20.2d, v26.2d\n"
+ "uzp1 v26.2d, v21.2d, v27.2d\n"
+ "uzp2 v21.2d, v21.2d, v27.2d\n"
+ "uzp1 v27.2d, v22.2d, v28.2d\n"
+ "uzp2 v22.2d, v22.2d, v28.2d\n"
+ "uzp1 v28.2d, v23.2d, v29.2d\n"
+ "uzp2 v23.2d, v23.2d, v29.2d\n"
+ "uzp1 v29.2d, v24.2d, v30.2d\n"
+ "uzp2 v24.2d, v24.2d, v30.2d\n"
+ "uzp1 v30.2d, v25.2d, v31.2d\n"
+ "uzp2 v25.2d, v25.2d, v31.2d\n"
+ "tbz %x[flags], #1, 162f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1r { v1.4s }, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v0.4s }, [x19]\n"
+ "fmin v4.4s, v4.4s, v1.4s\n"
+ "fmin v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v1.4s\n"
+ "fmin v16.4s, v16.4s, v1.4s\n"
+ "fmin v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v1.4s\n"
+ "fmin v8.4s, v8.4s, v1.4s\n"
+ "fmin v9.4s, v9.4s, v1.4s\n"
+ "fmin v10.4s, v10.4s, v1.4s\n"
+ "fmin v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v1.4s\n"
+ "fmin v13.4s, v13.4s, v1.4s\n"
+ "fmin v19.4s, v19.4s, v1.4s\n"
+ "fmin v26.4s, v26.4s, v1.4s\n"
+ "fmin v27.4s, v27.4s, v1.4s\n"
+ "fmin v28.4s, v28.4s, v1.4s\n"
+ "fmin v29.4s, v29.4s, v1.4s\n"
+ "fmin v30.4s, v30.4s, v1.4s\n"
+ "fmin v20.4s, v20.4s, v1.4s\n"
+ "fmin v21.4s, v21.4s, v1.4s\n"
+ "fmin v22.4s, v22.4s, v1.4s\n"
+ "fmin v23.4s, v23.4s, v1.4s\n"
+ "fmin v24.4s, v24.4s, v1.4s\n"
+ "fmin v25.4s, v25.4s, v1.4s\n"
+ "fmax v4.4s, v4.4s, v0.4s\n"
+ "fmax v14.4s, v14.4s, v0.4s\n"
+ "fmax v15.4s, v15.4s, v0.4s\n"
+ "fmax v16.4s, v16.4s, v0.4s\n"
+ "fmax v17.4s, v17.4s, v0.4s\n"
+ "fmax v18.4s, v18.4s, v0.4s\n"
+ "fmax v8.4s, v8.4s, v0.4s\n"
+ "fmax v9.4s, v9.4s, v0.4s\n"
+ "fmax v10.4s, v10.4s, v0.4s\n"
+ "fmax v11.4s, v11.4s, v0.4s\n"
+ "fmax v12.4s, v12.4s, v0.4s\n"
+ "fmax v13.4s, v13.4s, v0.4s\n"
+ "fmax v19.4s, v19.4s, v0.4s\n"
+ "fmax v26.4s, v26.4s, v0.4s\n"
+ "fmax v27.4s, v27.4s, v0.4s\n"
+ "fmax v28.4s, v28.4s, v0.4s\n"
+ "fmax v29.4s, v29.4s, v0.4s\n"
+ "fmax v30.4s, v30.4s, v0.4s\n"
+ "fmax v20.4s, v20.4s, v0.4s\n"
+ "fmax v21.4s, v21.4s, v0.4s\n"
+ "fmax v22.4s, v22.4s, v0.4s\n"
+ "fmax v23.4s, v23.4s, v0.4s\n"
+ "fmax v24.4s, v24.4s, v0.4s\n"
+ "fmax v25.4s, v25.4s, v0.4s\n"
+ "162:" // Height 4: No activation
+ "cmp x13, #0x18\n"
+ "bge 175f\n"
+ "tbz x13, #4, 166f\n"
+ "st1 { v4.4s }, [x12], #0x10\n"
+ "st1 { v14.4s }, [x12], #0x10\n"
+ "st1 { v15.4s }, [x12], #0x10\n"
+ "st1 { v16.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x22], #0x10\n"
+ "st1 { v9.4s }, [x22], #0x10\n"
+ "st1 { v10.4s }, [x22], #0x10\n"
+ "st1 { v11.4s }, [x22], #0x10\n"
+ "st1 { v19.4s }, [x21], #0x10\n"
+ "st1 { v26.4s }, [x21], #0x10\n"
+ "st1 { v27.4s }, [x21], #0x10\n"
+ "st1 { v28.4s }, [x21], #0x10\n"
+ "st1 { v20.4s }, [x20], #0x10\n"
+ "st1 { v21.4s }, [x20], #0x10\n"
+ "st1 { v22.4s }, [x20], #0x10\n"
+ "st1 { v23.4s }, [x20], #0x10\n"
+ "tbz x13, #2, 164f\n"
+ "st1 { v17.4s }, [x12], #0x10\n"
+ "st1 { v12.4s }, [x22], #0x10\n"
+ "st1 { v29.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "tbz x13, #1, 163f\n"
+ "str d18, [x12], #0x8\n"
+ "str d13, [x22], #0x8\n"
+ "str d30, [x21], #0x8\n"
+ "str d25, [x20], #0x8\n"
+ "tbz x13, #0, 174f\n"
+ "st1 { v18.s }[2], [x12]\n"
+ "st1 { v13.s }[2], [x22]\n"
+ "st1 { v30.s }[2], [x21]\n"
+ "st1 { v25.s }[2], [x20]\n"
+ "b 174f\n"
+ "163:" // Height 4: Partial direct writeback: partial_1_20
+ "tbz x13, #0, 174f\n"
+ "str s18, [x12, #0x0]\n"
+ "str s13, [x22, #0x0]\n"
+ "str s30, [x21, #0x0]\n"
+ "str s25, [x20, #0x0]\n"
+ "b 174f\n"
+ "164:" // Height 4: Partial direct writeback: partial_2_16
+ "tbz x13, #1, 165f\n"
+ "str d17, [x12], #0x8\n"
+ "str d12, [x22], #0x8\n"
+ "str d29, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
+ "tbz x13, #0, 174f\n"
+ "st1 { v17.s }[2], [x12]\n"
+ "st1 { v12.s }[2], [x22]\n"
+ "st1 { v29.s }[2], [x21]\n"
+ "st1 { v24.s }[2], [x20]\n"
+ "b 174f\n"
+ "165:" // Height 4: Partial direct writeback: partial_1_16
+ "tbz x13, #0, 174f\n"
+ "str s17, [x12, #0x0]\n"
+ "str s12, [x22, #0x0]\n"
+ "str s29, [x21, #0x0]\n"
+ "str s24, [x20, #0x0]\n"
+ "b 174f\n"
+ "166:" // Height 4: Partial direct writeback: partial_8_0
+ "tbz x13, #3, 170f\n"
+ "st1 { v4.4s }, [x12], #0x10\n"
+ "st1 { v14.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x22], #0x10\n"
+ "st1 { v9.4s }, [x22], #0x10\n"
+ "st1 { v19.4s }, [x21], #0x10\n"
+ "st1 { v26.4s }, [x21], #0x10\n"
+ "st1 { v20.4s }, [x20], #0x10\n"
+ "st1 { v21.4s }, [x20], #0x10\n"
+ "tbz x13, #2, 168f\n"
+ "st1 { v15.4s }, [x12], #0x10\n"
+ "st1 { v10.4s }, [x22], #0x10\n"
+ "st1 { v27.4s }, [x21], #0x10\n"
+ "st1 { v22.4s }, [x20], #0x10\n"
+ "tbz x13, #1, 167f\n"
+ "str d16, [x12], #0x8\n"
+ "str d11, [x22], #0x8\n"
+ "str d28, [x21], #0x8\n"
+ "str d23, [x20], #0x8\n"
+ "tbz x13, #0, 174f\n"
+ "st1 { v16.s }[2], [x12]\n"
+ "st1 { v11.s }[2], [x22]\n"
+ "st1 { v28.s }[2], [x21]\n"
+ "st1 { v23.s }[2], [x20]\n"
+ "b 174f\n"
+ "167:" // Height 4: Partial direct writeback: partial_1_12
+ "tbz x13, #0, 174f\n"
+ "str s16, [x12, #0x0]\n"
+ "str s11, [x22, #0x0]\n"
+ "str s28, [x21, #0x0]\n"
+ "str s23, [x20, #0x0]\n"
+ "b 174f\n"
+ "168:" // Height 4: Partial direct writeback: partial_2_8
+ "tbz x13, #1, 169f\n"
+ "str d15, [x12], #0x8\n"
+ "str d10, [x22], #0x8\n"
+ "str d27, [x21], #0x8\n"
+ "str d22, [x20], #0x8\n"
+ "tbz x13, #0, 174f\n"
+ "st1 { v15.s }[2], [x12]\n"
+ "st1 { v10.s }[2], [x22]\n"
+ "st1 { v27.s }[2], [x21]\n"
+ "st1 { v22.s }[2], [x20]\n"
+ "b 174f\n"
+ "169:" // Height 4: Partial direct writeback: partial_1_8
+ "tbz x13, #0, 174f\n"
+ "str s15, [x12, #0x0]\n"
+ "str s10, [x22, #0x0]\n"
+ "str s27, [x21, #0x0]\n"
+ "str s22, [x20, #0x0]\n"
+ "b 174f\n"
+ "170:" // Height 4: Partial direct writeback: partial_4_0
+ "tbz x13, #2, 172f\n"
+ "st1 { v4.4s }, [x12], #0x10\n"
+ "st1 { v8.4s }, [x22], #0x10\n"
+ "st1 { v19.4s }, [x21], #0x10\n"
+ "st1 { v20.4s }, [x20], #0x10\n"
+ "tbz x13, #1, 171f\n"
+ "str d14, [x12], #0x8\n"
+ "str d9, [x22], #0x8\n"
+ "str d26, [x21], #0x8\n"
+ "str d21, [x20], #0x8\n"
+ "tbz x13, #0, 174f\n"
+ "st1 { v14.s }[2], [x12]\n"
+ "st1 { v9.s }[2], [x22]\n"
+ "st1 { v26.s }[2], [x21]\n"
+ "st1 { v21.s }[2], [x20]\n"
+ "b 174f\n"
+ "171:" // Height 4: Partial direct writeback: partial_1_4
+ "tbz x13, #0, 174f\n"
+ "str s14, [x12, #0x0]\n"
+ "str s9, [x22, #0x0]\n"
+ "str s26, [x21, #0x0]\n"
+ "str s21, [x20, #0x0]\n"
+ "b 174f\n"
+ "172:" // Height 4: Partial direct writeback: partial_2_0
+ "tbz x13, #1, 173f\n"
+ "str d4, [x12], #0x8\n"
+ "str d8, [x22], #0x8\n"
+ "str d19, [x21], #0x8\n"
+ "str d20, [x20], #0x8\n"
+ "tbz x13, #0, 174f\n"
+ "st1 { v4.s }[2], [x12]\n"
+ "st1 { v8.s }[2], [x22]\n"
+ "st1 { v19.s }[2], [x21]\n"
+ "st1 { v20.s }[2], [x20]\n"
+ "b 174f\n"
+ "173:" // Height 4: Partial direct writeback: partial_1_0
+ "str s4, [x12, #0x0]\n"
+ "str s8, [x22, #0x0]\n"
+ "str s19, [x21, #0x0]\n"
+ "str s20, [x20, #0x0]\n"
+ "174:" // Height 4: Partial direct writeback: Done
+ "b 176f\n"
+ "175:" // Height 4: Full writeback
+ "str q4, [x12, #0x0]\n"
+ "str q14, [x12, #0x10]\n"
+ "str q15, [x12, #0x20]\n"
+ "str q16, [x12, #0x30]\n"
+ "str q17, [x12, #0x40]\n"
+ "str q18, [x12, #0x50]\n"
+ "add x12, x12, #0x60\n"
+ "str q8, [x22, #0x0]\n"
+ "str q9, [x22, #0x10]\n"
+ "str q10, [x22, #0x20]\n"
+ "str q11, [x22, #0x30]\n"
+ "str q12, [x22, #0x40]\n"
+ "str q13, [x22, #0x50]\n"
+ "str q19, [x21, #0x0]\n"
+ "str q26, [x21, #0x10]\n"
+ "str q27, [x21, #0x20]\n"
+ "str q28, [x21, #0x30]\n"
+ "str q29, [x21, #0x40]\n"
+ "str q30, [x21, #0x50]\n"
+ "str q20, [x20, #0x0]\n"
+ "str q21, [x20, #0x10]\n"
+ "str q22, [x20, #0x20]\n"
+ "str q23, [x20, #0x30]\n"
+ "str q24, [x20, #0x40]\n"
+ "str q25, [x20, #0x50]\n"
+ "176:" // Height 4: Writeback done
+ "subs x13, x13, #0x18\n"
+ "bgt 134b\n"
+ "subs %x[M], %x[M], #0x4\n"
+ "beq 178f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 177f\n"
+ "add x20, x20, #0x4\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "b 1b\n"
+ "177:" // Update direct input
+ "mov x19, #0x10\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
+ "b 1b\n"
+ "178:" // Exit
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
+ : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ );
+}
+
+} // namespace arm_gemm
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_dot_8x12.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_dot_8x12.hpp
new file mode 100644
index 0000000000..e24dab68e8
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_dot_8x12.hpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#pragma once
+#ifdef __aarch64__
+
+#include "../std_transforms_fixed.hpp"
+#include "../bfloat.hpp"
+#include "../kernel_weight_format.hpp"
+#include "../performance_parameters.hpp"
+
+#define ARGLIST \
+ const bfloat16 *, const bfloat16 *, size_t, \
+ float *, int, size_t, int
+
+namespace arm_gemm
+{
+// Actual kernel implementations
+void a64_ffinterleaved_bf16fp32_dot_8x12( ARGLIST );
+
+class cls_a64_ffinterleaved_bf16fp32_dot_8x12
+{
+public:
+ typedef bfloat16 operand_type;
+ typedef float result_type;
+
+ typedef void (*kern_type)( ARGLIST );
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 8;
+ }
+
+ static unsigned int out_width()
+ {
+ return 12;
+ }
+ static unsigned int stripe_width()
+ {
+ return 4;
+ }
+
+ static KernelWeightFormat kernel_weight_format()
+ {
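+        // Fixed-format weight layout: 128-bit vector width with 32-bit
+        // blocking (interpretation of VL128_BL32 assumed from
+        // kernel_weight_format.hpp).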
+ return KernelWeightFormat::VL128_BL32;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 2;
+ }
+
+
+ StdTransformsFixed<operand_type, result_type, 8, 12, 2> transforms = {};
+ StdTransformsFixed<operand_type, result_type, 8, 12, 2, true> transforms_quantized = {};
+ template<typename T>
+ static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
+ {
+
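+        // Per-cycle throughput estimates consumed by the kernel-selection
+        // heuristics; the field order is assumed to follow
+        // PerformanceParameters (kernel MACs/cycle, then prepare and merge
+        // bytes/cycle).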
+ if (std::is_same<T, bfloat16>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 22.16, 8.25, 3.26 };
+ }
+ }
+
+ return { 1.0 };
+ }
+
+ // Default to the generic kernel
+ kern_type kernel=a64_ffinterleaved_bf16fp32_dot_8x12;
+ cls_a64_ffinterleaved_bf16fp32_dot_8x12(const CPUInfo *)
+ {
+ }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_dot_8x12/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_dot_8x12/generic.cpp
new file mode 100644
index 0000000000..967396c377
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_dot_8x12/generic.cpp
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __aarch64__
+
+#include <cstddef>
+#include "../../bfloat.hpp"
+
+namespace arm_gemm {
+
+void a64_ffinterleaved_bf16fp32_dot_8x12(
+ const bfloat16 *Apanel,
+ const bfloat16 *Bpanel,
+ size_t B_stride,
+ float *Cpanel,
+ int ablocks,
+ size_t N,
+ int K) {
+
+ struct KernelArgs {
+ size_t K = {};
+ const bfloat16 *Bpanel = {};
+ size_t N = {};
+ size_t B_stride = {};
+ const bfloat16 *cur_B_ptr = {};
+ } ka;
+
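+    // The assembly consumes K in steps of k_unroll == 2 (each bfdot reads a
+    // pair of bf16 values); one step is peeled off as the loop tail, hence
+    // the -1.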
+ ka.K = (K/2) - 1;
+ ka.Bpanel = Bpanel;
+ ka.N = N;
+ ka.B_stride = B_stride;
+
+ __asm__ __volatile__(
+ "1:" // Height loop
+ "ldr x24, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x23, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x24, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x22, %x[Apanel]\n"
+ "2:" // Width loop
+ "ldr x24, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x21, x24, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
+ "add x19, x20, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
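+      // Fewer than 12 output columns left: point the unused B column-block
+      // pointers at the first block so loads stay in bounds; the merge step
+      // is assumed to drop the surplus columns of the C panel.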
+ "cmp x23, #0x8\n"
+ "mov %x[Apanel], x22\n"
+ "bgt 3f\n"
+ "cmp x23, #0x4\n"
+ "mov x20, x24\n"
+ "bgt 3f\n"
+ "mov x21, x24\n"
+ "3:" // B setup done
+ "ldr q0, [%x[Apanel], #0x0]\n"
+ "ldr q1, [%x[Apanel], #0x10]\n"
+ "movi v8.16b, #0x0\n"
+ "ldr q4, [x24, #0x0]\n"
+ "ldr q5, [x21, #0x0]\n"
+ "movi v9.16b, #0x0\n"
+ "ldr q6, [x20, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
+ "cmp x19, #0x2\n"
+ "movi v10.16b, #0x0\n"
+ "movi v11.16b, #0x0\n"
+ "movi v12.16b, #0x0\n"
+ "movi v13.16b, #0x0\n"
+ "movi v14.16b, #0x0\n"
+ "movi v15.16b, #0x0\n"
+ "movi v16.16b, #0x0\n"
+ "movi v17.16b, #0x0\n"
+ "movi v18.16b, #0x0\n"
+ "movi v19.16b, #0x0\n"
+ "movi v20.16b, #0x0\n"
+ "movi v21.16b, #0x0\n"
+ "movi v22.16b, #0x0\n"
+ "movi v23.16b, #0x0\n"
+ "movi v24.16b, #0x0\n"
+ "movi v25.16b, #0x0\n"
+ "movi v26.16b, #0x0\n"
+ "movi v27.16b, #0x0\n"
+ "movi v28.16b, #0x0\n"
+ "movi v29.16b, #0x0\n"
+ "movi v30.16b, #0x0\n"
+ "movi v31.16b, #0x0\n"
+ "blt 5f\n"
+ "4:" // main loop head
+ "ldr q2, [%x[Apanel], #0x20]\n"
+ "ldr q3, [%x[Apanel], #0x30]\n"
+ ".inst 0x4f40f088 // bfdot v8.4s, v4.8h, v0.h[0]\n"
+ ".inst 0x4f60f08b // bfdot v11.4s, v4.8h, v0.h[1]\n"
+ ".inst 0x4f40f88e // bfdot v14.4s, v4.8h, v0.h[2]\n"
+ "sub x19, x19, #0x2\n"
+ ".inst 0x4f60f891 // bfdot v17.4s, v4.8h, v0.h[3]\n"
+ ".inst 0x4f41f094 // bfdot v20.4s, v4.8h, v1.h[0]\n"
+ "cmp x19, #0x2\n"
+ ".inst 0x4f61f097 // bfdot v23.4s, v4.8h, v1.h[1]\n"
+ ".inst 0x4f41f89a // bfdot v26.4s, v4.8h, v1.h[2]\n"
+ "add %x[Apanel], %x[Apanel], #0x40\n"
+ ".inst 0x4f61f89d // bfdot v29.4s, v4.8h, v1.h[3]\n"
+ "ldr q4, [x24, #0x10]\n"
+ ".inst 0x4f40f0a9 // bfdot v9.4s, v5.8h, v0.h[0]\n"
+ ".inst 0x4f60f0ac // bfdot v12.4s, v5.8h, v0.h[1]\n"
+ ".inst 0x4f40f8af // bfdot v15.4s, v5.8h, v0.h[2]\n"
+ "add x24, x24, #0x20\n"
+ ".inst 0x4f60f8b2 // bfdot v18.4s, v5.8h, v0.h[3]\n"
+ ".inst 0x4f41f0b5 // bfdot v21.4s, v5.8h, v1.h[0]\n"
+ ".inst 0x4f61f0b8 // bfdot v24.4s, v5.8h, v1.h[1]\n"
+ ".inst 0x4f41f8bb // bfdot v27.4s, v5.8h, v1.h[2]\n"
+ ".inst 0x4f61f8be // bfdot v30.4s, v5.8h, v1.h[3]\n"
+ "ldr q5, [x21, #0x10]\n"
+ ".inst 0x4f40f0ca // bfdot v10.4s, v6.8h, v0.h[0]\n"
+ ".inst 0x4f60f0cd // bfdot v13.4s, v6.8h, v0.h[1]\n"
+ ".inst 0x4f40f8d0 // bfdot v16.4s, v6.8h, v0.h[2]\n"
+ "add x21, x21, #0x20\n"
+ ".inst 0x4f60f8d3 // bfdot v19.4s, v6.8h, v0.h[3]\n"
+ "ldr q0, [%x[Apanel], #0x0]\n"
+ ".inst 0x4f41f0d6 // bfdot v22.4s, v6.8h, v1.h[0]\n"
+ ".inst 0x4f61f0d9 // bfdot v25.4s, v6.8h, v1.h[1]\n"
+ ".inst 0x4f41f8dc // bfdot v28.4s, v6.8h, v1.h[2]\n"
+ ".inst 0x4f61f8df // bfdot v31.4s, v6.8h, v1.h[3]\n"
+ "ldr q6, [x20, #0x10]\n"
+ "ldr q1, [%x[Apanel], #0x10]\n"
+ "add x20, x20, #0x20\n"
+ ".inst 0x4f42f088 // bfdot v8.4s, v4.8h, v2.h[0]\n"
+ ".inst 0x4f62f08b // bfdot v11.4s, v4.8h, v2.h[1]\n"
+ ".inst 0x4f42f88e // bfdot v14.4s, v4.8h, v2.h[2]\n"
+ ".inst 0x4f62f891 // bfdot v17.4s, v4.8h, v2.h[3]\n"
+ ".inst 0x4f43f094 // bfdot v20.4s, v4.8h, v3.h[0]\n"
+ ".inst 0x4f63f097 // bfdot v23.4s, v4.8h, v3.h[1]\n"
+ ".inst 0x4f43f89a // bfdot v26.4s, v4.8h, v3.h[2]\n"
+ ".inst 0x4f63f89d // bfdot v29.4s, v4.8h, v3.h[3]\n"
+ "ldr q4, [x24, #0x0]\n"
+ ".inst 0x4f42f0a9 // bfdot v9.4s, v5.8h, v2.h[0]\n"
+ ".inst 0x4f62f0ac // bfdot v12.4s, v5.8h, v2.h[1]\n"
+ ".inst 0x4f42f8af // bfdot v15.4s, v5.8h, v2.h[2]\n"
+ ".inst 0x4f62f8b2 // bfdot v18.4s, v5.8h, v2.h[3]\n"
+ ".inst 0x4f43f0b5 // bfdot v21.4s, v5.8h, v3.h[0]\n"
+ ".inst 0x4f63f0b8 // bfdot v24.4s, v5.8h, v3.h[1]\n"
+ ".inst 0x4f43f8bb // bfdot v27.4s, v5.8h, v3.h[2]\n"
+ ".inst 0x4f63f8be // bfdot v30.4s, v5.8h, v3.h[3]\n"
+ "ldr q5, [x21, #0x0]\n"
+ ".inst 0x4f42f0ca // bfdot v10.4s, v6.8h, v2.h[0]\n"
+ ".inst 0x4f62f0cd // bfdot v13.4s, v6.8h, v2.h[1]\n"
+ ".inst 0x4f42f8d0 // bfdot v16.4s, v6.8h, v2.h[2]\n"
+ ".inst 0x4f62f8d3 // bfdot v19.4s, v6.8h, v2.h[3]\n"
+ ".inst 0x4f43f0d6 // bfdot v22.4s, v6.8h, v3.h[0]\n"
+ ".inst 0x4f63f0d9 // bfdot v25.4s, v6.8h, v3.h[1]\n"
+ ".inst 0x4f43f8dc // bfdot v28.4s, v6.8h, v3.h[2]\n"
+ ".inst 0x4f63f8df // bfdot v31.4s, v6.8h, v3.h[3]\n"
+ "ldr q6, [x20, #0x0]\n"
+ "bge 4b\n"
+ "5:" // main loop skip
+ ".inst 0x4f40f088 // bfdot v8.4s, v4.8h, v0.h[0]\n"
+ ".inst 0x4f60f08b // bfdot v11.4s, v4.8h, v0.h[1]\n"
+ "add %x[Apanel], %x[Apanel], #0x20\n"
+ ".inst 0x4f40f88e // bfdot v14.4s, v4.8h, v0.h[2]\n"
+ ".inst 0x4f60f891 // bfdot v17.4s, v4.8h, v0.h[3]\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x4f41f094 // bfdot v20.4s, v4.8h, v1.h[0]\n"
+ ".inst 0x4f61f097 // bfdot v23.4s, v4.8h, v1.h[1]\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x4f41f89a // bfdot v26.4s, v4.8h, v1.h[2]\n"
+ ".inst 0x4f61f89d // bfdot v29.4s, v4.8h, v1.h[3]\n"
+ "add x20, x20, #0x10\n"
+ ".inst 0x4f40f0a9 // bfdot v9.4s, v5.8h, v0.h[0]\n"
+ ".inst 0x4f60f0ac // bfdot v12.4s, v5.8h, v0.h[1]\n"
+ ".inst 0x4f40f8af // bfdot v15.4s, v5.8h, v0.h[2]\n"
+ ".inst 0x4f60f8b2 // bfdot v18.4s, v5.8h, v0.h[3]\n"
+ ".inst 0x4f41f0b5 // bfdot v21.4s, v5.8h, v1.h[0]\n"
+ ".inst 0x4f61f0b8 // bfdot v24.4s, v5.8h, v1.h[1]\n"
+ ".inst 0x4f41f8bb // bfdot v27.4s, v5.8h, v1.h[2]\n"
+ ".inst 0x4f61f8be // bfdot v30.4s, v5.8h, v1.h[3]\n"
+ ".inst 0x4f40f0ca // bfdot v10.4s, v6.8h, v0.h[0]\n"
+ ".inst 0x4f60f0cd // bfdot v13.4s, v6.8h, v0.h[1]\n"
+ ".inst 0x4f40f8d0 // bfdot v16.4s, v6.8h, v0.h[2]\n"
+ ".inst 0x4f60f8d3 // bfdot v19.4s, v6.8h, v0.h[3]\n"
+ ".inst 0x4f41f0d6 // bfdot v22.4s, v6.8h, v1.h[0]\n"
+ ".inst 0x4f61f0d9 // bfdot v25.4s, v6.8h, v1.h[1]\n"
+ ".inst 0x4f41f8dc // bfdot v28.4s, v6.8h, v1.h[2]\n"
+ ".inst 0x4f61f8df // bfdot v31.4s, v6.8h, v1.h[3]\n"
+ "cbz x19, 6f\n"
+ "ldr q0, [%x[Apanel], #0x0]\n"
+ "ldr q1, [%x[Apanel], #0x10]\n"
+ "add %x[Apanel], %x[Apanel], #0x20\n"
+ "ldr q7, [x24, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
+ ".inst 0x4f40f0e8 // bfdot v8.4s, v7.8h, v0.h[0]\n"
+ "ldr q5, [x20, #0x0]\n"
+ ".inst 0x4f60f0eb // bfdot v11.4s, v7.8h, v0.h[1]\n"
+ ".inst 0x4f40f8ee // bfdot v14.4s, v7.8h, v0.h[2]\n"
+ ".inst 0x4f60f8f1 // bfdot v17.4s, v7.8h, v0.h[3]\n"
+ ".inst 0x4f41f0f4 // bfdot v20.4s, v7.8h, v1.h[0]\n"
+ ".inst 0x4f61f0f7 // bfdot v23.4s, v7.8h, v1.h[1]\n"
+ ".inst 0x4f41f8fa // bfdot v26.4s, v7.8h, v1.h[2]\n"
+ ".inst 0x4f61f8fd // bfdot v29.4s, v7.8h, v1.h[3]\n"
+ ".inst 0x4f40f089 // bfdot v9.4s, v4.8h, v0.h[0]\n"
+ ".inst 0x4f60f08c // bfdot v12.4s, v4.8h, v0.h[1]\n"
+ ".inst 0x4f40f88f // bfdot v15.4s, v4.8h, v0.h[2]\n"
+ ".inst 0x4f60f892 // bfdot v18.4s, v4.8h, v0.h[3]\n"
+ ".inst 0x4f41f095 // bfdot v21.4s, v4.8h, v1.h[0]\n"
+ ".inst 0x4f61f098 // bfdot v24.4s, v4.8h, v1.h[1]\n"
+ ".inst 0x4f41f89b // bfdot v27.4s, v4.8h, v1.h[2]\n"
+ ".inst 0x4f61f89e // bfdot v30.4s, v4.8h, v1.h[3]\n"
+ ".inst 0x4f40f0aa // bfdot v10.4s, v5.8h, v0.h[0]\n"
+ ".inst 0x4f60f0ad // bfdot v13.4s, v5.8h, v0.h[1]\n"
+ ".inst 0x4f40f8b0 // bfdot v16.4s, v5.8h, v0.h[2]\n"
+ ".inst 0x4f60f8b3 // bfdot v19.4s, v5.8h, v0.h[3]\n"
+ ".inst 0x4f41f0b6 // bfdot v22.4s, v5.8h, v1.h[0]\n"
+ ".inst 0x4f61f0b9 // bfdot v25.4s, v5.8h, v1.h[1]\n"
+ ".inst 0x4f41f8bc // bfdot v28.4s, v5.8h, v1.h[2]\n"
+ ".inst 0x4f61f8bf // bfdot v31.4s, v5.8h, v1.h[3]\n"
+ "6:" // multiply loop done
+ "subs x23, x23, #0xc\n"
+ "str q8, [%x[Cpanel], #0x0]\n"
+ "str q9, [%x[Cpanel], #0x10]\n"
+ "str q10, [%x[Cpanel], #0x20]\n"
+ "str q11, [%x[Cpanel], #0x30]\n"
+ "str q12, [%x[Cpanel], #0x40]\n"
+ "str q13, [%x[Cpanel], #0x50]\n"
+ "str q14, [%x[Cpanel], #0x60]\n"
+ "str q15, [%x[Cpanel], #0x70]\n"
+ "str q16, [%x[Cpanel], #0x80]\n"
+ "str q17, [%x[Cpanel], #0x90]\n"
+ "str q18, [%x[Cpanel], #0xa0]\n"
+ "str q19, [%x[Cpanel], #0xb0]\n"
+ "str q20, [%x[Cpanel], #0xc0]\n"
+ "str q21, [%x[Cpanel], #0xd0]\n"
+ "str q22, [%x[Cpanel], #0xe0]\n"
+ "str q23, [%x[Cpanel], #0xf0]\n"
+ "str q24, [%x[Cpanel], #0x100]\n"
+ "str q25, [%x[Cpanel], #0x110]\n"
+ "str q26, [%x[Cpanel], #0x120]\n"
+ "str q27, [%x[Cpanel], #0x130]\n"
+ "str q28, [%x[Cpanel], #0x140]\n"
+ "str q29, [%x[Cpanel], #0x150]\n"
+ "str q30, [%x[Cpanel], #0x160]\n"
+ "str q31, [%x[Cpanel], #0x170]\n"
+ "add %x[Cpanel], %x[Cpanel], #0x180\n"
+ "bgt 2b\n"
+ "subs %x[ablocks], %x[ablocks], #0x1\n"
+ "bne 1b\n"
+ : [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
+ : [args_ptr] "r" (&ka), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr))
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24"
+ );
+}
+
+} // namespace arm_gemm
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_mmla_8x12.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_mmla_8x12.hpp
new file mode 100644
index 0000000000..c61315b80a
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_mmla_8x12.hpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#pragma once
+#ifdef __aarch64__
+
+#include "../std_transforms_fixed.hpp"
+#include "../bfloat.hpp"
+#include "../kernel_weight_format.hpp"
+#include "../performance_parameters.hpp"
+
+#define ARGLIST \
+ const bfloat16 *, const bfloat16 *, size_t, \
+ float *, int, size_t, int
+
+namespace arm_gemm
+{
+// Actual kernel implementations
+void a64_ffinterleaved_bf16fp32_mmla_8x12( ARGLIST );
+
+class cls_a64_ffinterleaved_bf16fp32_mmla_8x12
+{
+public:
+ typedef bfloat16 operand_type;
+ typedef float result_type;
+
+ typedef void (*kern_type)( ARGLIST );
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 8;
+ }
+
+ static unsigned int out_width()
+ {
+ return 12;
+ }
+ static unsigned int stripe_width()
+ {
+ return 4;
+ }
+
+ static KernelWeightFormat kernel_weight_format()
+ {
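+        // Fixed-format weight layout: 256-bit effective width (two 128-bit
+        // loads per block, matching the wider BFMMLA operands) in 64-bit
+        // blocks (interpretation of VL256_BL64 assumed from
+        // kernel_weight_format.hpp).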
+ return KernelWeightFormat::VL256_BL64;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 4;
+ }
+
+
+ StdTransformsFixed<operand_type, result_type, 8, 12, 4> transforms = {};
+ StdTransformsFixed<operand_type, result_type, 8, 12, 4, true> transforms_quantized = {};
+ template<typename T>
+ static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
+ {
+
+ if (std::is_same<T, bfloat16>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 31.62, 9.07, 3.23 };
+ }
+ }
+
+
+ if (std::is_same<T, float>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 38.10, 5.23, 3.15 };
+ }
+ }
+
+ return { 1.0 };
+ }
+
+ // Default to the generic kernel
+ kern_type kernel=a64_ffinterleaved_bf16fp32_mmla_8x12;
+ cls_a64_ffinterleaved_bf16fp32_mmla_8x12(const CPUInfo *)
+ {
+ }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+#endif // __aarch64__
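
Taken together, out_width()==12 and stripe_width()==4 describe how the kernel below addresses B: each 12-column strip is stored as three 4-column stripes spaced B_stride elements apart, which is exactly the pointer setup at label 2 of the kernel body (x21 and x20 derived from the strip base at B_stride intervals, LSL #1 because bfloat16 is two bytes wide). When N is 8 or fewer the surplus stripe pointers are aliased back to the strip base, presumably so their loads stay inside the B allocation while the unused columns are discarded. A minimal sketch under those assumptions (not library API):

#include <cstddef>

// Returns the base of the next 12-column strip, mirroring the update of
// cur_B_ptr in the assembly; b0/b1/b2 are the three stripe pointers the
// K loop actually loads from.
template <typename T>
const T *next_strip_base(const T *cur_B, std::size_t B_stride) {
    const T *b0 = cur_B;               // stripe: columns 0..3  (x24)
    const T *b1 = b0 + B_stride;       // stripe: columns 4..7  (x21)
    const T *b2 = b1 + B_stride;       // stripe: columns 8..11 (x20)
    static_cast<void>(b0); static_cast<void>(b1);
    return b2 + B_stride;              // written back to cur_B_ptr
}
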
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_mmla_8x12/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_mmla_8x12/generic.cpp
new file mode 100644
index 0000000000..509f2afa09
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_mmla_8x12/generic.cpp
@@ -0,0 +1,314 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __aarch64__
+
+#include <cstddef>
+#include "../../bfloat.hpp"
+
+namespace arm_gemm {
+
+void a64_ffinterleaved_bf16fp32_mmla_8x12(
+ const bfloat16 *Apanel,
+ const bfloat16 *Bpanel,
+ size_t B_stride,
+ float *Cpanel,
+ int ablocks,
+ size_t N,
+ int K) {
+
+ struct KernelArgs {
+ size_t K = {};
+ const bfloat16 *Bpanel = {};
+ size_t N = {};
+ size_t B_stride = {};
+ const bfloat16 *cur_B_ptr = {};
+ } ka;
+
+ ka.K = (K/4) - 1;
+ ka.Bpanel = Bpanel;
+ ka.N = N;
+ ka.B_stride = B_stride;
+
+ __asm__ __volatile__(
+ "1:" // Height loop
+ "ldr x24, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x23, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x24, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x22, %x[Apanel]\n"
+ "2:" // Width loop
+ "ldr x24, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x21, x24, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
+ "add x19, x20, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x23, #0x8\n"
+ "mov %x[Apanel], x22\n"
+ "bgt 3f\n"
+ "cmp x23, #0x4\n"
+ "mov x20, x24\n"
+ "bgt 3f\n"
+ "mov x21, x24\n"
+ "3:" // B setup done
+ "ldr q4, [x24, #0x0]\n"
+ "ldr q0, [%x[Apanel], #0x0]\n"
+ "movi v8.16b, #0x0\n"
+ "ldr q1, [%x[Apanel], #0x10]\n"
+ "ldr q5, [x24, #0x10]\n"
+ "movi v9.16b, #0x0\n"
+ "ldr q2, [%x[Apanel], #0x20]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
+ "cmp x19, #0x2\n"
+ "movi v10.16b, #0x0\n"
+ "movi v11.16b, #0x0\n"
+ "add x24, x24, #0x20\n"
+ "movi v12.16b, #0x0\n"
+ "movi v13.16b, #0x0\n"
+ "add %x[Apanel], %x[Apanel], #0x30\n"
+ "movi v14.16b, #0x0\n"
+ "movi v15.16b, #0x0\n"
+ "movi v16.16b, #0x0\n"
+ "movi v17.16b, #0x0\n"
+ "movi v18.16b, #0x0\n"
+ "movi v19.16b, #0x0\n"
+ "movi v20.16b, #0x0\n"
+ "movi v21.16b, #0x0\n"
+ "movi v22.16b, #0x0\n"
+ "movi v23.16b, #0x0\n"
+ "movi v24.16b, #0x0\n"
+ "movi v25.16b, #0x0\n"
+ "movi v26.16b, #0x0\n"
+ "movi v27.16b, #0x0\n"
+ "movi v28.16b, #0x0\n"
+ "movi v29.16b, #0x0\n"
+ "movi v30.16b, #0x0\n"
+ "movi v31.16b, #0x0\n"
+ "blt 5f\n"
+ "4:" // main loop head
+ "ldr q3, [%x[Apanel], #0x0]\n"
+ "ldr q6, [x21, #0x0]\n"
+ ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
+ "ldr q7, [x21, #0x10]\n"
+ ".inst 0x6e45ec0b // bfmmla v11.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e44ec2e // bfmmla v14.4s, v1.8h, v4.8h\n"
+ ".inst 0x6e45ec31 // bfmmla v17.4s, v1.8h, v5.8h\n"
+ ".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
+ "sub x19, x19, #0x2\n"
+ ".inst 0x6e45ec57 // bfmmla v23.4s, v2.8h, v5.8h\n"
+ ".inst 0x6e44ec7a // bfmmla v26.4s, v3.8h, v4.8h\n"
+ "ldr q4, [x20, #0x0]\n"
+ ".inst 0x6e45ec7d // bfmmla v29.4s, v3.8h, v5.8h\n"
+ "ldr q5, [x20, #0x10]\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
+ "cmp x19, #0x2\n"
+ ".inst 0x6e47ec32 // bfmmla v18.4s, v1.8h, v7.8h\n"
+ ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e47ec58 // bfmmla v24.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e46ec7b // bfmmla v27.4s, v3.8h, v6.8h\n"
+ "ldr q6, [x24, #0x0]\n"
+ ".inst 0x6e47ec7e // bfmmla v30.4s, v3.8h, v7.8h\n"
+ "ldr q7, [x24, #0x10]\n"
+ ".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
+ ".inst 0x6e45ec0d // bfmmla v13.4s, v0.8h, v5.8h\n"
+ "ldr q0, [%x[Apanel], #0x10]\n"
+ ".inst 0x6e44ec30 // bfmmla v16.4s, v1.8h, v4.8h\n"
+ ".inst 0x6e45ec33 // bfmmla v19.4s, v1.8h, v5.8h\n"
+ "ldr q1, [%x[Apanel], #0x20]\n"
+ ".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
+ ".inst 0x6e45ec59 // bfmmla v25.4s, v2.8h, v5.8h\n"
+ "ldr q2, [%x[Apanel], #0x30]\n"
+ ".inst 0x6e44ec7c // bfmmla v28.4s, v3.8h, v4.8h\n"
+ "ldr q4, [x21, #0x20]\n"
+ ".inst 0x6e45ec7f // bfmmla v31.4s, v3.8h, v5.8h\n"
+ "ldr q3, [%x[Apanel], #0x40]\n"
+ "ldr q5, [x21, #0x30]\n"
+ ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e47ec31 // bfmmla v17.4s, v1.8h, v7.8h\n"
+ "add x21, x21, #0x40\n"
+ ".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e46ec7a // bfmmla v26.4s, v3.8h, v6.8h\n"
+ "ldr q6, [x20, #0x20]\n"
+ ".inst 0x6e47ec7d // bfmmla v29.4s, v3.8h, v7.8h\n"
+ "ldr q7, [x20, #0x30]\n"
+ ".inst 0x6e44ec09 // bfmmla v9.4s, v0.8h, v4.8h\n"
+ ".inst 0x6e45ec0c // bfmmla v12.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e44ec2f // bfmmla v15.4s, v1.8h, v4.8h\n"
+ ".inst 0x6e45ec32 // bfmmla v18.4s, v1.8h, v5.8h\n"
+ "add x20, x20, #0x40\n"
+ ".inst 0x6e44ec55 // bfmmla v21.4s, v2.8h, v4.8h\n"
+ ".inst 0x6e45ec58 // bfmmla v24.4s, v2.8h, v5.8h\n"
+ ".inst 0x6e44ec7b // bfmmla v27.4s, v3.8h, v4.8h\n"
+ "ldr q4, [x24, #0x20]\n"
+ ".inst 0x6e45ec7e // bfmmla v30.4s, v3.8h, v5.8h\n"
+ "ldr q5, [x24, #0x30]\n"
+ ".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
+ "ldr q0, [%x[Apanel], #0x50]\n"
+ ".inst 0x6e46ec30 // bfmmla v16.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e47ec33 // bfmmla v19.4s, v1.8h, v7.8h\n"
+ "ldr q1, [%x[Apanel], #0x60]\n"
+ ".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e47ec59 // bfmmla v25.4s, v2.8h, v7.8h\n"
+ "ldr q2, [%x[Apanel], #0x70]\n"
+ ".inst 0x6e46ec7c // bfmmla v28.4s, v3.8h, v6.8h\n"
+ ".inst 0x6e47ec7f // bfmmla v31.4s, v3.8h, v7.8h\n"
+ "add %x[Apanel], %x[Apanel], #0x80\n"
+ "add x24, x24, #0x40\n"
+ "bge 4b\n"
+ "5:" // main loop skip
+ "ldr q3, [%x[Apanel], #0x0]\n"
+ "ldr q6, [x21, #0x0]\n"
+ ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
+ "ldr q7, [x21, #0x10]\n"
+ ".inst 0x6e45ec0b // bfmmla v11.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e44ec2e // bfmmla v14.4s, v1.8h, v4.8h\n"
+ ".inst 0x6e45ec31 // bfmmla v17.4s, v1.8h, v5.8h\n"
+ ".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
+ "add %x[Apanel], %x[Apanel], #0x10\n"
+ ".inst 0x6e45ec57 // bfmmla v23.4s, v2.8h, v5.8h\n"
+ ".inst 0x6e44ec7a // bfmmla v26.4s, v3.8h, v4.8h\n"
+ "ldr q4, [x20, #0x0]\n"
+ ".inst 0x6e45ec7d // bfmmla v29.4s, v3.8h, v5.8h\n"
+ "ldr q5, [x20, #0x10]\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e46ec2f // bfmmla v15.4s, v1.8h, v6.8h\n"
+ "add x21, x21, #0x20\n"
+ ".inst 0x6e47ec32 // bfmmla v18.4s, v1.8h, v7.8h\n"
+ ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
+ "add x20, x20, #0x20\n"
+ ".inst 0x6e47ec58 // bfmmla v24.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e46ec7b // bfmmla v27.4s, v3.8h, v6.8h\n"
+ ".inst 0x6e47ec7e // bfmmla v30.4s, v3.8h, v7.8h\n"
+ ".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
+ ".inst 0x6e45ec0d // bfmmla v13.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e44ec30 // bfmmla v16.4s, v1.8h, v4.8h\n"
+ ".inst 0x6e45ec33 // bfmmla v19.4s, v1.8h, v5.8h\n"
+ ".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
+ ".inst 0x6e45ec59 // bfmmla v25.4s, v2.8h, v5.8h\n"
+ ".inst 0x6e44ec7c // bfmmla v28.4s, v3.8h, v4.8h\n"
+ ".inst 0x6e45ec7f // bfmmla v31.4s, v3.8h, v5.8h\n"
+ "cbz x19, 6f\n"
+ "ldr q6, [x24, #0x0]\n"
+ "ldr q0, [%x[Apanel], #0x0]\n"
+ ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
+ "ldr q1, [%x[Apanel], #0x10]\n"
+ "ldr q7, [x24, #0x10]\n"
+ ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
+ "ldr q2, [%x[Apanel], #0x20]\n"
+ "ldr q3, [%x[Apanel], #0x30]\n"
+ ".inst 0x6e46ec2e // bfmmla v14.4s, v1.8h, v6.8h\n"
+ "ldr q4, [x21, #0x0]\n"
+ "ldr q5, [x21, #0x10]\n"
+ ".inst 0x6e47ec31 // bfmmla v17.4s, v1.8h, v7.8h\n"
+ ".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
+ "add %x[Apanel], %x[Apanel], #0x40\n"
+ ".inst 0x6e46ec7a // bfmmla v26.4s, v3.8h, v6.8h\n"
+ "ldr q6, [x20, #0x0]\n"
+ ".inst 0x6e47ec7d // bfmmla v29.4s, v3.8h, v7.8h\n"
+ "ldr q7, [x20, #0x10]\n"
+ ".inst 0x6e44ec09 // bfmmla v9.4s, v0.8h, v4.8h\n"
+ ".inst 0x6e45ec0c // bfmmla v12.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e44ec2f // bfmmla v15.4s, v1.8h, v4.8h\n"
+ ".inst 0x6e45ec32 // bfmmla v18.4s, v1.8h, v5.8h\n"
+ ".inst 0x6e44ec55 // bfmmla v21.4s, v2.8h, v4.8h\n"
+ ".inst 0x6e45ec58 // bfmmla v24.4s, v2.8h, v5.8h\n"
+ ".inst 0x6e44ec7b // bfmmla v27.4s, v3.8h, v4.8h\n"
+ ".inst 0x6e45ec7e // bfmmla v30.4s, v3.8h, v5.8h\n"
+ ".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e46ec30 // bfmmla v16.4s, v1.8h, v6.8h\n"
+ ".inst 0x6e47ec33 // bfmmla v19.4s, v1.8h, v7.8h\n"
+ ".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e47ec59 // bfmmla v25.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e46ec7c // bfmmla v28.4s, v3.8h, v6.8h\n"
+ ".inst 0x6e47ec7f // bfmmla v31.4s, v3.8h, v7.8h\n"
+ "6:" // multiply loop done
+ "subs x23, x23, #0xc\n"
+ "uzp1 v4.2d, v8.2d, v11.2d\n"
+ "uzp2 v8.2d, v8.2d, v11.2d\n"
+ "uzp1 v11.2d, v9.2d, v12.2d\n"
+ "uzp2 v9.2d, v9.2d, v12.2d\n"
+ "str q4, [%x[Cpanel], #0x0]\n"
+ "uzp1 v12.2d, v10.2d, v13.2d\n"
+ "uzp2 v10.2d, v10.2d, v13.2d\n"
+ "str q11, [%x[Cpanel], #0x10]\n"
+ "str q12, [%x[Cpanel], #0x20]\n"
+ "uzp1 v13.2d, v14.2d, v17.2d\n"
+ "uzp2 v14.2d, v14.2d, v17.2d\n"
+ "str q8, [%x[Cpanel], #0x30]\n"
+ "uzp1 v17.2d, v15.2d, v18.2d\n"
+ "uzp2 v15.2d, v15.2d, v18.2d\n"
+ "str q9, [%x[Cpanel], #0x40]\n"
+ "uzp1 v18.2d, v16.2d, v19.2d\n"
+ "uzp2 v16.2d, v16.2d, v19.2d\n"
+ "str q10, [%x[Cpanel], #0x50]\n"
+ "uzp1 v19.2d, v20.2d, v23.2d\n"
+ "uzp2 v20.2d, v20.2d, v23.2d\n"
+ "str q13, [%x[Cpanel], #0x60]\n"
+ "uzp1 v23.2d, v21.2d, v24.2d\n"
+ "uzp2 v21.2d, v21.2d, v24.2d\n"
+ "str q17, [%x[Cpanel], #0x70]\n"
+ "uzp1 v24.2d, v22.2d, v25.2d\n"
+ "uzp2 v22.2d, v22.2d, v25.2d\n"
+ "str q18, [%x[Cpanel], #0x80]\n"
+ "uzp1 v25.2d, v26.2d, v29.2d\n"
+ "uzp2 v26.2d, v26.2d, v29.2d\n"
+ "str q14, [%x[Cpanel], #0x90]\n"
+ "uzp1 v29.2d, v27.2d, v30.2d\n"
+ "uzp2 v27.2d, v27.2d, v30.2d\n"
+ "str q15, [%x[Cpanel], #0xa0]\n"
+ "uzp1 v30.2d, v28.2d, v31.2d\n"
+ "uzp2 v28.2d, v28.2d, v31.2d\n"
+ "str q16, [%x[Cpanel], #0xb0]\n"
+ "str q19, [%x[Cpanel], #0xc0]\n"
+ "str q23, [%x[Cpanel], #0xd0]\n"
+ "str q24, [%x[Cpanel], #0xe0]\n"
+ "str q20, [%x[Cpanel], #0xf0]\n"
+ "str q21, [%x[Cpanel], #0x100]\n"
+ "str q22, [%x[Cpanel], #0x110]\n"
+ "str q25, [%x[Cpanel], #0x120]\n"
+ "str q29, [%x[Cpanel], #0x130]\n"
+ "str q30, [%x[Cpanel], #0x140]\n"
+ "str q26, [%x[Cpanel], #0x150]\n"
+ "str q27, [%x[Cpanel], #0x160]\n"
+ "str q28, [%x[Cpanel], #0x170]\n"
+ "add %x[Cpanel], %x[Cpanel], #0x180\n"
+ "bgt 2b\n"
+ "subs %x[ablocks], %x[ablocks], #0x1\n"
+ "bne 1b\n"
+ : [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
+ : [args_ptr] "r" (&ka), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr))
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24"
+ );
+}
+
+} // namespace arm_gemm
+#endif // __aarch64__
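
For reference, each ".inst ... bfmmla" above multiplies a 2x4 bf16 sub-tile of A by a register holding the corresponding B sub-tile in transposed (column-grouped) form, accumulating a 2x2 fp32 block. Adjacent output rows therefore land interleaved across register pairs, which is what the uzp1/uzp2 .2d sequence before the stores undoes. A hedged scalar model, with the bf16 inputs widened to float for brevity:

// c += a * bt^T, the 2x2 accumulation one BFMMLA performs; "bt" holds
// the 4x2 B sub-tile as two rows of four (transposed), matching how the
// kernel packs B stripes into q-registers.
static void bfmmla_model(float c[2][2], const float a[2][4], const float bt[2][4]) {
    for (int i = 0; i < 2; i++)
        for (int j = 0; j < 2; j++)
            for (int k = 0; k < 4; k++)
                c[i][j] += a[i][k] * bt[j][k];
}
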
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp16_mla_8x24.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp16_mla_8x24.hpp
new file mode 100644
index 0000000000..1495306879
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp16_mla_8x24.hpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#pragma once
+#ifdef __aarch64__
+
+#include "../std_transforms_fixed.hpp"
+#include "../kernel_weight_format.hpp"
+#include "../performance_parameters.hpp"
+
+#define ARGLIST \
+ const __fp16 *, const __fp16 *, size_t, \
+ __fp16 *, int, size_t, int
+
+namespace arm_gemm
+{
+// Actual kernel implementations
+void a64_ffinterleaved_fp16_mla_8x24( ARGLIST );
+
+class cls_a64_ffinterleaved_fp16_mla_8x24
+{
+public:
+ typedef __fp16 operand_type;
+ typedef __fp16 result_type;
+
+ typedef void (*kern_type)( ARGLIST );
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 8;
+ }
+
+ static unsigned int out_width()
+ {
+ return 24;
+ }
+ static unsigned int stripe_width()
+ {
+ return 8;
+ }
+
+ static KernelWeightFormat kernel_weight_format()
+ {
+ return KernelWeightFormat::VL128_BL16;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 1;
+ }
+
+
+ StdTransformsFixed<operand_type, result_type, 8, 24, 1> transforms = {};
+ StdTransformsFixed<operand_type, result_type, 8, 24, 1, true> transforms_quantized = {};
+ template<typename T>
+ static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
+ {
+
+ if (std::is_same<T, __fp16>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 22.87, 7.77, 2.03 };
+ }
+ }
+
+ return { 1.0 };
+ }
+
+ // Default to the generic kernel
+ kern_type kernel=a64_ffinterleaved_fp16_mla_8x24;
+ cls_a64_ffinterleaved_fp16_mla_8x24(const CPUInfo *)
+ {
+ }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp16_mla_8x24/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp16_mla_8x24/generic.cpp
new file mode 100644
index 0000000000..19836f2e9d
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp16_mla_8x24/generic.cpp
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#if defined(__aarch64__) && (defined(FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC))
+
+#include <cstddef>
+
+namespace arm_gemm {
+
+void a64_ffinterleaved_fp16_mla_8x24(
+ const __fp16 *Apanel,
+ const __fp16 *Bpanel,
+ size_t B_stride,
+ __fp16 *Cpanel,
+ int ablocks,
+ size_t N,
+ int K) {
+
+ struct KernelArgs {
+ size_t K = {};
+ const __fp16 *Bpanel = {};
+ size_t N = {};
+ size_t B_stride = {};
+ const __fp16 *cur_B_ptr = {};
+ } ka;
+
+ ka.K = (K/1) - 1;
+ ka.Bpanel = Bpanel;
+ ka.N = N;
+ ka.B_stride = B_stride;
+
+ __asm__ __volatile__(
+ "1:" // Height loop
+ "ldr x24, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x23, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x24, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x22, %x[Apanel]\n"
+ "2:" // Width loop
+ "ldr x24, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x21, x24, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
+ "add x19, x20, x19, LSL #1\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x23, #0x10\n"
+ "mov %x[Apanel], x22\n"
+ "bgt 3f\n"
+ "cmp x23, #0x8\n"
+ "mov x20, x24\n"
+ "bgt 3f\n"
+ "mov x21, x24\n"
+ "3:" // B setup done
+ "ldr q0, [%x[Apanel], #0x0]\n"
+ "ldr q2, [x24, #0x0]\n"
+ "movi v8.16b, #0x0\n"
+ "ldr q3, [x21, #0x0]\n"
+ "ldr q4, [x20, #0x0]\n"
+ "movi v9.16b, #0x0\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
+ "cmp x19, #0x2\n"
+ "movi v10.16b, #0x0\n"
+ "movi v11.16b, #0x0\n"
+ "movi v12.16b, #0x0\n"
+ "movi v13.16b, #0x0\n"
+ "movi v14.16b, #0x0\n"
+ "movi v15.16b, #0x0\n"
+ "movi v16.16b, #0x0\n"
+ "movi v17.16b, #0x0\n"
+ "movi v18.16b, #0x0\n"
+ "movi v19.16b, #0x0\n"
+ "movi v20.16b, #0x0\n"
+ "movi v21.16b, #0x0\n"
+ "movi v22.16b, #0x0\n"
+ "movi v23.16b, #0x0\n"
+ "movi v24.16b, #0x0\n"
+ "movi v25.16b, #0x0\n"
+ "movi v26.16b, #0x0\n"
+ "movi v27.16b, #0x0\n"
+ "movi v28.16b, #0x0\n"
+ "movi v29.16b, #0x0\n"
+ "movi v30.16b, #0x0\n"
+ "movi v31.16b, #0x0\n"
+ "blt 5f\n"
+ "4:" // main loop head
+ "ldr q1, [%x[Apanel], #0x10]\n"
+ "ldr q5, [x24, #0x10]\n"
+ "fmla v8.8h, v2.8h, v0.h[0]\n"
+ "ldr q6, [x21, #0x10]\n"
+ "ldr q7, [x20, #0x10]\n"
+ "fmla v11.8h, v2.8h, v0.h[1]\n"
+ "fmla v14.8h, v2.8h, v0.h[2]\n"
+ "fmla v17.8h, v2.8h, v0.h[3]\n"
+ "sub x19, x19, #0x2\n"
+ "fmla v20.8h, v2.8h, v0.h[4]\n"
+ "fmla v23.8h, v2.8h, v0.h[5]\n"
+ "cmp x19, #0x2\n"
+ "fmla v26.8h, v2.8h, v0.h[6]\n"
+ "fmla v29.8h, v2.8h, v0.h[7]\n"
+ "add %x[Apanel], %x[Apanel], #0x20\n"
+ "fmla v9.8h, v3.8h, v0.h[0]\n"
+ "fmla v12.8h, v3.8h, v0.h[1]\n"
+ "add x24, x24, #0x20\n"
+ "ldr q2, [x24, #0x0]\n"
+ "fmla v15.8h, v3.8h, v0.h[2]\n"
+ "fmla v18.8h, v3.8h, v0.h[3]\n"
+ "fmla v21.8h, v3.8h, v0.h[4]\n"
+ "fmla v24.8h, v3.8h, v0.h[5]\n"
+ "add x21, x21, #0x20\n"
+ "fmla v27.8h, v3.8h, v0.h[6]\n"
+ "fmla v30.8h, v3.8h, v0.h[7]\n"
+ "ldr q3, [x21, #0x0]\n"
+ "fmla v10.8h, v4.8h, v0.h[0]\n"
+ "fmla v13.8h, v4.8h, v0.h[1]\n"
+ "add x20, x20, #0x20\n"
+ "fmla v16.8h, v4.8h, v0.h[2]\n"
+ "fmla v19.8h, v4.8h, v0.h[3]\n"
+ "fmla v22.8h, v4.8h, v0.h[4]\n"
+ "fmla v25.8h, v4.8h, v0.h[5]\n"
+ "fmla v28.8h, v4.8h, v0.h[6]\n"
+ "fmla v31.8h, v4.8h, v0.h[7]\n"
+ "ldr q0, [%x[Apanel], #0x0]\n"
+ "ldr q4, [x20, #0x0]\n"
+ "fmla v8.8h, v5.8h, v1.h[0]\n"
+ "fmla v11.8h, v5.8h, v1.h[1]\n"
+ "fmla v14.8h, v5.8h, v1.h[2]\n"
+ "fmla v17.8h, v5.8h, v1.h[3]\n"
+ "fmla v20.8h, v5.8h, v1.h[4]\n"
+ "fmla v23.8h, v5.8h, v1.h[5]\n"
+ "fmla v26.8h, v5.8h, v1.h[6]\n"
+ "fmla v29.8h, v5.8h, v1.h[7]\n"
+ "fmla v9.8h, v6.8h, v1.h[0]\n"
+ "fmla v12.8h, v6.8h, v1.h[1]\n"
+ "fmla v15.8h, v6.8h, v1.h[2]\n"
+ "fmla v18.8h, v6.8h, v1.h[3]\n"
+ "fmla v21.8h, v6.8h, v1.h[4]\n"
+ "fmla v24.8h, v6.8h, v1.h[5]\n"
+ "fmla v27.8h, v6.8h, v1.h[6]\n"
+ "fmla v30.8h, v6.8h, v1.h[7]\n"
+ "fmla v10.8h, v7.8h, v1.h[0]\n"
+ "fmla v13.8h, v7.8h, v1.h[1]\n"
+ "fmla v16.8h, v7.8h, v1.h[2]\n"
+ "fmla v19.8h, v7.8h, v1.h[3]\n"
+ "fmla v22.8h, v7.8h, v1.h[4]\n"
+ "fmla v25.8h, v7.8h, v1.h[5]\n"
+ "fmla v28.8h, v7.8h, v1.h[6]\n"
+ "fmla v31.8h, v7.8h, v1.h[7]\n"
+ "bge 4b\n"
+ "5:" // main loop skip
+ "fmla v8.8h, v2.8h, v0.h[0]\n"
+ "fmla v11.8h, v2.8h, v0.h[1]\n"
+ "add %x[Apanel], %x[Apanel], #0x10\n"
+ "fmla v14.8h, v2.8h, v0.h[2]\n"
+ "fmla v17.8h, v2.8h, v0.h[3]\n"
+ "add x24, x24, #0x10\n"
+ "fmla v20.8h, v2.8h, v0.h[4]\n"
+ "fmla v23.8h, v2.8h, v0.h[5]\n"
+ "add x21, x21, #0x10\n"
+ "fmla v26.8h, v2.8h, v0.h[6]\n"
+ "fmla v29.8h, v2.8h, v0.h[7]\n"
+ "add x20, x20, #0x10\n"
+ "fmla v9.8h, v3.8h, v0.h[0]\n"
+ "fmla v12.8h, v3.8h, v0.h[1]\n"
+ "fmla v15.8h, v3.8h, v0.h[2]\n"
+ "fmla v18.8h, v3.8h, v0.h[3]\n"
+ "fmla v21.8h, v3.8h, v0.h[4]\n"
+ "fmla v24.8h, v3.8h, v0.h[5]\n"
+ "fmla v27.8h, v3.8h, v0.h[6]\n"
+ "fmla v30.8h, v3.8h, v0.h[7]\n"
+ "fmla v10.8h, v4.8h, v0.h[0]\n"
+ "fmla v13.8h, v4.8h, v0.h[1]\n"
+ "fmla v16.8h, v4.8h, v0.h[2]\n"
+ "fmla v19.8h, v4.8h, v0.h[3]\n"
+ "fmla v22.8h, v4.8h, v0.h[4]\n"
+ "fmla v25.8h, v4.8h, v0.h[5]\n"
+ "fmla v28.8h, v4.8h, v0.h[6]\n"
+ "fmla v31.8h, v4.8h, v0.h[7]\n"
+ "cbz x19, 6f\n"
+ "ldr q0, [%x[Apanel], #0x0]\n"
+ "ldr q5, [x24, #0x0]\n"
+ "fmla v8.8h, v5.8h, v0.h[0]\n"
+ "ldr q6, [x21, #0x0]\n"
+ "ldr q7, [x20, #0x0]\n"
+ "fmla v11.8h, v5.8h, v0.h[1]\n"
+ "fmla v14.8h, v5.8h, v0.h[2]\n"
+ "fmla v17.8h, v5.8h, v0.h[3]\n"
+ "add %x[Apanel], %x[Apanel], #0x10\n"
+ "fmla v20.8h, v5.8h, v0.h[4]\n"
+ "fmla v23.8h, v5.8h, v0.h[5]\n"
+ "fmla v26.8h, v5.8h, v0.h[6]\n"
+ "fmla v29.8h, v5.8h, v0.h[7]\n"
+ "fmla v9.8h, v6.8h, v0.h[0]\n"
+ "fmla v12.8h, v6.8h, v0.h[1]\n"
+ "fmla v15.8h, v6.8h, v0.h[2]\n"
+ "fmla v18.8h, v6.8h, v0.h[3]\n"
+ "fmla v21.8h, v6.8h, v0.h[4]\n"
+ "fmla v24.8h, v6.8h, v0.h[5]\n"
+ "fmla v27.8h, v6.8h, v0.h[6]\n"
+ "fmla v30.8h, v6.8h, v0.h[7]\n"
+ "fmla v10.8h, v7.8h, v0.h[0]\n"
+ "fmla v13.8h, v7.8h, v0.h[1]\n"
+ "fmla v16.8h, v7.8h, v0.h[2]\n"
+ "fmla v19.8h, v7.8h, v0.h[3]\n"
+ "fmla v22.8h, v7.8h, v0.h[4]\n"
+ "fmla v25.8h, v7.8h, v0.h[5]\n"
+ "fmla v28.8h, v7.8h, v0.h[6]\n"
+ "fmla v31.8h, v7.8h, v0.h[7]\n"
+ "6:" // multiply loop done
+ "subs x23, x23, #0x18\n"
+ "str q8, [%x[Cpanel], #0x0]\n"
+ "str q9, [%x[Cpanel], #0x10]\n"
+ "str q10, [%x[Cpanel], #0x20]\n"
+ "str q11, [%x[Cpanel], #0x30]\n"
+ "str q12, [%x[Cpanel], #0x40]\n"
+ "str q13, [%x[Cpanel], #0x50]\n"
+ "str q14, [%x[Cpanel], #0x60]\n"
+ "str q15, [%x[Cpanel], #0x70]\n"
+ "str q16, [%x[Cpanel], #0x80]\n"
+ "str q17, [%x[Cpanel], #0x90]\n"
+ "str q18, [%x[Cpanel], #0xa0]\n"
+ "str q19, [%x[Cpanel], #0xb0]\n"
+ "str q20, [%x[Cpanel], #0xc0]\n"
+ "str q21, [%x[Cpanel], #0xd0]\n"
+ "str q22, [%x[Cpanel], #0xe0]\n"
+ "str q23, [%x[Cpanel], #0xf0]\n"
+ "str q24, [%x[Cpanel], #0x100]\n"
+ "str q25, [%x[Cpanel], #0x110]\n"
+ "str q26, [%x[Cpanel], #0x120]\n"
+ "str q27, [%x[Cpanel], #0x130]\n"
+ "str q28, [%x[Cpanel], #0x140]\n"
+ "str q29, [%x[Cpanel], #0x150]\n"
+ "str q30, [%x[Cpanel], #0x160]\n"
+ "str q31, [%x[Cpanel], #0x170]\n"
+ "add %x[Cpanel], %x[Cpanel], #0x180\n"
+ "bgt 2b\n"
+ "subs %x[ablocks], %x[ablocks], #0x1\n"
+ "bne 1b\n"
+ : [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
+ : [args_ptr] "r" (&ka), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr))
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24"
+ );
+}
+
+} // namespace arm_gemm
+#endif // defined(__aarch64__) && (defined(FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC))
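
Unlike the mmla variants, this fp16 kernel needs no de-interleaving before its stores: each "fmla v8.8h, v2.8h, v0.h[i]" is a plain rank-1 update that broadcasts one A element across an 8-wide column stripe, with the three stripes loaded through x24/x21/x20 covering the 24 output columns and the eight lane indices covering the rows. A hedged scalar model of one such step (aarch64 __fp16 assumed):

// One k-step for one 8-column stripe: acc[i][j] += b[j] * a[i], where
// a[i] is the broadcast lane v0.h[i] and b is one q-register of B.
static void fmla_step_model(__fp16 acc[8][8], const __fp16 b[8], const __fp16 a[8]) {
    for (int i = 0; i < 8; i++)        // output row = A lane index
        for (int j = 0; j < 8; j++)    // output column within the stripe
            acc[i][j] += b[j] * a[i];
}
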
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp32_mla_8x12.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp32_mla_8x12.hpp
new file mode 100644
index 0000000000..f2a836c9b4
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp32_mla_8x12.hpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#pragma once
+#ifdef __aarch64__
+
+#include "../std_transforms_fixed.hpp"
+#include "../kernel_weight_format.hpp"
+#include "../performance_parameters.hpp"
+
+#define ARGLIST \
+ const float *, const float *, size_t, \
+ float *, int, size_t, int
+
+namespace arm_gemm
+{
+// Actual kernel implementations
+void a64_ffinterleaved_fp32_mla_8x12( ARGLIST );
+
+class cls_a64_ffinterleaved_fp32_mla_8x12
+{
+public:
+ typedef float operand_type;
+ typedef float result_type;
+
+ typedef void (*kern_type)( ARGLIST );
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 8;
+ }
+
+ static unsigned int out_width()
+ {
+ return 12;
+ }
+ static unsigned int stripe_width()
+ {
+ return 4;
+ }
+
+ static KernelWeightFormat kernel_weight_format()
+ {
+ return KernelWeightFormat::VL128_BL32;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 1;
+ }
+
+
+ StdTransformsFixed<operand_type, result_type, 8, 12, 1> transforms = {};
+ StdTransformsFixed<operand_type, result_type, 8, 12, 1, true> transforms_quantized = {};
+ template<typename T>
+ static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
+ {
+
+ if (std::is_same<T, float>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 12.56, 9.83, 3.02 };
+ }
+ }
+
+ return { 1.0 };
+ }
+
+ // Default to the generic kernel
+ kern_type kernel=a64_ffinterleaved_fp32_mla_8x12;
+ cls_a64_ffinterleaved_fp32_mla_8x12(const CPUInfo *)
+ {
+ }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp32_mla_8x12/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp32_mla_8x12/generic.cpp
new file mode 100644
index 0000000000..bf804b5f43
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp32_mla_8x12/generic.cpp
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef __aarch64__
+
+#include <cstddef>
+
+namespace arm_gemm {
+
+void a64_ffinterleaved_fp32_mla_8x12(
+ const float *Apanel,
+ const float *Bpanel,
+ size_t B_stride,
+ float *Cpanel,
+ int ablocks,
+ size_t N,
+ int K) {
+
+ struct KernelArgs {
+ size_t K = {};
+ const float *Bpanel = {};
+ size_t N = {};
+ size_t B_stride = {};
+ const float *cur_B_ptr = {};
+ } ka;
+
+ ka.K = (K/1) - 1;
+ ka.Bpanel = Bpanel;
+ ka.N = N;
+ ka.B_stride = B_stride;
+
+ __asm__ __volatile__(
+ "1:" // Height loop
+ "ldr x24, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x23, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x24, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x22, %x[Apanel]\n"
+ "2:" // Width loop
+ "ldr x24, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x21, x24, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "add x19, x20, x19, LSL #2\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "cmp x23, #0x8\n"
+ "mov %x[Apanel], x22\n"
+ "bgt 3f\n"
+ "cmp x23, #0x4\n"
+ "mov x20, x24\n"
+ "bgt 3f\n"
+ "mov x21, x24\n"
+ "3:" // B setup done
+ "ldr q0, [%x[Apanel], #0x0]\n"
+ "ldr q1, [%x[Apanel], #0x10]\n"
+ "movi v8.16b, #0x0\n"
+ "ldr q4, [x24, #0x0]\n"
+ "ldr q5, [x21, #0x0]\n"
+ "movi v9.16b, #0x0\n"
+ "ldr q6, [x20, #0x0]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
+ "cmp x19, #0x4\n"
+ "movi v10.16b, #0x0\n"
+ "movi v11.16b, #0x0\n"
+ "movi v12.16b, #0x0\n"
+ "movi v13.16b, #0x0\n"
+ "movi v14.16b, #0x0\n"
+ "movi v15.16b, #0x0\n"
+ "movi v16.16b, #0x0\n"
+ "movi v17.16b, #0x0\n"
+ "movi v18.16b, #0x0\n"
+ "movi v19.16b, #0x0\n"
+ "movi v20.16b, #0x0\n"
+ "movi v21.16b, #0x0\n"
+ "movi v22.16b, #0x0\n"
+ "movi v23.16b, #0x0\n"
+ "movi v24.16b, #0x0\n"
+ "movi v25.16b, #0x0\n"
+ "movi v26.16b, #0x0\n"
+ "movi v27.16b, #0x0\n"
+ "movi v28.16b, #0x0\n"
+ "movi v29.16b, #0x0\n"
+ "movi v30.16b, #0x0\n"
+ "movi v31.16b, #0x0\n"
+ "blt 5f\n"
+ "4:" // main loop head
+ "ldr q2, [%x[Apanel], #0x20]\n"
+ "ldr q3, [%x[Apanel], #0x30]\n"
+ "fmla v8.4s, v4.4s, v0.s[0]\n"
+ "ldr q7, [x24, #0x10]\n"
+ "fmla v11.4s, v4.4s, v0.s[1]\n"
+ "fmla v14.4s, v4.4s, v0.s[2]\n"
+ "fmla v17.4s, v4.4s, v0.s[3]\n"
+ "fmla v20.4s, v4.4s, v1.s[0]\n"
+ "sub x19, x19, #0x4\n"
+ "fmla v23.4s, v4.4s, v1.s[1]\n"
+ "fmla v26.4s, v4.4s, v1.s[2]\n"
+ "cmp x19, #0x4\n"
+ "fmla v29.4s, v4.4s, v1.s[3]\n"
+ "ldr q4, [x21, #0x10]\n"
+ "fmla v9.4s, v5.4s, v0.s[0]\n"
+ "fmla v12.4s, v5.4s, v0.s[1]\n"
+ "fmla v15.4s, v5.4s, v0.s[2]\n"
+ "fmla v18.4s, v5.4s, v0.s[3]\n"
+ "fmla v21.4s, v5.4s, v1.s[0]\n"
+ "fmla v24.4s, v5.4s, v1.s[1]\n"
+ "fmla v27.4s, v5.4s, v1.s[2]\n"
+ "fmla v30.4s, v5.4s, v1.s[3]\n"
+ "ldr q5, [x20, #0x10]\n"
+ "fmla v10.4s, v6.4s, v0.s[0]\n"
+ "fmla v13.4s, v6.4s, v0.s[1]\n"
+ "fmla v16.4s, v6.4s, v0.s[2]\n"
+ "fmla v19.4s, v6.4s, v0.s[3]\n"
+ "ldr q0, [%x[Apanel], #0x40]\n"
+ "fmla v22.4s, v6.4s, v1.s[0]\n"
+ "fmla v25.4s, v6.4s, v1.s[1]\n"
+ "fmla v28.4s, v6.4s, v1.s[2]\n"
+ "fmla v31.4s, v6.4s, v1.s[3]\n"
+ "ldr q1, [%x[Apanel], #0x50]\n"
+ "ldr q6, [x24, #0x20]\n"
+ "fmla v8.4s, v7.4s, v2.s[0]\n"
+ "fmla v11.4s, v7.4s, v2.s[1]\n"
+ "fmla v14.4s, v7.4s, v2.s[2]\n"
+ "fmla v17.4s, v7.4s, v2.s[3]\n"
+ "fmla v20.4s, v7.4s, v3.s[0]\n"
+ "fmla v23.4s, v7.4s, v3.s[1]\n"
+ "fmla v26.4s, v7.4s, v3.s[2]\n"
+ "fmla v29.4s, v7.4s, v3.s[3]\n"
+ "ldr q7, [x21, #0x20]\n"
+ "fmla v9.4s, v4.4s, v2.s[0]\n"
+ "fmla v12.4s, v4.4s, v2.s[1]\n"
+ "fmla v15.4s, v4.4s, v2.s[2]\n"
+ "fmla v18.4s, v4.4s, v2.s[3]\n"
+ "fmla v21.4s, v4.4s, v3.s[0]\n"
+ "fmla v24.4s, v4.4s, v3.s[1]\n"
+ "fmla v27.4s, v4.4s, v3.s[2]\n"
+ "fmla v30.4s, v4.4s, v3.s[3]\n"
+ "ldr q4, [x20, #0x20]\n"
+ "fmla v10.4s, v5.4s, v2.s[0]\n"
+ "fmla v13.4s, v5.4s, v2.s[1]\n"
+ "fmla v16.4s, v5.4s, v2.s[2]\n"
+ "fmla v19.4s, v5.4s, v2.s[3]\n"
+ "ldr q2, [%x[Apanel], #0x60]\n"
+ "fmla v22.4s, v5.4s, v3.s[0]\n"
+ "fmla v25.4s, v5.4s, v3.s[1]\n"
+ "fmla v28.4s, v5.4s, v3.s[2]\n"
+ "fmla v31.4s, v5.4s, v3.s[3]\n"
+ "ldr q3, [%x[Apanel], #0x70]\n"
+ "ldr q5, [x24, #0x30]\n"
+ "fmla v8.4s, v6.4s, v0.s[0]\n"
+ "fmla v11.4s, v6.4s, v0.s[1]\n"
+ "fmla v14.4s, v6.4s, v0.s[2]\n"
+ "fmla v17.4s, v6.4s, v0.s[3]\n"
+ "add %x[Apanel], %x[Apanel], #0x80\n"
+ "fmla v20.4s, v6.4s, v1.s[0]\n"
+ "fmla v23.4s, v6.4s, v1.s[1]\n"
+ "add x24, x24, #0x40\n"
+ "fmla v26.4s, v6.4s, v1.s[2]\n"
+ "fmla v29.4s, v6.4s, v1.s[3]\n"
+ "ldr q6, [x21, #0x30]\n"
+ "fmla v9.4s, v7.4s, v0.s[0]\n"
+ "fmla v12.4s, v7.4s, v0.s[1]\n"
+ "add x21, x21, #0x40\n"
+ "fmla v15.4s, v7.4s, v0.s[2]\n"
+ "fmla v18.4s, v7.4s, v0.s[3]\n"
+ "fmla v21.4s, v7.4s, v1.s[0]\n"
+ "fmla v24.4s, v7.4s, v1.s[1]\n"
+ "fmla v27.4s, v7.4s, v1.s[2]\n"
+ "fmla v30.4s, v7.4s, v1.s[3]\n"
+ "ldr q7, [x20, #0x30]\n"
+ "fmla v10.4s, v4.4s, v0.s[0]\n"
+ "fmla v13.4s, v4.4s, v0.s[1]\n"
+ "add x20, x20, #0x40\n"
+ "fmla v16.4s, v4.4s, v0.s[2]\n"
+ "fmla v19.4s, v4.4s, v0.s[3]\n"
+ "ldr q0, [%x[Apanel], #0x0]\n"
+ "fmla v22.4s, v4.4s, v1.s[0]\n"
+ "fmla v25.4s, v4.4s, v1.s[1]\n"
+ "fmla v28.4s, v4.4s, v1.s[2]\n"
+ "fmla v31.4s, v4.4s, v1.s[3]\n"
+ "ldr q1, [%x[Apanel], #0x10]\n"
+ "ldr q4, [x24, #0x0]\n"
+ "fmla v8.4s, v5.4s, v2.s[0]\n"
+ "fmla v11.4s, v5.4s, v2.s[1]\n"
+ "fmla v14.4s, v5.4s, v2.s[2]\n"
+ "fmla v17.4s, v5.4s, v2.s[3]\n"
+ "fmla v20.4s, v5.4s, v3.s[0]\n"
+ "fmla v23.4s, v5.4s, v3.s[1]\n"
+ "fmla v26.4s, v5.4s, v3.s[2]\n"
+ "fmla v29.4s, v5.4s, v3.s[3]\n"
+ "ldr q5, [x21, #0x0]\n"
+ "fmla v9.4s, v6.4s, v2.s[0]\n"
+ "fmla v12.4s, v6.4s, v2.s[1]\n"
+ "fmla v15.4s, v6.4s, v2.s[2]\n"
+ "fmla v18.4s, v6.4s, v2.s[3]\n"
+ "fmla v21.4s, v6.4s, v3.s[0]\n"
+ "fmla v24.4s, v6.4s, v3.s[1]\n"
+ "fmla v27.4s, v6.4s, v3.s[2]\n"
+ "fmla v30.4s, v6.4s, v3.s[3]\n"
+ "ldr q6, [x20, #0x0]\n"
+ "fmla v10.4s, v7.4s, v2.s[0]\n"
+ "fmla v13.4s, v7.4s, v2.s[1]\n"
+ "fmla v16.4s, v7.4s, v2.s[2]\n"
+ "fmla v19.4s, v7.4s, v2.s[3]\n"
+ "fmla v22.4s, v7.4s, v3.s[0]\n"
+ "fmla v25.4s, v7.4s, v3.s[1]\n"
+ "fmla v28.4s, v7.4s, v3.s[2]\n"
+ "fmla v31.4s, v7.4s, v3.s[3]\n"
+ "bge 4b\n"
+ "5:" // main loop skip
+ "fmla v8.4s, v4.4s, v0.s[0]\n"
+ "fmla v11.4s, v4.4s, v0.s[1]\n"
+ "add %x[Apanel], %x[Apanel], #0x20\n"
+ "fmla v14.4s, v4.4s, v0.s[2]\n"
+ "fmla v17.4s, v4.4s, v0.s[3]\n"
+ "add x24, x24, #0x10\n"
+ "fmla v20.4s, v4.4s, v1.s[0]\n"
+ "fmla v23.4s, v4.4s, v1.s[1]\n"
+ "add x21, x21, #0x10\n"
+ "fmla v26.4s, v4.4s, v1.s[2]\n"
+ "fmla v29.4s, v4.4s, v1.s[3]\n"
+ "add x20, x20, #0x10\n"
+ "fmla v9.4s, v5.4s, v0.s[0]\n"
+ "fmla v12.4s, v5.4s, v0.s[1]\n"
+ "fmla v15.4s, v5.4s, v0.s[2]\n"
+ "fmla v18.4s, v5.4s, v0.s[3]\n"
+ "fmla v21.4s, v5.4s, v1.s[0]\n"
+ "fmla v24.4s, v5.4s, v1.s[1]\n"
+ "fmla v27.4s, v5.4s, v1.s[2]\n"
+ "fmla v30.4s, v5.4s, v1.s[3]\n"
+ "fmla v10.4s, v6.4s, v0.s[0]\n"
+ "fmla v13.4s, v6.4s, v0.s[1]\n"
+ "fmla v16.4s, v6.4s, v0.s[2]\n"
+ "fmla v19.4s, v6.4s, v0.s[3]\n"
+ "fmla v22.4s, v6.4s, v1.s[0]\n"
+ "fmla v25.4s, v6.4s, v1.s[1]\n"
+ "fmla v28.4s, v6.4s, v1.s[2]\n"
+ "fmla v31.4s, v6.4s, v1.s[3]\n"
+ "cbz x19, 7f\n"
+ "6:" // odd loop
+ "ldr q0, [%x[Apanel], #0x0]\n"
+ "ldr q1, [%x[Apanel], #0x10]\n"
+ "subs x19, x19, #0x1\n"
+ "ldr q7, [x24, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "fmla v8.4s, v7.4s, v0.s[0]\n"
+ "ldr q5, [x20, #0x0]\n"
+ "fmla v11.4s, v7.4s, v0.s[1]\n"
+ "fmla v14.4s, v7.4s, v0.s[2]\n"
+ "fmla v17.4s, v7.4s, v0.s[3]\n"
+ "fmla v20.4s, v7.4s, v1.s[0]\n"
+ "add %x[Apanel], %x[Apanel], #0x20\n"
+ "fmla v23.4s, v7.4s, v1.s[1]\n"
+ "fmla v26.4s, v7.4s, v1.s[2]\n"
+ "add x24, x24, #0x10\n"
+ "fmla v29.4s, v7.4s, v1.s[3]\n"
+ "fmla v9.4s, v4.4s, v0.s[0]\n"
+ "add x21, x21, #0x10\n"
+ "fmla v12.4s, v4.4s, v0.s[1]\n"
+ "fmla v15.4s, v4.4s, v0.s[2]\n"
+ "add x20, x20, #0x10\n"
+ "fmla v18.4s, v4.4s, v0.s[3]\n"
+ "fmla v21.4s, v4.4s, v1.s[0]\n"
+ "fmla v24.4s, v4.4s, v1.s[1]\n"
+ "fmla v27.4s, v4.4s, v1.s[2]\n"
+ "fmla v30.4s, v4.4s, v1.s[3]\n"
+ "fmla v10.4s, v5.4s, v0.s[0]\n"
+ "fmla v13.4s, v5.4s, v0.s[1]\n"
+ "fmla v16.4s, v5.4s, v0.s[2]\n"
+ "fmla v19.4s, v5.4s, v0.s[3]\n"
+ "fmla v22.4s, v5.4s, v1.s[0]\n"
+ "fmla v25.4s, v5.4s, v1.s[1]\n"
+ "fmla v28.4s, v5.4s, v1.s[2]\n"
+ "fmla v31.4s, v5.4s, v1.s[3]\n"
+ "bne 6b\n"
+ "7:" // multiply loop done
+ "subs x23, x23, #0xc\n"
+ "str q8, [%x[Cpanel], #0x0]\n"
+ "str q9, [%x[Cpanel], #0x10]\n"
+ "str q10, [%x[Cpanel], #0x20]\n"
+ "str q11, [%x[Cpanel], #0x30]\n"
+ "str q12, [%x[Cpanel], #0x40]\n"
+ "str q13, [%x[Cpanel], #0x50]\n"
+ "str q14, [%x[Cpanel], #0x60]\n"
+ "str q15, [%x[Cpanel], #0x70]\n"
+ "str q16, [%x[Cpanel], #0x80]\n"
+ "str q17, [%x[Cpanel], #0x90]\n"
+ "str q18, [%x[Cpanel], #0xa0]\n"
+ "str q19, [%x[Cpanel], #0xb0]\n"
+ "str q20, [%x[Cpanel], #0xc0]\n"
+ "str q21, [%x[Cpanel], #0xd0]\n"
+ "str q22, [%x[Cpanel], #0xe0]\n"
+ "str q23, [%x[Cpanel], #0xf0]\n"
+ "str q24, [%x[Cpanel], #0x100]\n"
+ "str q25, [%x[Cpanel], #0x110]\n"
+ "str q26, [%x[Cpanel], #0x120]\n"
+ "str q27, [%x[Cpanel], #0x130]\n"
+ "str q28, [%x[Cpanel], #0x140]\n"
+ "str q29, [%x[Cpanel], #0x150]\n"
+ "str q30, [%x[Cpanel], #0x160]\n"
+ "str q31, [%x[Cpanel], #0x170]\n"
+ "add %x[Cpanel], %x[Cpanel], #0x180\n"
+ "bgt 2b\n"
+ "subs %x[ablocks], %x[ablocks], #0x1\n"
+ "bne 1b\n"
+ : [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
+ : [args_ptr] "r" (&ka), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr))
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24"
+ );
+}
+
+} // namespace arm_gemm
+#endif // __aarch64__
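
The K bookkeeping in this kernel is worth spelling out: one iteration is reserved up front (ka.K = (K/1) - 1), the main loop at label 4 consumes four k-steps per pass while at least four remain, the reserved step always runs at label 5, and the odd loop at label 6 mops up the remainder one k at a time, so exactly K iterations execute overall. A hedged restatement of that count:

// Illustrative only: mirrors the x19 counter updates in the assembly.
static void k_loop_model(int K) {
    int remaining = K - 1;                    // ka.K = (K/1) - 1
    while (remaining >= 4) remaining -= 4;    // "4:" main loop head
    /* one reserved k-step: "5:" main loop skip */
    while (remaining-- > 0) { /* "6:" odd loop, one k-step */ }
}
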
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL.hpp
new file mode 100644
index 0000000000..e07fa549f3
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL.hpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#pragma once
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include "../std_transforms_sve.hpp"
+#include "../bfloat.hpp"
+#include "../kernel_weight_format.hpp"
+#include "../performance_parameters.hpp"
+
+#define ARGLIST \
+ unsigned int, const unsigned int *, \
+ IndirectInputArg<bfloat16>, \
+ size_t, size_t, \
+ const bfloat16 *, \
+ size_t, \
+ IndirectOutputArg<float>, \
+ const float *, Activation, bool
+
+namespace arm_gemm
+{
+// Actual kernel implementations
+void sve_ffhybrid_bf16fp32_mmla_6x4VL( ARGLIST );
+
+class cls_sve_ffhybrid_bf16fp32_mmla_6x4VL
+{
+public:
+ typedef bfloat16 lhs_operand_type;
+ typedef bfloat16 rhs_operand_type;
+ typedef float result_type;
+
+ typedef void (*kern_type)( ARGLIST );
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 6;
+ }
+ static unsigned int stripe_width()
+ {
+ return get_vector_length<float>() * 1;
+ }
+
+ static KernelWeightFormat kernel_weight_format()
+ {
+ return KernelWeightFormat::VL2VL_BL64;
+ }
+
+ static unsigned int out_width()
+ {
+ return get_vector_length<float>() * 4;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 4;
+ }
+
+ static constexpr bool supports_accumulate()
+ {
+ return true;
+ }
+
+ StdTransformsSVE<rhs_operand_type, result_type, 6, 8, 4> transforms = {};
+ template<typename T>
+ static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
+ {
+ if (std::is_same<T, bfloat16>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 49.10 };
+ }
+ }
+
+ return { 1.0 };
+ }
+
+ // Default to the generic kernel
+ kern_type kernel=sve_ffhybrid_bf16fp32_mmla_6x4VL;
+ cls_sve_ffhybrid_bf16fp32_mmla_6x4VL(const CPUInfo *)
+ {
+ }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+#endif // ARM_COMPUTE_ENABLE_SVE
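
Because this is an SVE kernel, the blocking parameters above are vector-length dependent: out_width() is four fp32 vectors and stripe_width() one vector, matching the four B panel pointers (x11/x10/x9/x28) in the kernel body below, each of which feeds one vector of output columns. A hedged example of the resulting strip width, assuming an SVE-enabled toolchain:

#include <arm_sve.h>

// Strip width in fp32 columns: e.g. 16 at VL=128 or 32 at VL=256.
static unsigned int strip_cols_sketch(void) {
    unsigned int vl_floats = static_cast<unsigned int>(svcntw());  // fp32 lanes per vector
    return 4u * vl_floats;                                         // out_width() analogue
}
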
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL/generic.cpp
new file mode 100644
index 0000000000..c0b6b30762
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL/generic.cpp
@@ -0,0 +1,2227 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include "arm_gemm.hpp"
+#include "../../utils.hpp"
+#include "../../bfloat.hpp"
+
+#include <cassert>
+#include <limits>
+
+namespace arm_gemm {
+
+void sve_ffhybrid_bf16fp32_mmla_6x4VL (
+ unsigned int num_strings, const unsigned int *string_lengths, IndirectInputArg<bfloat16> A_arg,
+ size_t M, size_t N, const bfloat16 *B_ptr, size_t B_stride, IndirectOutputArg<float> output_arg,
+ const float *bias, Activation act, bool accumulate
+)
+{
+ struct KernelArgs {
+ float maxval = static_cast<float>(std::numeric_limits<float>::infinity());
+ float minval = - static_cast<float>(std::numeric_limits<float>::infinity());
+ unsigned int num_strings = {};
+ const unsigned int *string_lengths = {};
+ size_t N = {};
+ const bfloat16 *B_ptr = {};
+ const bfloat16 *cur_B_ptr = {};
+ size_t B_stride = {};
+ size_t output_offset = {};
+ size_t input_initial_col = {};
+ size_t input_offset = {};
+ } ka;
+
+ unsigned long flags=0;
+ void *output_ptr;
+ void *input_ptr;
+
+ if (output_arg.is_indirect) {
+ output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_offset=output_arg.indirect.offset;
+ flags |= 0x4;
+ } else {
+ output_ptr=(void *)(output_arg.direct.base);
+ ka.output_offset=output_arg.direct.stride;
+ }
+
+ if (A_arg.is_indirect) {
+ input_ptr=(void *)(A_arg.indirect.ptr);
+ ka.input_offset=A_arg.indirect.start_row;
+ ka.input_initial_col=A_arg.indirect.start_col;
+ flags |= 0x8;
+ } else {
+ assert(num_strings==1);
+ input_ptr=(void *)(A_arg.direct.base);
+ ka.input_offset=A_arg.direct.stride;
+ }
+ if (accumulate) {
+ flags |= 0x1;
+ }
+ ka.num_strings = num_strings;
+ ka.string_lengths = string_lengths;
+ ka.N = N;
+ ka.B_ptr = B_ptr;
+ ka.B_stride = B_stride;
+ switch(act.type) {
+ default:
+ case Activation::Type::None:
+ break;
+ case Activation::Type::BoundedReLU:
+ ka.maxval = static_cast<float>(act.param1);
+ /* fall through */
+ case Activation::Type::ReLU:
+ ka.minval = 0;
+ flags |= 0x2;
+ break;
+ }
+ __asm__ __volatile__(
+ "ptrue p5.b\n"
+ "1:" // Row loop
+ "cmp %x[M], #0x6\n"
+ "bge 71f\n"
+ "cmp %x[M], #0x4\n"
+ "bgt 57f\n"
+ "beq 43f\n"
+ "cmp %x[M], #0x2\n"
+ "bgt 29f\n"
+ "beq 15f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "2:" // Height 1: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 3f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 3f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 3f\n"
+ "mov x10, x11\n"
+ "3:" // Height 1: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 4f\n"
+ "ld1w { z8.s }, p5/Z, [x14]\n"
+ "ld1w { z9.s }, p5/Z, [x14, #1, MUL VL]\n"
+ "zip2 z12.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
+ "ld1w { z10.s }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x14, #3, MUL VL]\n"
+ "zip2 z13.d, z9.d, z9.d\n"
+ "zip1 z9.d, z9.d, z9.d\n"
+ "zip2 z14.d, z10.d, z10.d\n"
+ "zip1 z10.d, z10.d, z10.d\n"
+ "addvl x14, x14, #4\n"
+ "zip2 z15.d, z11.d, z11.d\n"
+ "zip1 z11.d, z11.d, z11.d\n"
+ "b 6f\n"
+ "4:" // Height 1: no bias
+ "tbz %x[flags], #0, 5f\n"
+ "ld1w { z9.s }, p4/Z, [x12]\n"
+ "ld1w { z10.s }, p3/Z, [x12, #1, MUL VL]\n"
+ "zip1 z8.d, z9.d, z12.d\n"
+ "zip2 z12.d, z9.d, z12.d\n"
+ "ld1w { z11.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x12, #3, MUL VL]\n"
+ "zip1 z9.d, z10.d, z13.d\n"
+ "zip2 z13.d, z10.d, z13.d\n"
+ "zip1 z10.d, z11.d, z14.d\n"
+ "zip2 z14.d, z11.d, z14.d\n"
+ "zip1 z11.d, z16.d, z15.d\n"
+ "zip2 z15.d, z16.d, z15.d\n"
+ "b 6f\n"
+ "5:" // Height 1: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "6:" // Height 1: setup done
+ "mov x27, #0x0\n"
+ "7:" // Height 1: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 8f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 9f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "b 9f\n"
+ "8:" // Height 1: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "9:" // Height 1: input setup done
+ "cmp x26, #0x8\n"
+ "ble 11f\n"
+ "10:" // Height 1: Multiply loop: Main loop head
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ ".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
+ ".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
+ ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ ".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
+ ".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ ".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
+ ".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x11, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #3, MUL VL]\n"
+ ".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
+ ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
+ ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #3, MUL VL]\n"
+ ".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
+ ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x28, #3, MUL VL]\n"
+ "sub x26, x26, #0x8\n"
+ "cmp x26, #0x8\n"
+ ".inst 0x6467e42b // bfmmla z11.s, z1.h, z7.h\n"
+ ".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
+ "add x25, x25, #0x10\n"
+ "addvl x11, x11, #4\n"
+ "addvl x10, x10, #4\n"
+ "addvl x9, x9, #4\n"
+ "addvl x28, x28, #4\n"
+ "bgt 10b\n"
+ "11:" // Height 1: Multiply loop: Single iteration only
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ ".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
+ ".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
+ ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ ".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
+ ".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ ".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
+ ".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
+ "addvl x11, x11, #2\n"
+ "addvl x10, x10, #2\n"
+ "addvl x9, x9, #2\n"
+ "addvl x28, x28, #2\n"
+ "ble 12f\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ ".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
+ ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
+ ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ ".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
+ ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
+ ".inst 0x6467e42b // bfmmla z11.s, z1.h, z7.h\n"
+ ".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
+ "addvl x11, x11, #2\n"
+ "addvl x10, x10, #2\n"
+ "addvl x9, x9, #2\n"
+ "addvl x28, x28, #2\n"
+ "12:" // Height 1: Multiply loop: multiply skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 7b\n"
+ "uzp1 z8.d, z8.d, z12.d\n"
+ "uzp1 z9.d, z9.d, z13.d\n"
+ "uzp1 z10.d, z10.d, z14.d\n"
+ "uzp1 z11.d, z11.d, z15.d\n"
+ "tbz %x[flags], #1, 13f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z8.s, p5/M, z8.s, z1.s\n"
+ "fmin z9.s, p5/M, z9.s, z1.s\n"
+ "fmin z10.s, p5/M, z10.s, z1.s\n"
+ "fmin z11.s, p5/M, z11.s, z1.s\n"
+ "fmax z8.s, p5/M, z8.s, z0.s\n"
+ "fmax z9.s, p5/M, z9.s, z0.s\n"
+ "fmax z10.s, p5/M, z10.s, z0.s\n"
+ "fmax z11.s, p5/M, z11.s, z0.s\n"
+ "13:" // Height 1: No activation
+ "st1w { z8.s }, p4, [x12]\n"
+ "st1w { z9.s }, p3, [x12, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x12, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "14:" // Height 1: Writeback done
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 2b\n"
+ "b 86f\n"
+ "15:" // Height 2
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "16:" // Height 2: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 17f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 17f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 17f\n"
+ "mov x10, x11\n"
+ "17:" // Height 2: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 18f\n"
+ "ld1w { z8.s }, p5/Z, [x14]\n"
+ "ld1w { z9.s }, p5/Z, [x14, #1, MUL VL]\n"
+ "zip2 z12.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
+ "ld1w { z10.s }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x14, #3, MUL VL]\n"
+ "zip2 z13.d, z9.d, z9.d\n"
+ "zip1 z9.d, z9.d, z9.d\n"
+ "zip2 z14.d, z10.d, z10.d\n"
+ "zip1 z10.d, z10.d, z10.d\n"
+ "addvl x14, x14, #4\n"
+ "zip2 z15.d, z11.d, z11.d\n"
+ "zip1 z11.d, z11.d, z11.d\n"
+ "b 20f\n"
+ "18:" // Height 2: no bias
+ "tbz %x[flags], #0, 19f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "ld1w { z9.s }, p4/Z, [x12]\n"
+ "ld1w { z10.s }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "zip1 z8.d, z9.d, z12.d\n"
+ "zip2 z12.d, z9.d, z12.d\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "zip1 z9.d, z10.d, z13.d\n"
+ "zip2 z13.d, z10.d, z13.d\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "zip1 z10.d, z11.d, z14.d\n"
+ "zip2 z14.d, z11.d, z14.d\n"
+ "zip1 z11.d, z16.d, z15.d\n"
+ "zip2 z15.d, z16.d, z15.d\n"
+ "b 20f\n"
+ "19:" // Height 2: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "20:" // Height 2: setup done
+ "mov x27, #0x0\n"
+ "21:" // Height 2: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 22f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 23f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "b 23f\n"
+ "22:" // Height 2: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "23:" // Height 2: input setup done
+ "cmp x26, #0x8\n"
+ "ble 25f\n"
+ "24:" // Height 2: Multiply loop: Main loop head
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ ".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
+ ".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
+ ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ ".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
+ ".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ ".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
+ ".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x11, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #3, MUL VL]\n"
+ ".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
+ ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
+ ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #3, MUL VL]\n"
+ ".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
+ ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x28, #3, MUL VL]\n"
+ "sub x26, x26, #0x8\n"
+ "cmp x26, #0x8\n"
+ ".inst 0x6467e42b // bfmmla z11.s, z1.h, z7.h\n"
+ ".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "addvl x11, x11, #4\n"
+ "addvl x10, x10, #4\n"
+ "addvl x9, x9, #4\n"
+ "addvl x28, x28, #4\n"
+ "bgt 24b\n"
+ "25:" // Height 2: Multiply loop: Single iteration only
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ ".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
+ ".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
+ ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ ".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
+ ".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
+ "subs x26, x26, #0x4\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ ".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
+ ".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
+ "addvl x11, x11, #2\n"
+ "addvl x10, x10, #2\n"
+ "addvl x9, x9, #2\n"
+ "addvl x28, x28, #2\n"
+ "ble 26f\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ ".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
+ ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
+ ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ ".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
+ ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
+ ".inst 0x6467e42b // bfmmla z11.s, z1.h, z7.h\n"
+ ".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
+ "addvl x11, x11, #2\n"
+ "addvl x10, x10, #2\n"
+ "addvl x9, x9, #2\n"
+ "addvl x28, x28, #2\n"
+ "26:" // Height 2: Multiply loop: multiply skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 21b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "uzp1 z7.d, z8.d, z12.d\n"
+ "uzp2 z8.d, z8.d, z12.d\n"
+ "add x24, x12, x19, LSL #2\n"
+ "uzp1 z12.d, z9.d, z13.d\n"
+ "uzp2 z9.d, z9.d, z13.d\n"
+ "uzp1 z13.d, z10.d, z14.d\n"
+ "uzp2 z10.d, z10.d, z14.d\n"
+ "uzp1 z14.d, z11.d, z15.d\n"
+ "uzp2 z11.d, z11.d, z15.d\n"
+ "tbz %x[flags], #1, 27f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z7.s, p5/M, z7.s, z1.s\n"
+ "fmin z12.s, p5/M, z12.s, z1.s\n"
+ "fmin z13.s, p5/M, z13.s, z1.s\n"
+ "fmin z14.s, p5/M, z14.s, z1.s\n"
+ "fmin z8.s, p5/M, z8.s, z1.s\n"
+ "fmin z9.s, p5/M, z9.s, z1.s\n"
+ "fmin z10.s, p5/M, z10.s, z1.s\n"
+ "fmin z11.s, p5/M, z11.s, z1.s\n"
+ "fmax z7.s, p5/M, z7.s, z0.s\n"
+ "fmax z12.s, p5/M, z12.s, z0.s\n"
+ "fmax z13.s, p5/M, z13.s, z0.s\n"
+ "fmax z14.s, p5/M, z14.s, z0.s\n"
+ "fmax z8.s, p5/M, z8.s, z0.s\n"
+ "fmax z9.s, p5/M, z9.s, z0.s\n"
+ "fmax z10.s, p5/M, z10.s, z0.s\n"
+ "fmax z11.s, p5/M, z11.s, z0.s\n"
+ "27:" // Height 2: No activation
+ "st1w { z7.s }, p4, [x12]\n"
+ "st1w { z12.s }, p3, [x12, #1, MUL VL]\n"
+ "st1w { z13.s }, p2, [x12, #2, MUL VL]\n"
+ "st1w { z14.s }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z8.s }, p4, [x24]\n"
+ "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
+ "28:" // Height 2: Writeback done
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 16b\n"
+ "b 86f\n"
+ "29:" // Height 3
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "30:" // Height 3: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 31f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 31f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 31f\n"
+ "mov x10, x11\n"
+ "31:" // Height 3: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 32f\n"
+ "ld1w { z8.s }, p5/Z, [x14]\n"
+ "ld1w { z9.s }, p5/Z, [x14, #1, MUL VL]\n"
+ "zip2 z12.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
+ "ld1w { z10.s }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x14, #3, MUL VL]\n"
+ "zip2 z13.d, z9.d, z9.d\n"
+ "zip1 z9.d, z9.d, z9.d\n"
+ "zip2 z14.d, z10.d, z10.d\n"
+ "zip1 z10.d, z10.d, z10.d\n"
+ "addvl x14, x14, #4\n"
+ "zip2 z15.d, z11.d, z11.d\n"
+ "zip1 z11.d, z11.d, z11.d\n"
+ "mov z16.d, z8.d\n"
+ "mov z20.d, z12.d\n"
+ "mov z17.d, z9.d\n"
+ "mov z21.d, z13.d\n"
+ "mov z18.d, z10.d\n"
+ "mov z22.d, z14.d\n"
+ "mov z19.d, z11.d\n"
+ "mov z23.d, z15.d\n"
+ "b 34f\n"
+ "32:" // Height 3: no bias
+ "tbz %x[flags], #0, 33f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z9.s }, p4/Z, [x12]\n"
+ "ld1w { z10.s }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "zip1 z8.d, z9.d, z12.d\n"
+ "zip2 z12.d, z9.d, z12.d\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "zip1 z9.d, z10.d, z13.d\n"
+ "zip2 z13.d, z10.d, z13.d\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z17.s }, p4/Z, [x23]\n"
+ "zip1 z10.d, z11.d, z14.d\n"
+ "zip2 z14.d, z11.d, z14.d\n"
+ "ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "zip1 z11.d, z16.d, z15.d\n"
+ "zip2 z15.d, z16.d, z15.d\n"
+ "ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "zip1 z16.d, z17.d, z20.d\n"
+ "zip2 z20.d, z17.d, z20.d\n"
+ "zip1 z17.d, z18.d, z21.d\n"
+ "zip2 z21.d, z18.d, z21.d\n"
+ "zip1 z18.d, z19.d, z22.d\n"
+ "zip2 z22.d, z19.d, z22.d\n"
+ "zip1 z19.d, z24.d, z23.d\n"
+ "zip2 z23.d, z24.d, z23.d\n"
+ "b 34f\n"
+ "33:" // Height 3: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "mov z19.b, #0x0\n"
+ "mov z20.b, #0x0\n"
+ "mov z21.b, #0x0\n"
+ "mov z22.b, #0x0\n"
+ "mov z23.b, #0x0\n"
+ "34:" // Height 3: setup done
+ "mov x27, #0x0\n"
+ "35:" // Height 3: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 36f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 37f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "b 37f\n"
+ "36:" // Height 3: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "37:" // Height 3: input setup done
+ "cmp x26, #0x8\n"
+ "ble 39f\n"
+ "38:" // Height 3: Multiply loop: Main loop head
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ ".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
+ ".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
+ ".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
+ ".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
+ ".inst 0x6467e451 // bfmmla z17.s, z2.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
+ ".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "sub x26, x26, #0x8\n"
+ ".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
+ ".inst 0x6467e452 // bfmmla z18.s, z2.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "cmp x26, #0x8\n"
+ ".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
+ ".inst 0x6466e456 // bfmmla z22.s, z2.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
+ ".inst 0x6467e453 // bfmmla z19.s, z2.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x11, #2, MUL VL]\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
+ ".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x11, #3, MUL VL]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
+ ".inst 0x6467e470 // bfmmla z16.s, z3.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "addvl x11, x11, #4\n"
+ ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
+ ".inst 0x6466e474 // bfmmla z20.s, z3.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
+ ".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
+ ".inst 0x6467e471 // bfmmla z17.s, z3.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x9, #2, MUL VL]\n"
+ ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
+ ".inst 0x6466e475 // bfmmla z21.s, z3.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ ".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
+ ".inst 0x6467e472 // bfmmla z18.s, z3.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x28, #2, MUL VL]\n"
+ ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
+ ".inst 0x6466e476 // bfmmla z22.s, z3.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ ".inst 0x6467e42b // bfmmla z11.s, z1.h, z7.h\n"
+ ".inst 0x6467e473 // bfmmla z19.s, z3.h, z7.h\n"
+ ".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
+ ".inst 0x6466e477 // bfmmla z23.s, z3.h, z6.h\n"
+ "bgt 38b\n"
+ "39:" // Height 3: Multiply loop: Single iteration only
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ ".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
+ ".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
+ ".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
+ ".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
+ ".inst 0x6467e451 // bfmmla z17.s, z2.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x4\n"
+ ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
+ ".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ ".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
+ ".inst 0x6467e452 // bfmmla z18.s, z2.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x11, x11, #2\n"
+ ".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
+ ".inst 0x6466e456 // bfmmla z22.s, z2.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
+ "addvl x10, x10, #2\n"
+ ".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
+ ".inst 0x6467e453 // bfmmla z19.s, z2.h, z7.h\n"
+ "addvl x9, x9, #2\n"
+ "addvl x28, x28, #2\n"
+ ".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
+ ".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
+ "ble 40f\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ ".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
+ ".inst 0x6467e470 // bfmmla z16.s, z3.h, z7.h\n"
+ ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
+ ".inst 0x6466e474 // bfmmla z20.s, z3.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
+ ".inst 0x6467e471 // bfmmla z17.s, z3.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
+ "addvl x11, x11, #2\n"
+ ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
+ ".inst 0x6466e475 // bfmmla z21.s, z3.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "addvl x10, x10, #2\n"
+ ".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
+ ".inst 0x6467e472 // bfmmla z18.s, z3.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x9, x9, #2\n"
+ ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
+ ".inst 0x6466e476 // bfmmla z22.s, z3.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
+ "addvl x28, x28, #2\n"
+ ".inst 0x6467e42b // bfmmla z11.s, z1.h, z7.h\n"
+ ".inst 0x6467e473 // bfmmla z19.s, z3.h, z7.h\n"
+ ".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
+ ".inst 0x6466e477 // bfmmla z23.s, z3.h, z6.h\n"
+ "40:" // Height 3: Multiply loop: multiply skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 35b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "uzp1 z7.d, z8.d, z12.d\n"
+ "uzp2 z8.d, z8.d, z12.d\n"
+ "uzp1 z12.d, z9.d, z13.d\n"
+ "uzp2 z9.d, z9.d, z13.d\n"
+ "add x23, x24, x19, LSL #2\n"
+ "uzp1 z13.d, z10.d, z14.d\n"
+ "uzp2 z10.d, z10.d, z14.d\n"
+ "uzp1 z14.d, z11.d, z15.d\n"
+ "uzp2 z11.d, z11.d, z15.d\n"
+ "uzp1 z16.d, z16.d, z20.d\n"
+ "uzp1 z17.d, z17.d, z21.d\n"
+ "uzp1 z18.d, z18.d, z22.d\n"
+ "uzp1 z19.d, z19.d, z23.d\n"
+ "tbz %x[flags], #1, 41f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z7.s, p5/M, z7.s, z1.s\n"
+ "fmin z12.s, p5/M, z12.s, z1.s\n"
+ "fmin z13.s, p5/M, z13.s, z1.s\n"
+ "fmin z14.s, p5/M, z14.s, z1.s\n"
+ "fmin z8.s, p5/M, z8.s, z1.s\n"
+ "fmin z9.s, p5/M, z9.s, z1.s\n"
+ "fmin z10.s, p5/M, z10.s, z1.s\n"
+ "fmin z11.s, p5/M, z11.s, z1.s\n"
+ "fmin z16.s, p5/M, z16.s, z1.s\n"
+ "fmin z17.s, p5/M, z17.s, z1.s\n"
+ "fmin z18.s, p5/M, z18.s, z1.s\n"
+ "fmin z19.s, p5/M, z19.s, z1.s\n"
+ "fmax z7.s, p5/M, z7.s, z0.s\n"
+ "fmax z12.s, p5/M, z12.s, z0.s\n"
+ "fmax z13.s, p5/M, z13.s, z0.s\n"
+ "fmax z14.s, p5/M, z14.s, z0.s\n"
+ "fmax z8.s, p5/M, z8.s, z0.s\n"
+ "fmax z9.s, p5/M, z9.s, z0.s\n"
+ "fmax z10.s, p5/M, z10.s, z0.s\n"
+ "fmax z11.s, p5/M, z11.s, z0.s\n"
+ "fmax z16.s, p5/M, z16.s, z0.s\n"
+ "fmax z17.s, p5/M, z17.s, z0.s\n"
+ "fmax z18.s, p5/M, z18.s, z0.s\n"
+ "fmax z19.s, p5/M, z19.s, z0.s\n"
+ "41:" // Height 3: No activation
+ "st1w { z7.s }, p4, [x12]\n"
+ "st1w { z12.s }, p3, [x12, #1, MUL VL]\n"
+ "st1w { z13.s }, p2, [x12, #2, MUL VL]\n"
+ "st1w { z14.s }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z8.s }, p4, [x24]\n"
+ "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x23]\n"
+ "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
+ "42:" // Height 3: Writeback done
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 30b\n"
+ "b 86f\n"
+ "43:" // Height 4
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "44:" // Height 4: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 45f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 45f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 45f\n"
+ "mov x10, x11\n"
+ "45:" // Height 4: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 46f\n"
+ "ld1w { z8.s }, p5/Z, [x14]\n"
+ "ld1w { z9.s }, p5/Z, [x14, #1, MUL VL]\n"
+ "zip2 z12.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
+ "ld1w { z10.s }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x14, #3, MUL VL]\n"
+ "zip2 z13.d, z9.d, z9.d\n"
+ "zip1 z9.d, z9.d, z9.d\n"
+ "zip2 z14.d, z10.d, z10.d\n"
+ "zip1 z10.d, z10.d, z10.d\n"
+ "addvl x14, x14, #4\n"
+ "zip2 z15.d, z11.d, z11.d\n"
+ "zip1 z11.d, z11.d, z11.d\n"
+ "mov z16.d, z8.d\n"
+ "mov z20.d, z12.d\n"
+ "mov z17.d, z9.d\n"
+ "mov z21.d, z13.d\n"
+ "mov z18.d, z10.d\n"
+ "mov z22.d, z14.d\n"
+ "mov z19.d, z11.d\n"
+ "mov z23.d, z15.d\n"
+ "b 48f\n"
+ "46:" // Height 4: no bias
+ "tbz %x[flags], #0, 47f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z9.s }, p4/Z, [x12]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z10.s }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "zip1 z8.d, z9.d, z12.d\n"
+ "zip2 z12.d, z9.d, z12.d\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "zip1 z9.d, z10.d, z13.d\n"
+ "zip2 z13.d, z10.d, z13.d\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z17.s }, p4/Z, [x23]\n"
+ "zip1 z10.d, z11.d, z14.d\n"
+ "zip2 z14.d, z11.d, z14.d\n"
+ "ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "zip1 z11.d, z16.d, z15.d\n"
+ "zip2 z15.d, z16.d, z15.d\n"
+ "ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p4/Z, [x22]\n"
+ "zip1 z16.d, z17.d, z20.d\n"
+ "zip2 z20.d, z17.d, z20.d\n"
+ "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "zip1 z17.d, z18.d, z21.d\n"
+ "zip2 z21.d, z18.d, z21.d\n"
+ "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "zip1 z18.d, z19.d, z22.d\n"
+ "zip2 z22.d, z19.d, z22.d\n"
+ "zip1 z19.d, z24.d, z23.d\n"
+ "zip2 z23.d, z24.d, z23.d\n"
+ "b 48f\n"
+ "47:" // Height 4: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "mov z19.b, #0x0\n"
+ "mov z20.b, #0x0\n"
+ "mov z21.b, #0x0\n"
+ "mov z22.b, #0x0\n"
+ "mov z23.b, #0x0\n"
+ "48:" // Height 4: setup done
+ "mov x27, #0x0\n"
+ "49:" // Height 4: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 50f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 51f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "b 51f\n"
+ "50:" // Height 4: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "51:" // Height 4: input setup done
+ "cmp x26, #0x8\n"
+ "ble 53f\n"
+ "52:" // Height 4: Multiply loop: Main loop head
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
+ "ld1rqh { z4.h }, p0/Z, [x22]\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ ".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
+ ".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
+ ".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
+ ".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
+ ".inst 0x6467e451 // bfmmla z17.s, z2.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
+ ".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "sub x26, x26, #0x8\n"
+ ".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
+ ".inst 0x6467e452 // bfmmla z18.s, z2.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "cmp x26, #0x8\n"
+ ".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
+ ".inst 0x6466e456 // bfmmla z22.s, z2.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
+ ".inst 0x6467e453 // bfmmla z19.s, z2.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x11, #2, MUL VL]\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
+ ".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x11, #3, MUL VL]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
+ ".inst 0x6467e470 // bfmmla z16.s, z3.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
+ ".inst 0x6466e474 // bfmmla z20.s, z3.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x11, x11, #4\n"
+ ".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
+ ".inst 0x6467e471 // bfmmla z17.s, z3.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "addvl x10, x10, #4\n"
+ ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
+ ".inst 0x6466e475 // bfmmla z21.s, z3.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ ".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
+ ".inst 0x6467e472 // bfmmla z18.s, z3.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x28, #2, MUL VL]\n"
+ ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
+ ".inst 0x6466e476 // bfmmla z22.s, z3.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ ".inst 0x6467e42b // bfmmla z11.s, z1.h, z7.h\n"
+ ".inst 0x6467e473 // bfmmla z19.s, z3.h, z7.h\n"
+ ".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
+ ".inst 0x6466e477 // bfmmla z23.s, z3.h, z6.h\n"
+ "bgt 52b\n"
+ "53:" // Height 4: Multiply loop: Single iteration only
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
+ "ld1rqh { z4.h }, p0/Z, [x22]\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ ".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
+ ".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
+ ".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
+ ".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
+ ".inst 0x6467e451 // bfmmla z17.s, z2.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x4\n"
+ ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
+ ".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ ".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
+ ".inst 0x6467e452 // bfmmla z18.s, z2.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x11, x11, #2\n"
+ ".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
+ ".inst 0x6466e456 // bfmmla z22.s, z2.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
+ "addvl x10, x10, #2\n"
+ ".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
+ ".inst 0x6467e453 // bfmmla z19.s, z2.h, z7.h\n"
+ "addvl x9, x9, #2\n"
+ "addvl x28, x28, #2\n"
+ ".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
+ ".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
+ "ble 54f\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ ".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
+ ".inst 0x6467e470 // bfmmla z16.s, z3.h, z7.h\n"
+ ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
+ ".inst 0x6466e474 // bfmmla z20.s, z3.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
+ ".inst 0x6467e471 // bfmmla z17.s, z3.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
+ "addvl x11, x11, #2\n"
+ ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
+ ".inst 0x6466e475 // bfmmla z21.s, z3.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "addvl x10, x10, #2\n"
+ ".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
+ ".inst 0x6467e472 // bfmmla z18.s, z3.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x9, x9, #2\n"
+ ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
+ ".inst 0x6466e476 // bfmmla z22.s, z3.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
+ "addvl x28, x28, #2\n"
+ ".inst 0x6467e42b // bfmmla z11.s, z1.h, z7.h\n"
+ ".inst 0x6467e473 // bfmmla z19.s, z3.h, z7.h\n"
+ ".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
+ ".inst 0x6466e477 // bfmmla z23.s, z3.h, z6.h\n"
+ "54:" // Height 4: Multiply loop: multiply skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 49b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "uzp1 z7.d, z8.d, z12.d\n"
+ "uzp2 z8.d, z8.d, z12.d\n"
+ "uzp1 z12.d, z9.d, z13.d\n"
+ "add x22, x23, x19, LSL #2\n"
+ "uzp2 z9.d, z9.d, z13.d\n"
+ "uzp1 z13.d, z10.d, z14.d\n"
+ "uzp2 z10.d, z10.d, z14.d\n"
+ "uzp1 z14.d, z11.d, z15.d\n"
+ "uzp2 z11.d, z11.d, z15.d\n"
+ "uzp1 z15.d, z16.d, z20.d\n"
+ "uzp2 z16.d, z16.d, z20.d\n"
+ "uzp1 z20.d, z17.d, z21.d\n"
+ "uzp2 z17.d, z17.d, z21.d\n"
+ "uzp1 z21.d, z18.d, z22.d\n"
+ "uzp2 z18.d, z18.d, z22.d\n"
+ "uzp1 z22.d, z19.d, z23.d\n"
+ "uzp2 z19.d, z19.d, z23.d\n"
+ "tbz %x[flags], #1, 55f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z7.s, p5/M, z7.s, z1.s\n"
+ "fmin z12.s, p5/M, z12.s, z1.s\n"
+ "fmin z13.s, p5/M, z13.s, z1.s\n"
+ "fmin z14.s, p5/M, z14.s, z1.s\n"
+ "fmin z8.s, p5/M, z8.s, z1.s\n"
+ "fmin z9.s, p5/M, z9.s, z1.s\n"
+ "fmin z10.s, p5/M, z10.s, z1.s\n"
+ "fmin z11.s, p5/M, z11.s, z1.s\n"
+ "fmin z15.s, p5/M, z15.s, z1.s\n"
+ "fmin z20.s, p5/M, z20.s, z1.s\n"
+ "fmin z21.s, p5/M, z21.s, z1.s\n"
+ "fmin z22.s, p5/M, z22.s, z1.s\n"
+ "fmin z16.s, p5/M, z16.s, z1.s\n"
+ "fmin z17.s, p5/M, z17.s, z1.s\n"
+ "fmin z18.s, p5/M, z18.s, z1.s\n"
+ "fmin z19.s, p5/M, z19.s, z1.s\n"
+ "fmax z7.s, p5/M, z7.s, z0.s\n"
+ "fmax z12.s, p5/M, z12.s, z0.s\n"
+ "fmax z13.s, p5/M, z13.s, z0.s\n"
+ "fmax z14.s, p5/M, z14.s, z0.s\n"
+ "fmax z8.s, p5/M, z8.s, z0.s\n"
+ "fmax z9.s, p5/M, z9.s, z0.s\n"
+ "fmax z10.s, p5/M, z10.s, z0.s\n"
+ "fmax z11.s, p5/M, z11.s, z0.s\n"
+ "fmax z15.s, p5/M, z15.s, z0.s\n"
+ "fmax z20.s, p5/M, z20.s, z0.s\n"
+ "fmax z21.s, p5/M, z21.s, z0.s\n"
+ "fmax z22.s, p5/M, z22.s, z0.s\n"
+ "fmax z16.s, p5/M, z16.s, z0.s\n"
+ "fmax z17.s, p5/M, z17.s, z0.s\n"
+ "fmax z18.s, p5/M, z18.s, z0.s\n"
+ "fmax z19.s, p5/M, z19.s, z0.s\n"
+ "55:" // Height 4: No activation
+ "st1w { z7.s }, p4, [x12]\n"
+ "st1w { z12.s }, p3, [x12, #1, MUL VL]\n"
+ "st1w { z13.s }, p2, [x12, #2, MUL VL]\n"
+ "st1w { z14.s }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z8.s }, p4, [x24]\n"
+ "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z15.s }, p4, [x23]\n"
+ "st1w { z20.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z22.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x22]\n"
+ "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
+ "56:" // Height 4: Writeback done
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 44b\n"
+ "b 86f\n"
+ "57:" // Height 5
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "58:" // Height 5: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 59f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 59f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 59f\n"
+ "mov x10, x11\n"
+ "59:" // Height 5: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 60f\n"
+ "ld1w { z8.s }, p5/Z, [x14]\n"
+ "ld1w { z9.s }, p5/Z, [x14, #1, MUL VL]\n"
+ "zip2 z12.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
+ "ld1w { z10.s }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x14, #3, MUL VL]\n"
+ "zip2 z13.d, z9.d, z9.d\n"
+ "zip1 z9.d, z9.d, z9.d\n"
+ "zip2 z14.d, z10.d, z10.d\n"
+ "zip1 z10.d, z10.d, z10.d\n"
+ "addvl x14, x14, #4\n"
+ "zip2 z15.d, z11.d, z11.d\n"
+ "zip1 z11.d, z11.d, z11.d\n"
+ "mov z16.d, z8.d\n"
+ "mov z20.d, z12.d\n"
+ "mov z17.d, z9.d\n"
+ "mov z21.d, z13.d\n"
+ "mov z18.d, z10.d\n"
+ "mov z22.d, z14.d\n"
+ "mov z19.d, z11.d\n"
+ "mov z23.d, z15.d\n"
+ "mov z24.d, z8.d\n"
+ "mov z28.d, z12.d\n"
+ "mov z25.d, z9.d\n"
+ "mov z29.d, z13.d\n"
+ "mov z26.d, z10.d\n"
+ "mov z30.d, z14.d\n"
+ "mov z27.d, z11.d\n"
+ "mov z31.d, z15.d\n"
+ "b 62f\n"
+ "60:" // Height 5: no bias
+ "tbz %x[flags], #0, 61f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z9.s }, p4/Z, [x12]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z10.s }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "zip1 z8.d, z9.d, z12.d\n"
+ "zip2 z12.d, z9.d, z12.d\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "zip1 z9.d, z10.d, z13.d\n"
+ "zip2 z13.d, z10.d, z13.d\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z17.s }, p4/Z, [x23]\n"
+ "zip1 z10.d, z11.d, z14.d\n"
+ "zip2 z14.d, z11.d, z14.d\n"
+ "ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "zip1 z11.d, z16.d, z15.d\n"
+ "zip2 z15.d, z16.d, z15.d\n"
+ "ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p4/Z, [x22]\n"
+ "zip1 z16.d, z17.d, z20.d\n"
+ "zip2 z20.d, z17.d, z20.d\n"
+ "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "zip1 z17.d, z18.d, z21.d\n"
+ "zip2 z21.d, z18.d, z21.d\n"
+ "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z25.s }, p4/Z, [x21]\n"
+ "zip1 z18.d, z19.d, z22.d\n"
+ "zip2 z22.d, z19.d, z22.d\n"
+ "ld1w { z26.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z27.s }, p2/Z, [x21, #2, MUL VL]\n"
+ "zip1 z19.d, z24.d, z23.d\n"
+ "zip2 z23.d, z24.d, z23.d\n"
+ "ld1w { z6.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "zip1 z24.d, z25.d, z28.d\n"
+ "zip2 z28.d, z25.d, z28.d\n"
+ "zip1 z25.d, z26.d, z29.d\n"
+ "zip2 z29.d, z26.d, z29.d\n"
+ "zip1 z26.d, z27.d, z30.d\n"
+ "zip2 z30.d, z27.d, z30.d\n"
+ "zip1 z27.d, z6.d, z31.d\n"
+ "zip2 z31.d, z6.d, z31.d\n"
+ "b 62f\n"
+ "61:" // Height 5: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "mov z19.b, #0x0\n"
+ "mov z20.b, #0x0\n"
+ "mov z21.b, #0x0\n"
+ "mov z22.b, #0x0\n"
+ "mov z23.b, #0x0\n"
+ "mov z24.b, #0x0\n"
+ "mov z25.b, #0x0\n"
+ "mov z26.b, #0x0\n"
+ "mov z27.b, #0x0\n"
+ "mov z28.b, #0x0\n"
+ "mov z29.b, #0x0\n"
+ "mov z30.b, #0x0\n"
+ "mov z31.b, #0x0\n"
+ "62:" // Height 5: setup done
+ "mov x27, #0x0\n"
+ "63:" // Height 5: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 64f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 65f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
+ "b 65f\n"
+ "64:" // Height 5: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "65:" // Height 5: input setup done
+ "cmp x26, #0x8\n"
+ "ble 67f\n"
+ "66:" // Height 5: Multiply loop: Main loop head
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
+ "ld1rqh { z4.h }, p0/Z, [x22]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "ld1rqh { z5.h }, p0/Z, [x21]\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
+ "trn1 z4.d, z5.d, z6.d\n"
+ "trn2 z5.d, z5.d, z6.d\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ ".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
+ ".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
+ ".inst 0x6467e498 // bfmmla z24.s, z4.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "sub x26, x26, #0x8\n"
+ ".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
+ ".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
+ "cmp x26, #0x8\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0x6466e49c // bfmmla z28.s, z4.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x6467e451 // bfmmla z17.s, z2.h, z7.h\n"
+ ".inst 0x6467e499 // bfmmla z25.s, z4.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
+ ".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x6466e49d // bfmmla z29.s, z4.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ ".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
+ ".inst 0x6467e452 // bfmmla z18.s, z2.h, z7.h\n"
+ ".inst 0x6467e49a // bfmmla z26.s, z4.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ ".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
+ ".inst 0x6466e456 // bfmmla z22.s, z2.h, z6.h\n"
+ ".inst 0x6466e49e // bfmmla z30.s, z4.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
+ ".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
+ ".inst 0x6467e453 // bfmmla z19.s, z2.h, z7.h\n"
+ ".inst 0x6467e49b // bfmmla z27.s, z4.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x11, #2, MUL VL]\n"
+ ".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
+ ".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
+ ".inst 0x6466e49f // bfmmla z31.s, z4.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x11, #3, MUL VL]\n"
+ ".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
+ "addvl x11, x11, #4\n"
+ ".inst 0x6467e470 // bfmmla z16.s, z3.h, z7.h\n"
+ ".inst 0x6467e4b8 // bfmmla z24.s, z5.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
+ ".inst 0x6466e474 // bfmmla z20.s, z3.h, z6.h\n"
+ ".inst 0x6466e4bc // bfmmla z28.s, z5.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
+ "addvl x10, x10, #4\n"
+ ".inst 0x6467e471 // bfmmla z17.s, z3.h, z7.h\n"
+ ".inst 0x6467e4b9 // bfmmla z25.s, z5.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x9, #2, MUL VL]\n"
+ ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
+ ".inst 0x6466e475 // bfmmla z21.s, z3.h, z6.h\n"
+ ".inst 0x6466e4bd // bfmmla z29.s, z5.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x9, #3, MUL VL]\n"
+ ".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
+ "addvl x9, x9, #4\n"
+ ".inst 0x6467e472 // bfmmla z18.s, z3.h, z7.h\n"
+ ".inst 0x6467e4ba // bfmmla z26.s, z5.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x28, #2, MUL VL]\n"
+ ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
+ ".inst 0x6466e476 // bfmmla z22.s, z3.h, z6.h\n"
+ ".inst 0x6466e4be // bfmmla z30.s, z5.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x28, #3, MUL VL]\n"
+ ".inst 0x6467e42b // bfmmla z11.s, z1.h, z7.h\n"
+ "addvl x28, x28, #4\n"
+ ".inst 0x6467e473 // bfmmla z19.s, z3.h, z7.h\n"
+ ".inst 0x6467e4bb // bfmmla z27.s, z5.h, z7.h\n"
+ ".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
+ ".inst 0x6466e477 // bfmmla z23.s, z3.h, z6.h\n"
+ ".inst 0x6466e4bf // bfmmla z31.s, z5.h, z6.h\n"
+ "bgt 66b\n"
+ "67:" // Height 5: Multiply loop: Single iteration only
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
+ "ld1rqh { z4.h }, p0/Z, [x22]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "ld1rqh { z5.h }, p0/Z, [x21]\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
+ "trn1 z4.d, z5.d, z6.d\n"
+ "trn2 z5.d, z5.d, z6.d\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ ".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
+ ".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
+ ".inst 0x6467e498 // bfmmla z24.s, z4.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "subs x26, x26, #0x4\n"
+ ".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
+ ".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
+ "addvl x11, x11, #2\n"
+ ".inst 0x6466e49c // bfmmla z28.s, z4.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
+ "addvl x10, x10, #2\n"
+ ".inst 0x6467e451 // bfmmla z17.s, z2.h, z7.h\n"
+ ".inst 0x6467e499 // bfmmla z25.s, z4.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
+ ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
+ ".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
+ ".inst 0x6466e49d // bfmmla z29.s, z4.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ ".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
+ "addvl x9, x9, #2\n"
+ ".inst 0x6467e452 // bfmmla z18.s, z2.h, z7.h\n"
+ ".inst 0x6467e49a // bfmmla z26.s, z4.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ ".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
+ ".inst 0x6466e456 // bfmmla z22.s, z2.h, z6.h\n"
+ ".inst 0x6466e49e // bfmmla z30.s, z4.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
+ ".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
+ "addvl x28, x28, #2\n"
+ ".inst 0x6467e453 // bfmmla z19.s, z2.h, z7.h\n"
+ ".inst 0x6467e49b // bfmmla z27.s, z4.h, z7.h\n"
+ ".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
+ ".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
+ ".inst 0x6466e49f // bfmmla z31.s, z4.h, z6.h\n"
+ "ble 68f\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ ".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
+ ".inst 0x6467e470 // bfmmla z16.s, z3.h, z7.h\n"
+ ".inst 0x6467e4b8 // bfmmla z24.s, z5.h, z7.h\n"
+ ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "addvl x11, x11, #2\n"
+ ".inst 0x6466e474 // bfmmla z20.s, z3.h, z6.h\n"
+ ".inst 0x6466e4bc // bfmmla z28.s, z5.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "addvl x10, x10, #2\n"
+ ".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
+ ".inst 0x6467e471 // bfmmla z17.s, z3.h, z7.h\n"
+ ".inst 0x6467e4b9 // bfmmla z25.s, z5.h, z7.h\n"
+ ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
+ ".inst 0x6466e475 // bfmmla z21.s, z3.h, z6.h\n"
+ ".inst 0x6466e4bd // bfmmla z29.s, z5.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "addvl x9, x9, #2\n"
+ ".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
+ ".inst 0x6467e472 // bfmmla z18.s, z3.h, z7.h\n"
+ ".inst 0x6467e4ba // bfmmla z26.s, z5.h, z7.h\n"
+ ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ ".inst 0x6466e476 // bfmmla z22.s, z3.h, z6.h\n"
+ ".inst 0x6466e4be // bfmmla z30.s, z5.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
+ "addvl x28, x28, #2\n"
+ ".inst 0x6467e42b // bfmmla z11.s, z1.h, z7.h\n"
+ ".inst 0x6467e473 // bfmmla z19.s, z3.h, z7.h\n"
+ ".inst 0x6467e4bb // bfmmla z27.s, z5.h, z7.h\n"
+ ".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
+ ".inst 0x6466e477 // bfmmla z23.s, z3.h, z6.h\n"
+ ".inst 0x6466e4bf // bfmmla z31.s, z5.h, z6.h\n"
+ "68:" // Height 5: Multiply loop: multiply skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 63b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "uzp1 z7.d, z8.d, z12.d\n"
+ "add x22, x23, x19, LSL #2\n"
+ "uzp2 z8.d, z8.d, z12.d\n"
+ "uzp1 z12.d, z9.d, z13.d\n"
+ "add x21, x22, x19, LSL #2\n"
+ "uzp2 z9.d, z9.d, z13.d\n"
+ "uzp1 z13.d, z10.d, z14.d\n"
+ "uzp2 z10.d, z10.d, z14.d\n"
+ "uzp1 z14.d, z11.d, z15.d\n"
+ "uzp2 z11.d, z11.d, z15.d\n"
+ "uzp1 z15.d, z16.d, z20.d\n"
+ "uzp2 z16.d, z16.d, z20.d\n"
+ "uzp1 z20.d, z17.d, z21.d\n"
+ "uzp2 z17.d, z17.d, z21.d\n"
+ "uzp1 z21.d, z18.d, z22.d\n"
+ "uzp2 z18.d, z18.d, z22.d\n"
+ "uzp1 z22.d, z19.d, z23.d\n"
+ "uzp2 z19.d, z19.d, z23.d\n"
+ "uzp1 z24.d, z24.d, z28.d\n"
+ "uzp1 z25.d, z25.d, z29.d\n"
+ "uzp1 z26.d, z26.d, z30.d\n"
+ "uzp1 z27.d, z27.d, z31.d\n"
+ "tbz %x[flags], #1, 69f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z7.s, p5/M, z7.s, z1.s\n"
+ "fmin z12.s, p5/M, z12.s, z1.s\n"
+ "fmin z13.s, p5/M, z13.s, z1.s\n"
+ "fmin z14.s, p5/M, z14.s, z1.s\n"
+ "fmin z8.s, p5/M, z8.s, z1.s\n"
+ "fmin z9.s, p5/M, z9.s, z1.s\n"
+ "fmin z10.s, p5/M, z10.s, z1.s\n"
+ "fmin z11.s, p5/M, z11.s, z1.s\n"
+ "fmin z15.s, p5/M, z15.s, z1.s\n"
+ "fmin z20.s, p5/M, z20.s, z1.s\n"
+ "fmin z21.s, p5/M, z21.s, z1.s\n"
+ "fmin z22.s, p5/M, z22.s, z1.s\n"
+ "fmin z16.s, p5/M, z16.s, z1.s\n"
+ "fmin z17.s, p5/M, z17.s, z1.s\n"
+ "fmin z18.s, p5/M, z18.s, z1.s\n"
+ "fmin z19.s, p5/M, z19.s, z1.s\n"
+ "fmin z24.s, p5/M, z24.s, z1.s\n"
+ "fmin z25.s, p5/M, z25.s, z1.s\n"
+ "fmin z26.s, p5/M, z26.s, z1.s\n"
+ "fmin z27.s, p5/M, z27.s, z1.s\n"
+ "fmax z7.s, p5/M, z7.s, z0.s\n"
+ "fmax z12.s, p5/M, z12.s, z0.s\n"
+ "fmax z13.s, p5/M, z13.s, z0.s\n"
+ "fmax z14.s, p5/M, z14.s, z0.s\n"
+ "fmax z8.s, p5/M, z8.s, z0.s\n"
+ "fmax z9.s, p5/M, z9.s, z0.s\n"
+ "fmax z10.s, p5/M, z10.s, z0.s\n"
+ "fmax z11.s, p5/M, z11.s, z0.s\n"
+ "fmax z15.s, p5/M, z15.s, z0.s\n"
+ "fmax z20.s, p5/M, z20.s, z0.s\n"
+ "fmax z21.s, p5/M, z21.s, z0.s\n"
+ "fmax z22.s, p5/M, z22.s, z0.s\n"
+ "fmax z16.s, p5/M, z16.s, z0.s\n"
+ "fmax z17.s, p5/M, z17.s, z0.s\n"
+ "fmax z18.s, p5/M, z18.s, z0.s\n"
+ "fmax z19.s, p5/M, z19.s, z0.s\n"
+ "fmax z24.s, p5/M, z24.s, z0.s\n"
+ "fmax z25.s, p5/M, z25.s, z0.s\n"
+ "fmax z26.s, p5/M, z26.s, z0.s\n"
+ "fmax z27.s, p5/M, z27.s, z0.s\n"
+ "69:" // Height 5: No activation
+ "st1w { z7.s }, p4, [x12]\n"
+ "st1w { z12.s }, p3, [x12, #1, MUL VL]\n"
+ "st1w { z13.s }, p2, [x12, #2, MUL VL]\n"
+ "st1w { z14.s }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z8.s }, p4, [x24]\n"
+ "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z15.s }, p4, [x23]\n"
+ "st1w { z20.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z22.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x22]\n"
+ "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x21]\n"
+ "st1w { z25.s }, p3, [x21, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x21, #3, MUL VL]\n"
+ "70:" // Height 5: Writeback done
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 58b\n"
+ "b 86f\n"
+ "71:" // Height 6
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x20, #0x18\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x14, %x[bias]\n"
+ "mov x12, %x[output_ptr]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
+ "72:" // Height 6: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 73f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 73f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 73f\n"
+ "mov x10, x11\n"
+ "73:" // Height 6: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 74f\n"
+ "ld1w { z8.s }, p5/Z, [x14]\n"
+ "ld1w { z9.s }, p5/Z, [x14, #1, MUL VL]\n"
+ "zip2 z12.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
+ "ld1w { z10.s }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x14, #3, MUL VL]\n"
+ "zip2 z13.d, z9.d, z9.d\n"
+ "zip1 z9.d, z9.d, z9.d\n"
+ "zip2 z14.d, z10.d, z10.d\n"
+ "zip1 z10.d, z10.d, z10.d\n"
+ "addvl x14, x14, #4\n"
+ "zip2 z15.d, z11.d, z11.d\n"
+ "zip1 z11.d, z11.d, z11.d\n"
+ "mov z16.d, z8.d\n"
+ "mov z20.d, z12.d\n"
+ "mov z17.d, z9.d\n"
+ "mov z21.d, z13.d\n"
+ "mov z18.d, z10.d\n"
+ "mov z22.d, z14.d\n"
+ "mov z19.d, z11.d\n"
+ "mov z23.d, z15.d\n"
+ "mov z24.d, z8.d\n"
+ "mov z28.d, z12.d\n"
+ "mov z25.d, z9.d\n"
+ "mov z29.d, z13.d\n"
+ "mov z26.d, z10.d\n"
+ "mov z30.d, z14.d\n"
+ "mov z27.d, z11.d\n"
+ "mov z31.d, z15.d\n"
+ "b 76f\n"
+ "74:" // Height 6: no bias
+ "tbz %x[flags], #0, 75f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z9.s }, p4/Z, [x12]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z10.s }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "ld1w { z16.s }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "zip1 z8.d, z9.d, z12.d\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "zip2 z12.d, z9.d, z12.d\n"
+ "zip1 z9.d, z10.d, z13.d\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z17.s }, p4/Z, [x23]\n"
+ "zip2 z13.d, z10.d, z13.d\n"
+ "zip1 z10.d, z11.d, z14.d\n"
+ "ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "zip2 z14.d, z11.d, z14.d\n"
+ "zip1 z11.d, z16.d, z15.d\n"
+ "ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p4/Z, [x22]\n"
+ "zip2 z15.d, z16.d, z15.d\n"
+ "zip1 z16.d, z17.d, z20.d\n"
+ "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "zip2 z20.d, z17.d, z20.d\n"
+ "zip1 z17.d, z18.d, z21.d\n"
+ "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z25.s }, p4/Z, [x21]\n"
+ "zip2 z21.d, z18.d, z21.d\n"
+ "zip1 z18.d, z19.d, z22.d\n"
+ "ld1w { z26.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z27.s }, p2/Z, [x21, #2, MUL VL]\n"
+ "zip2 z22.d, z19.d, z22.d\n"
+ "zip1 z19.d, z24.d, z23.d\n"
+ "ld1w { z6.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "ld1w { z28.s }, p4/Z, [x20]\n"
+ "zip2 z23.d, z24.d, z23.d\n"
+ "zip1 z24.d, z25.d, z28.d\n"
+ "ld1w { z29.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z30.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "zip2 z28.d, z25.d, z28.d\n"
+ "zip1 z25.d, z26.d, z29.d\n"
+ "ld1w { z31.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip2 z29.d, z26.d, z29.d\n"
+ "zip1 z26.d, z27.d, z30.d\n"
+ "zip2 z30.d, z27.d, z30.d\n"
+ "zip1 z27.d, z6.d, z31.d\n"
+ "zip2 z31.d, z6.d, z31.d\n"
+ "b 76f\n"
+ "75:" // Height 6: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "mov z19.b, #0x0\n"
+ "mov z20.b, #0x0\n"
+ "mov z21.b, #0x0\n"
+ "mov z22.b, #0x0\n"
+ "mov z23.b, #0x0\n"
+ "mov z24.b, #0x0\n"
+ "mov z25.b, #0x0\n"
+ "mov z26.b, #0x0\n"
+ "mov z27.b, #0x0\n"
+ "mov z28.b, #0x0\n"
+ "mov z29.b, #0x0\n"
+ "mov z30.b, #0x0\n"
+ "mov z31.b, #0x0\n"
+ "76:" // Height 6: setup done
+ "mov x27, #0x0\n"
+ "77:" // Height 6: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 78f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 79f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
+ "add x20, x20, x19, LSL #1\n"
+ "b 79f\n"
+ "78:" // Height 6: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
+ "79:" // Height 6: input setup done
+ "cmp x26, #0x8\n"
+ "ble 81f\n"
+ "80:" // Height 6: Multiply loop: Main loop head
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
+ "ld1rqh { z4.h }, p0/Z, [x22]\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "ld1rqh { z5.h }, p0/Z, [x21]\n"
+ "ld1rqh { z6.h }, p0/Z, [x20]\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ "trn1 z4.d, z5.d, z6.d\n"
+ "trn2 z5.d, z5.d, z6.d\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ ".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
+ ".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
+ ".inst 0x6467e498 // bfmmla z24.s, z4.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "sub x26, x26, #0x8\n"
+ ".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
+ ".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
+ "cmp x26, #0x8\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0x6466e49c // bfmmla z28.s, z4.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x6467e451 // bfmmla z17.s, z2.h, z7.h\n"
+ ".inst 0x6467e499 // bfmmla z25.s, z4.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
+ ".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x6466e49d // bfmmla z29.s, z4.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ ".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
+ "add x20, x20, #0x10\n"
+ ".inst 0x6467e452 // bfmmla z18.s, z2.h, z7.h\n"
+ ".inst 0x6467e49a // bfmmla z26.s, z4.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ ".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
+ ".inst 0x6466e456 // bfmmla z22.s, z2.h, z6.h\n"
+ ".inst 0x6466e49e // bfmmla z30.s, z4.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
+ ".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
+ ".inst 0x6467e453 // bfmmla z19.s, z2.h, z7.h\n"
+ ".inst 0x6467e49b // bfmmla z27.s, z4.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x11, #2, MUL VL]\n"
+ ".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
+ ".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
+ ".inst 0x6466e49f // bfmmla z31.s, z4.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x11, #3, MUL VL]\n"
+ ".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
+ "addvl x11, x11, #4\n"
+ ".inst 0x6467e470 // bfmmla z16.s, z3.h, z7.h\n"
+ ".inst 0x6467e4b8 // bfmmla z24.s, z5.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
+ ".inst 0x6466e474 // bfmmla z20.s, z3.h, z6.h\n"
+ ".inst 0x6466e4bc // bfmmla z28.s, z5.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
+ "addvl x10, x10, #4\n"
+ ".inst 0x6467e471 // bfmmla z17.s, z3.h, z7.h\n"
+ ".inst 0x6467e4b9 // bfmmla z25.s, z5.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x9, #2, MUL VL]\n"
+ ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
+ ".inst 0x6466e475 // bfmmla z21.s, z3.h, z6.h\n"
+ ".inst 0x6466e4bd // bfmmla z29.s, z5.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x9, #3, MUL VL]\n"
+ ".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
+ "addvl x9, x9, #4\n"
+ ".inst 0x6467e472 // bfmmla z18.s, z3.h, z7.h\n"
+ ".inst 0x6467e4ba // bfmmla z26.s, z5.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x28, #2, MUL VL]\n"
+ ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
+ ".inst 0x6466e476 // bfmmla z22.s, z3.h, z6.h\n"
+ ".inst 0x6466e4be // bfmmla z30.s, z5.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x28, #3, MUL VL]\n"
+ ".inst 0x6467e42b // bfmmla z11.s, z1.h, z7.h\n"
+ "addvl x28, x28, #4\n"
+ ".inst 0x6467e473 // bfmmla z19.s, z3.h, z7.h\n"
+ ".inst 0x6467e4bb // bfmmla z27.s, z5.h, z7.h\n"
+ ".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
+ ".inst 0x6466e477 // bfmmla z23.s, z3.h, z6.h\n"
+ ".inst 0x6466e4bf // bfmmla z31.s, z5.h, z6.h\n"
+ "bgt 80b\n"
+ "81:" // Height 6: Multiply loop: Single iteration only
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "ld1rqh { z2.h }, p0/Z, [x24]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
+ "ld1rqh { z4.h }, p0/Z, [x22]\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "trn1 z2.d, z3.d, z4.d\n"
+ "ld1rqh { z5.h }, p0/Z, [x21]\n"
+ "ld1rqh { z6.h }, p0/Z, [x20]\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ "trn1 z4.d, z5.d, z6.d\n"
+ "trn2 z5.d, z5.d, z6.d\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ ".inst 0x6467e408 // bfmmla z8.s, z0.h, z7.h\n"
+ ".inst 0x6467e450 // bfmmla z16.s, z2.h, z7.h\n"
+ ".inst 0x6467e498 // bfmmla z24.s, z4.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "subs x26, x26, #0x4\n"
+ ".inst 0x6466e40c // bfmmla z12.s, z0.h, z6.h\n"
+ ".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
+ "addvl x11, x11, #2\n"
+ ".inst 0x6466e49c // bfmmla z28.s, z4.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
+ "addvl x10, x10, #2\n"
+ ".inst 0x6467e451 // bfmmla z17.s, z2.h, z7.h\n"
+ ".inst 0x6467e499 // bfmmla z25.s, z4.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
+ ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
+ ".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
+ ".inst 0x6466e49d // bfmmla z29.s, z4.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ ".inst 0x6467e40a // bfmmla z10.s, z0.h, z7.h\n"
+ "addvl x9, x9, #2\n"
+ ".inst 0x6467e452 // bfmmla z18.s, z2.h, z7.h\n"
+ ".inst 0x6467e49a // bfmmla z26.s, z4.h, z7.h\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ ".inst 0x6466e40e // bfmmla z14.s, z0.h, z6.h\n"
+ ".inst 0x6466e456 // bfmmla z22.s, z2.h, z6.h\n"
+ ".inst 0x6466e49e // bfmmla z30.s, z4.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
+ ".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
+ "addvl x28, x28, #2\n"
+ ".inst 0x6467e453 // bfmmla z19.s, z2.h, z7.h\n"
+ ".inst 0x6467e49b // bfmmla z27.s, z4.h, z7.h\n"
+ ".inst 0x6466e40f // bfmmla z15.s, z0.h, z6.h\n"
+ ".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
+ ".inst 0x6466e49f // bfmmla z31.s, z4.h, z6.h\n"
+ "ble 82f\n"
+ "ld1h { z7.h }, p5/Z, [x11]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ ".inst 0x6467e428 // bfmmla z8.s, z1.h, z7.h\n"
+ ".inst 0x6467e470 // bfmmla z16.s, z3.h, z7.h\n"
+ ".inst 0x6467e4b8 // bfmmla z24.s, z5.h, z7.h\n"
+ ".inst 0x6466e42c // bfmmla z12.s, z1.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "addvl x11, x11, #2\n"
+ ".inst 0x6466e474 // bfmmla z20.s, z3.h, z6.h\n"
+ ".inst 0x6466e4bc // bfmmla z28.s, z5.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "addvl x10, x10, #2\n"
+ ".inst 0x6467e429 // bfmmla z9.s, z1.h, z7.h\n"
+ ".inst 0x6467e471 // bfmmla z17.s, z3.h, z7.h\n"
+ ".inst 0x6467e4b9 // bfmmla z25.s, z5.h, z7.h\n"
+ ".inst 0x6466e42d // bfmmla z13.s, z1.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x9]\n"
+ ".inst 0x6466e475 // bfmmla z21.s, z3.h, z6.h\n"
+ ".inst 0x6466e4bd // bfmmla z29.s, z5.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "addvl x9, x9, #2\n"
+ ".inst 0x6467e42a // bfmmla z10.s, z1.h, z7.h\n"
+ ".inst 0x6467e472 // bfmmla z18.s, z3.h, z7.h\n"
+ ".inst 0x6467e4ba // bfmmla z26.s, z5.h, z7.h\n"
+ ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ ".inst 0x6466e476 // bfmmla z22.s, z3.h, z6.h\n"
+ ".inst 0x6466e4be // bfmmla z30.s, z5.h, z6.h\n"
+ "ld1h { z6.h }, p5/Z, [x28, #1, MUL VL]\n"
+ "addvl x28, x28, #2\n"
+ ".inst 0x6467e42b // bfmmla z11.s, z1.h, z7.h\n"
+ ".inst 0x6467e473 // bfmmla z19.s, z3.h, z7.h\n"
+ ".inst 0x6467e4bb // bfmmla z27.s, z5.h, z7.h\n"
+ ".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
+ ".inst 0x6466e477 // bfmmla z23.s, z3.h, z6.h\n"
+ ".inst 0x6466e4bf // bfmmla z31.s, z5.h, z6.h\n"
+ "82:" // Height 6: Multiply loop: multiply skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 77b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "uzp1 z7.d, z8.d, z12.d\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "uzp2 z8.d, z8.d, z12.d\n"
+ "uzp1 z12.d, z9.d, z13.d\n"
+ "uzp2 z9.d, z9.d, z13.d\n"
+ "uzp1 z13.d, z10.d, z14.d\n"
+ "add x20, x21, x19, LSL #2\n"
+ "uzp2 z10.d, z10.d, z14.d\n"
+ "uzp1 z14.d, z11.d, z15.d\n"
+ "uzp2 z11.d, z11.d, z15.d\n"
+ "uzp1 z15.d, z16.d, z20.d\n"
+ "uzp2 z16.d, z16.d, z20.d\n"
+ "uzp1 z20.d, z17.d, z21.d\n"
+ "uzp2 z17.d, z17.d, z21.d\n"
+ "uzp1 z21.d, z18.d, z22.d\n"
+ "uzp2 z18.d, z18.d, z22.d\n"
+ "uzp1 z22.d, z19.d, z23.d\n"
+ "uzp2 z19.d, z19.d, z23.d\n"
+ "uzp1 z23.d, z24.d, z28.d\n"
+ "uzp2 z24.d, z24.d, z28.d\n"
+ "uzp1 z28.d, z25.d, z29.d\n"
+ "uzp2 z25.d, z25.d, z29.d\n"
+ "uzp1 z29.d, z26.d, z30.d\n"
+ "uzp2 z26.d, z26.d, z30.d\n"
+ "uzp1 z30.d, z27.d, z31.d\n"
+ "uzp2 z27.d, z27.d, z31.d\n"
+ "tbz %x[flags], #1, 83f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z7.s, p5/M, z7.s, z1.s\n"
+ "fmin z12.s, p5/M, z12.s, z1.s\n"
+ "fmin z13.s, p5/M, z13.s, z1.s\n"
+ "fmin z14.s, p5/M, z14.s, z1.s\n"
+ "fmin z8.s, p5/M, z8.s, z1.s\n"
+ "fmin z9.s, p5/M, z9.s, z1.s\n"
+ "fmin z10.s, p5/M, z10.s, z1.s\n"
+ "fmin z11.s, p5/M, z11.s, z1.s\n"
+ "fmin z15.s, p5/M, z15.s, z1.s\n"
+ "fmin z20.s, p5/M, z20.s, z1.s\n"
+ "fmin z21.s, p5/M, z21.s, z1.s\n"
+ "fmin z22.s, p5/M, z22.s, z1.s\n"
+ "fmin z16.s, p5/M, z16.s, z1.s\n"
+ "fmin z17.s, p5/M, z17.s, z1.s\n"
+ "fmin z18.s, p5/M, z18.s, z1.s\n"
+ "fmin z19.s, p5/M, z19.s, z1.s\n"
+ "fmin z23.s, p5/M, z23.s, z1.s\n"
+ "fmin z28.s, p5/M, z28.s, z1.s\n"
+ "fmin z29.s, p5/M, z29.s, z1.s\n"
+ "fmin z30.s, p5/M, z30.s, z1.s\n"
+ "fmin z24.s, p5/M, z24.s, z1.s\n"
+ "fmin z25.s, p5/M, z25.s, z1.s\n"
+ "fmin z26.s, p5/M, z26.s, z1.s\n"
+ "fmin z27.s, p5/M, z27.s, z1.s\n"
+ "fmax z7.s, p5/M, z7.s, z0.s\n"
+ "fmax z12.s, p5/M, z12.s, z0.s\n"
+ "fmax z13.s, p5/M, z13.s, z0.s\n"
+ "fmax z14.s, p5/M, z14.s, z0.s\n"
+ "fmax z8.s, p5/M, z8.s, z0.s\n"
+ "fmax z9.s, p5/M, z9.s, z0.s\n"
+ "fmax z10.s, p5/M, z10.s, z0.s\n"
+ "fmax z11.s, p5/M, z11.s, z0.s\n"
+ "fmax z15.s, p5/M, z15.s, z0.s\n"
+ "fmax z20.s, p5/M, z20.s, z0.s\n"
+ "fmax z21.s, p5/M, z21.s, z0.s\n"
+ "fmax z22.s, p5/M, z22.s, z0.s\n"
+ "fmax z16.s, p5/M, z16.s, z0.s\n"
+ "fmax z17.s, p5/M, z17.s, z0.s\n"
+ "fmax z18.s, p5/M, z18.s, z0.s\n"
+ "fmax z19.s, p5/M, z19.s, z0.s\n"
+ "fmax z23.s, p5/M, z23.s, z0.s\n"
+ "fmax z28.s, p5/M, z28.s, z0.s\n"
+ "fmax z29.s, p5/M, z29.s, z0.s\n"
+ "fmax z30.s, p5/M, z30.s, z0.s\n"
+ "fmax z24.s, p5/M, z24.s, z0.s\n"
+ "fmax z25.s, p5/M, z25.s, z0.s\n"
+ "fmax z26.s, p5/M, z26.s, z0.s\n"
+ "fmax z27.s, p5/M, z27.s, z0.s\n"
+ "83:" // Height 6: No activation
+ "st1w { z7.s }, p4, [x12]\n"
+ "st1w { z12.s }, p3, [x12, #1, MUL VL]\n"
+ "st1w { z13.s }, p2, [x12, #2, MUL VL]\n"
+ "st1w { z14.s }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z8.s }, p4, [x24]\n"
+ "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z15.s }, p4, [x23]\n"
+ "st1w { z20.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z22.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x22]\n"
+ "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z23.s }, p4, [x21]\n"
+ "st1w { z28.s }, p3, [x21, #1, MUL VL]\n"
+ "st1w { z29.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z30.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x20]\n"
+ "st1w { z25.s }, p3, [x20, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x20, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x20, #3, MUL VL]\n"
+ "84:" // Height 6: Writeback done
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 72b\n"
+ "subs %x[M], %x[M], #0x6\n"
+ "beq 86f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 85f\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "b 1b\n"
+ "85:" // Update direct input
+ "mov x19, #0xc\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
+ "b 1b\n"
+ "86:" // Exit
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
+ : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace arm_gemm
+#endif // ARM_COMPUTE_ENABLE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL.hpp
new file mode 100644
index 0000000000..acbc619eed
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL.hpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#pragma once
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include "../std_transforms_sve.hpp"
+#include "../kernel_weight_format.hpp"
+#include "../performance_parameters.hpp"
+
+#define ARGLIST \
+ unsigned int, const unsigned int *, \
+ IndirectInputArg<__fp16>, \
+ size_t, size_t, \
+ const __fp16 *, \
+ size_t, \
+ IndirectOutputArg<__fp16>, \
+ const __fp16 *, Activation, bool
+
+namespace arm_gemm
+{
+// Actual kernel implementations
+void sve_ffhybrid_fp16_mla_6x4VL( ARGLIST );
+void sve_ffhybrid_fp16_mla_6x4VL_a64fx( ARGLIST );
+
+class cls_sve_ffhybrid_fp16_mla_6x4VL
+{
+public:
+ typedef __fp16 lhs_operand_type;
+ typedef __fp16 rhs_operand_type;
+ typedef __fp16 result_type;
+
+ typedef void (*kern_type)( ARGLIST );
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 6;
+ }
+ static unsigned int stripe_width()
+ {
+ return get_vector_length<__fp16>() * 1;
+ }
+
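+ // Fixed-format kernels read B directly in a pre-arranged layout instead of
+ // repacking it at run time. VL1VL_BL16 is assumed here to name that layout
+ // (vector-length blocking of 16-bit elements); see kernel_weight_format.hpp.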
+ static KernelWeightFormat kernel_weight_format()
+ {
+ return KernelWeightFormat::VL1VL_BL16;
+ }
+
+ static unsigned int out_width()
+ {
+ return get_vector_length<__fp16>() * 4;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 1;
+ }
+
+ static constexpr bool supports_accumulate()
+ {
+ return true;
+ }
+
+ StdTransformsSVE<rhs_operand_type, result_type, 6, 4, 1> transforms = {};
+ template<typename T>
+ static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
+ {
+ if (std::is_same<T, __fp16>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 31.51 };
+ }
+ }
+
+ return { 1.0 };
+ }
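+ // The single figure above is assumed to be a MACs-per-cycle estimate used
+ // by the gemm selection heuristics to rank candidate kernels; 1.0 is a
+ // neutral fallback for other types.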
+
+ // Default to the generic kernel
+ kern_type kernel=sve_ffhybrid_fp16_mla_6x4VL;
+ cls_sve_ffhybrid_fp16_mla_6x4VL(const CPUInfo *ci)
+ {
+ switch(ci->get_cpu_model()) {
+ default:
+ break;
+ case CPUModel::A64FX:
+ kernel=sve_ffhybrid_fp16_mla_6x4VL_a64fx;
+ break;
+ }
+ }
+};
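+
+// Rough sketch (an assumption, not part of this patch) of how the factory in
+// gemm_fp16.cpp is expected to wrap this class once the selection interface
+// lands in the follow-up patch:
+//
+//   GemmImplementation<__fp16, __fp16>::with_estimate(
+//       GemmMethod::GEMM_HYBRID, "sve_ffhybrid_fp16_mla_6x4VL",
+//       [](const GemmArgs &args) { return args._ci->has_sve(); },
+//       [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_ffhybrid_fp16_mla_6x4VL, __fp16, __fp16>::estimate_cycles<__fp16>(args); },
+//       [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_ffhybrid_fp16_mla_6x4VL, __fp16, __fp16>(args); });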
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+#endif // ARM_COMPUTE_ENABLE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/a64fx.cpp
new file mode 100644
index 0000000000..181022bf51
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/a64fx.cpp
@@ -0,0 +1,1530 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include "arm_gemm.hpp"
+#include "../../utils.hpp"
+
+#include <cassert>
+#include <limits>
+
+namespace arm_gemm {
+
+void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
+ unsigned int num_strings, const unsigned int *string_lengths, IndirectInputArg<__fp16> A_arg,
+ size_t M, size_t N, const __fp16 *B_ptr, size_t B_stride, IndirectOutputArg<__fp16> output_arg,
+ const __fp16 *bias, Activation act, bool accumulate
+)
+{
+ struct KernelArgs {
+ __fp16 maxval = static_cast<__fp16>(std::numeric_limits<float>::infinity());
+ __fp16 minval = - static_cast<__fp16>(std::numeric_limits<float>::infinity());
+ unsigned int num_strings = {};
+ const unsigned int *string_lengths = {};
+ size_t N = {};
+ const __fp16 *B_ptr = {};
+ const __fp16 *cur_B_ptr = {};
+ size_t B_stride = {};
+ size_t output_offset = {};
+ size_t input_initial_col = {};
+ size_t input_offset = {};
+ } ka;
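+ // The KernelArgs block is handed to the inline assembly via args_ptr; the
+ // offsetof() operands let the asm address each field relative to that base.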
+
+ unsigned long flags=0;
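+ // Bits of 'flags' as tested by the assembly (tbz): bit 0 = accumulate,
+ // bit 1 = min/max clamp, bit 2 = indirect output, bit 3 = indirect input.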
+ void *output_ptr;
+ void *input_ptr;
+
+ if (output_arg.is_indirect) {
+ output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_offset=output_arg.indirect.offset;
+ flags |= 0x4;
+ } else {
+ output_ptr=(void *)(output_arg.direct.base);
+ ka.output_offset=output_arg.direct.stride;
+ }
+
+ if (A_arg.is_indirect) {
+ input_ptr=(void *)(A_arg.indirect.ptr);
+ ka.input_offset=A_arg.indirect.start_row;
+ ka.input_initial_col=A_arg.indirect.start_col;
+ flags |= 0x8;
+ } else {
+ assert(num_strings==1);
+ input_ptr=(void *)(A_arg.direct.base);
+ ka.input_offset=A_arg.direct.stride;
+ }
+ if (accumulate) {
+ flags |= 0x1;
+ }
+ ka.num_strings = num_strings;
+ ka.string_lengths = string_lengths;
+ ka.N = N;
+ ka.B_ptr = B_ptr;
+ ka.B_stride = B_stride;
+ switch(act.type) {
+ default:
+ case Activation::Type::None:
+ break;
+ case Activation::Type::BoundedReLU:
+ ka.maxval = static_cast<__fp16>(act.param1);
+ /* fall through */
+ case Activation::Type::ReLU:
+ ka.minval = 0;
+ flags |= 0x2;
+ break;
+ }
+ __asm__ __volatile__(
+ "ptrue p4.b\n"
+ "1:" // Row loop
+ "cmp %x[M], #0x6\n"
+ "bge 66f\n"
+ "cmp %x[M], #0x4\n"
+ "bgt 53f\n"
+ "beq 40f\n"
+ "cmp %x[M], #0x2\n"
+ "bgt 27f\n"
+ "beq 14f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "2:" // Height 1: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cnth x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 3f\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 3f\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 3f\n"
+ "mov x10, x11\n"
+ "3:" // Height 1: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p3.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p0.h, x19, x13\n"
+ "cbz x14, 4f\n"
+ "ld1h { z8.h }, p4/Z, [x14]\n"
+ "ld1h { z9.h }, p4/Z, [x14, #1, MUL VL]\n"
+ "ld1h { z10.h }, p4/Z, [x14, #2, MUL VL]\n"
+ "ld1h { z11.h }, p4/Z, [x14, #3, MUL VL]\n"
+ "addvl x14, x14, #4\n"
+ "b 6f\n"
+ "4:" // Height 1: no bias
+ "tbz %x[flags], #0, 5f\n"
+ "ld1h { z8.h }, p3/Z, [x12]\n"
+ "ld1h { z9.h }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z10.h }, p1/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z11.h }, p0/Z, [x12, #3, MUL VL]\n"
+ "b 6f\n"
+ "5:" // Height 1: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "6:" // Height 1: setup done
+ "mov x27, #0x0\n"
+ "7:" // Height 1: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 8f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 9f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "b 9f\n"
+ "8:" // Height 1: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "9:" // Height 1: input setup done
+ "subs x26, x26, #0x1\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1h { z6.h }, p4/Z, [x11]\n"
+ "ld1h { z7.h }, p4/Z, [x10]\n"
+ "ble 11f\n"
+ "10:" // Height 1: Multiply loop: Main loop
+ "fmla z8.h, p4/M, z6.h, z0.h\n"
+ "fmla z9.h, p4/M, z7.h, z0.h\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "ld1h { z7.h }, p4/Z, [x28]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z10.h, p4/M, z6.h, z0.h\n"
+ "fmla z11.h, p4/M, z7.h, z0.h\n"
+ "add x25, x25, #0x2\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1h { z6.h }, p4/Z, [x11]\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "ld1h { z7.h }, p4/Z, [x10]\n"
+ "bgt 10b\n"
+ "11:" // Height 1: Multiply loop: Main loop skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "fmla z8.h, p4/M, z6.h, z0.h\n"
+ "fmla z9.h, p4/M, z7.h, z0.h\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "ld1h { z7.h }, p4/Z, [x28]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "fmla z10.h, p4/M, z6.h, z0.h\n"
+ "fmla z11.h, p4/M, z7.h, z0.h\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "bne 7b\n"
+ "tbz %x[flags], #1, 12f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p4/Z, [x19]\n"
+ "fmin z8.h, p4/M, z8.h, z1.h\n"
+ "fmin z9.h, p4/M, z9.h, z1.h\n"
+ "fmin z10.h, p4/M, z10.h, z1.h\n"
+ "fmin z11.h, p4/M, z11.h, z1.h\n"
+ "fmax z8.h, p4/M, z8.h, z0.h\n"
+ "fmax z9.h, p4/M, z9.h, z0.h\n"
+ "fmax z10.h, p4/M, z10.h, z0.h\n"
+ "fmax z11.h, p4/M, z11.h, z0.h\n"
+ "12:" // Height 1: No activation
+ "st1h { z8.h }, p3, [x12]\n"
+ "st1h { z9.h }, p2, [x12, #1, MUL VL]\n"
+ "st1h { z10.h }, p1, [x12, #2, MUL VL]\n"
+ "st1h { z11.h }, p0, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "13:" // Height 1: Writeback done
+ "dech x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 2b\n"
+ "b 80f\n"
+ "14:" // Height 2
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "15:" // Height 2: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cnth x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 16f\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 16f\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 16f\n"
+ "mov x10, x11\n"
+ "16:" // Height 2: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p3.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p0.h, x19, x13\n"
+ "cbz x14, 17f\n"
+ "ld1h { z8.h }, p4/Z, [x14]\n"
+ "ld1h { z9.h }, p4/Z, [x14, #1, MUL VL]\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
+ "ld1h { z10.h }, p4/Z, [x14, #2, MUL VL]\n"
+ "ld1h { z11.h }, p4/Z, [x14, #3, MUL VL]\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
+ "addvl x14, x14, #4\n"
+ "b 19f\n"
+ "17:" // Height 2: no bias
+ "tbz %x[flags], #0, 18f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "ld1h { z8.h }, p3/Z, [x12]\n"
+ "ld1h { z9.h }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z10.h }, p1/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z11.h }, p0/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z12.h }, p3/Z, [x24]\n"
+ "ld1h { z13.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p0/Z, [x24, #3, MUL VL]\n"
+ "b 19f\n"
+ "18:" // Height 2: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "19:" // Height 2: setup done
+ "mov x27, #0x0\n"
+ "20:" // Height 2: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 21f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 22f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "b 22f\n"
+ "21:" // Height 2: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "22:" // Height 2: input setup done
+ "subs x26, x26, #0x1\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
+ "ld1h { z6.h }, p4/Z, [x11]\n"
+ "ld1h { z7.h }, p4/Z, [x10]\n"
+ "ble 24f\n"
+ "23:" // Height 2: Multiply loop: Main loop
+ "fmla z8.h, p4/M, z6.h, z0.h\n"
+ "fmla z12.h, p4/M, z6.h, z1.h\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z9.h, p4/M, z7.h, z0.h\n"
+ "fmla z13.h, p4/M, z7.h, z1.h\n"
+ "ld1h { z7.h }, p4/Z, [x28]\n"
+ "addvl x10, x10, #1\n"
+ "add x25, x25, #0x2\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z10.h, p4/M, z6.h, z0.h\n"
+ "fmla z14.h, p4/M, z6.h, z1.h\n"
+ "add x24, x24, #0x2\n"
+ "fmla z11.h, p4/M, z7.h, z0.h\n"
+ "fmla z15.h, p4/M, z7.h, z1.h\n"
+ "addvl x9, x9, #1\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
+ "addvl x28, x28, #1\n"
+ "ld1h { z6.h }, p4/Z, [x11]\n"
+ "ld1h { z7.h }, p4/Z, [x10]\n"
+ "bgt 23b\n"
+ "24:" // Height 2: Multiply loop: Main loop skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "fmla z8.h, p4/M, z6.h, z0.h\n"
+ "fmla z12.h, p4/M, z6.h, z1.h\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "fmla z9.h, p4/M, z7.h, z0.h\n"
+ "fmla z13.h, p4/M, z7.h, z1.h\n"
+ "ld1h { z7.h }, p4/Z, [x28]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "fmla z10.h, p4/M, z6.h, z0.h\n"
+ "fmla z14.h, p4/M, z6.h, z1.h\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z11.h, p4/M, z7.h, z0.h\n"
+ "fmla z15.h, p4/M, z7.h, z1.h\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "bne 20b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "tbz %x[flags], #1, 25f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p4/Z, [x19]\n"
+ "fmin z8.h, p4/M, z8.h, z1.h\n"
+ "fmin z9.h, p4/M, z9.h, z1.h\n"
+ "fmin z10.h, p4/M, z10.h, z1.h\n"
+ "fmin z11.h, p4/M, z11.h, z1.h\n"
+ "fmin z12.h, p4/M, z12.h, z1.h\n"
+ "fmin z13.h, p4/M, z13.h, z1.h\n"
+ "fmin z14.h, p4/M, z14.h, z1.h\n"
+ "fmin z15.h, p4/M, z15.h, z1.h\n"
+ "fmax z8.h, p4/M, z8.h, z0.h\n"
+ "fmax z9.h, p4/M, z9.h, z0.h\n"
+ "fmax z10.h, p4/M, z10.h, z0.h\n"
+ "fmax z11.h, p4/M, z11.h, z0.h\n"
+ "fmax z12.h, p4/M, z12.h, z0.h\n"
+ "fmax z13.h, p4/M, z13.h, z0.h\n"
+ "fmax z14.h, p4/M, z14.h, z0.h\n"
+ "fmax z15.h, p4/M, z15.h, z0.h\n"
+ "25:" // Height 2: No activation
+ "st1h { z8.h }, p3, [x12]\n"
+ "st1h { z9.h }, p2, [x12, #1, MUL VL]\n"
+ "st1h { z10.h }, p1, [x12, #2, MUL VL]\n"
+ "st1h { z11.h }, p0, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1h { z12.h }, p3, [x24]\n"
+ "st1h { z13.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p1, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p0, [x24, #3, MUL VL]\n"
+ "26:" // Height 2: Writeback done
+ "dech x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 15b\n"
+ "b 80f\n"
+ "27:" // Height 3
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "28:" // Height 3: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cnth x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 29f\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 29f\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 29f\n"
+ "mov x10, x11\n"
+ "29:" // Height 3: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p3.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p0.h, x19, x13\n"
+ "cbz x14, 30f\n"
+ "ld1h { z8.h }, p4/Z, [x14]\n"
+ "ld1h { z9.h }, p4/Z, [x14, #1, MUL VL]\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
+ "ld1h { z10.h }, p4/Z, [x14, #2, MUL VL]\n"
+ "ld1h { z11.h }, p4/Z, [x14, #3, MUL VL]\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
+ "mov z16.d, z8.d\n"
+ "mov z17.d, z9.d\n"
+ "addvl x14, x14, #4\n"
+ "mov z18.d, z10.d\n"
+ "mov z19.d, z11.d\n"
+ "b 32f\n"
+ "30:" // Height 3: no bias
+ "tbz %x[flags], #0, 31f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "ld1h { z8.h }, p3/Z, [x12]\n"
+ "ld1h { z9.h }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z10.h }, p1/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z11.h }, p0/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z12.h }, p3/Z, [x24]\n"
+ "ld1h { z13.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z16.h }, p3/Z, [x23]\n"
+ "ld1h { z17.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z18.h }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z19.h }, p0/Z, [x23, #3, MUL VL]\n"
+ "b 32f\n"
+ "31:" // Height 3: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "mov z19.b, #0x0\n"
+ "32:" // Height 3: setup done
+ "mov x27, #0x0\n"
+ "33:" // Height 3: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 34f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 35f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "b 35f\n"
+ "34:" // Height 3: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "35:" // Height 3: input setup done
+ "subs x26, x26, #0x1\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
+ "ld1rh { z2.h }, p4/Z, [x23]\n"
+ "ld1h { z6.h }, p4/Z, [x11]\n"
+ "ld1h { z7.h }, p4/Z, [x10]\n"
+ "ble 37f\n"
+ "36:" // Height 3: Multiply loop: Main loop
+ "fmla z8.h, p4/M, z6.h, z0.h\n"
+ "fmla z12.h, p4/M, z6.h, z1.h\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z16.h, p4/M, z6.h, z2.h\n"
+ "fmla z9.h, p4/M, z7.h, z0.h\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "add x25, x25, #0x2\n"
+ "fmla z13.h, p4/M, z7.h, z1.h\n"
+ "fmla z17.h, p4/M, z7.h, z2.h\n"
+ "ld1h { z7.h }, p4/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
+ "add x24, x24, #0x2\n"
+ "add x23, x23, #0x2\n"
+ "fmla z10.h, p4/M, z6.h, z0.h\n"
+ "fmla z14.h, p4/M, z6.h, z1.h\n"
+ "fmla z18.h, p4/M, z6.h, z2.h\n"
+ "fmla z11.h, p4/M, z7.h, z0.h\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "fmla z15.h, p4/M, z7.h, z1.h\n"
+ "fmla z19.h, p4/M, z7.h, z2.h\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
+ "ld1rh { z2.h }, p4/Z, [x23]\n"
+ "ld1h { z6.h }, p4/Z, [x11]\n"
+ "ld1h { z7.h }, p4/Z, [x10]\n"
+ "bgt 36b\n"
+ "37:" // Height 3: Multiply loop: Main loop skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "fmla z8.h, p4/M, z6.h, z0.h\n"
+ "fmla z12.h, p4/M, z6.h, z1.h\n"
+ "add x27, x27, #0x1\n"
+ "fmla z16.h, p4/M, z6.h, z2.h\n"
+ "fmla z9.h, p4/M, z7.h, z0.h\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "cmp x27, x19\n"
+ "fmla z13.h, p4/M, z7.h, z1.h\n"
+ "fmla z17.h, p4/M, z7.h, z2.h\n"
+ "ld1h { z7.h }, p4/Z, [x28]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z10.h, p4/M, z6.h, z0.h\n"
+ "fmla z14.h, p4/M, z6.h, z1.h\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "fmla z18.h, p4/M, z6.h, z2.h\n"
+ "fmla z11.h, p4/M, z7.h, z0.h\n"
+ "addvl x28, x28, #1\n"
+ "fmla z15.h, p4/M, z7.h, z1.h\n"
+ "fmla z19.h, p4/M, z7.h, z2.h\n"
+ "bne 33b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "tbz %x[flags], #1, 38f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p4/Z, [x19]\n"
+ "fmin z8.h, p4/M, z8.h, z1.h\n"
+ "fmin z9.h, p4/M, z9.h, z1.h\n"
+ "fmin z10.h, p4/M, z10.h, z1.h\n"
+ "fmin z11.h, p4/M, z11.h, z1.h\n"
+ "fmin z12.h, p4/M, z12.h, z1.h\n"
+ "fmin z13.h, p4/M, z13.h, z1.h\n"
+ "fmin z14.h, p4/M, z14.h, z1.h\n"
+ "fmin z15.h, p4/M, z15.h, z1.h\n"
+ "fmin z16.h, p4/M, z16.h, z1.h\n"
+ "fmin z17.h, p4/M, z17.h, z1.h\n"
+ "fmin z18.h, p4/M, z18.h, z1.h\n"
+ "fmin z19.h, p4/M, z19.h, z1.h\n"
+ "fmax z8.h, p4/M, z8.h, z0.h\n"
+ "fmax z9.h, p4/M, z9.h, z0.h\n"
+ "fmax z10.h, p4/M, z10.h, z0.h\n"
+ "fmax z11.h, p4/M, z11.h, z0.h\n"
+ "fmax z12.h, p4/M, z12.h, z0.h\n"
+ "fmax z13.h, p4/M, z13.h, z0.h\n"
+ "fmax z14.h, p4/M, z14.h, z0.h\n"
+ "fmax z15.h, p4/M, z15.h, z0.h\n"
+ "fmax z16.h, p4/M, z16.h, z0.h\n"
+ "fmax z17.h, p4/M, z17.h, z0.h\n"
+ "fmax z18.h, p4/M, z18.h, z0.h\n"
+ "fmax z19.h, p4/M, z19.h, z0.h\n"
+ "38:" // Height 3: No activation
+ "st1h { z8.h }, p3, [x12]\n"
+ "st1h { z9.h }, p2, [x12, #1, MUL VL]\n"
+ "st1h { z10.h }, p1, [x12, #2, MUL VL]\n"
+ "st1h { z11.h }, p0, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1h { z12.h }, p3, [x24]\n"
+ "st1h { z13.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p1, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p0, [x24, #3, MUL VL]\n"
+ "st1h { z16.h }, p3, [x23]\n"
+ "st1h { z17.h }, p2, [x23, #1, MUL VL]\n"
+ "st1h { z18.h }, p1, [x23, #2, MUL VL]\n"
+ "st1h { z19.h }, p0, [x23, #3, MUL VL]\n"
+ "39:" // Height 3: Writeback done
+ "dech x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 28b\n"
+ "b 80f\n"
+ "40:" // Height 4
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "41:" // Height 4: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cnth x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 42f\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 42f\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 42f\n"
+ "mov x10, x11\n"
+ "42:" // Height 4: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p3.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p0.h, x19, x13\n"
+ "cbz x14, 43f\n"
+ "ld1h { z8.h }, p4/Z, [x14]\n"
+ "ld1h { z9.h }, p4/Z, [x14, #1, MUL VL]\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
+ "ld1h { z10.h }, p4/Z, [x14, #2, MUL VL]\n"
+ "ld1h { z11.h }, p4/Z, [x14, #3, MUL VL]\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
+ "mov z16.d, z8.d\n"
+ "mov z17.d, z9.d\n"
+ "addvl x14, x14, #4\n"
+ "mov z18.d, z10.d\n"
+ "mov z19.d, z11.d\n"
+ "mov z20.d, z8.d\n"
+ "mov z21.d, z9.d\n"
+ "mov z22.d, z10.d\n"
+ "mov z23.d, z11.d\n"
+ "b 45f\n"
+ "43:" // Height 4: no bias
+ "tbz %x[flags], #0, 44f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "ld1h { z8.h }, p3/Z, [x12]\n"
+ "add x22, x23, x19, LSL #1\n"
+ "ld1h { z9.h }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z10.h }, p1/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z11.h }, p0/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z12.h }, p3/Z, [x24]\n"
+ "ld1h { z13.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z16.h }, p3/Z, [x23]\n"
+ "ld1h { z17.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z18.h }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z19.h }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z20.h }, p3/Z, [x22]\n"
+ "ld1h { z21.h }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z22.h }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z23.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "b 45f\n"
+ "44:" // Height 4: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "mov z19.b, #0x0\n"
+ "mov z20.b, #0x0\n"
+ "mov z21.b, #0x0\n"
+ "mov z22.b, #0x0\n"
+ "mov z23.b, #0x0\n"
+ "45:" // Height 4: setup done
+ "mov x27, #0x0\n"
+ "46:" // Height 4: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 47f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 48f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "b 48f\n"
+ "47:" // Height 4: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "48:" // Height 4: input setup done
+ "subs x26, x26, #0x1\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
+ "ld1rh { z2.h }, p4/Z, [x23]\n"
+ "ld1rh { z3.h }, p4/Z, [x22]\n"
+ "ld1h { z6.h }, p4/Z, [x11]\n"
+ "ld1h { z7.h }, p4/Z, [x10]\n"
+ "ble 50f\n"
+ "49:" // Height 4: Multiply loop: Main loop
+ "fmla z8.h, p4/M, z6.h, z0.h\n"
+ "fmla z12.h, p4/M, z6.h, z1.h\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z16.h, p4/M, z6.h, z2.h\n"
+ "fmla z20.h, p4/M, z6.h, z3.h\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "add x25, x25, #0x2\n"
+ "fmla z9.h, p4/M, z7.h, z0.h\n"
+ "fmla z13.h, p4/M, z7.h, z1.h\n"
+ "subs x26, x26, #0x1\n"
+ "add x24, x24, #0x2\n"
+ "fmla z17.h, p4/M, z7.h, z2.h\n"
+ "fmla z21.h, p4/M, z7.h, z3.h\n"
+ "ld1h { z7.h }, p4/Z, [x28]\n"
+ "add x23, x23, #0x2\n"
+ "add x22, x22, #0x2\n"
+ "fmla z10.h, p4/M, z6.h, z0.h\n"
+ "fmla z14.h, p4/M, z6.h, z1.h\n"
+ "addvl x9, x9, #1\n"
+ "fmla z18.h, p4/M, z6.h, z2.h\n"
+ "fmla z22.h, p4/M, z6.h, z3.h\n"
+ "addvl x28, x28, #1\n"
+ "ld1h { z6.h }, p4/Z, [x11]\n"
+ "fmla z11.h, p4/M, z7.h, z0.h\n"
+ "fmla z15.h, p4/M, z7.h, z1.h\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
+ "fmla z19.h, p4/M, z7.h, z2.h\n"
+ "fmla z23.h, p4/M, z7.h, z3.h\n"
+ "ld1rh { z2.h }, p4/Z, [x23]\n"
+ "ld1rh { z3.h }, p4/Z, [x22]\n"
+ "ld1h { z7.h }, p4/Z, [x10]\n"
+ "bgt 49b\n"
+ "50:" // Height 4: Multiply loop: Main loop skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "fmla z8.h, p4/M, z6.h, z0.h\n"
+ "fmla z12.h, p4/M, z6.h, z1.h\n"
+ "add x27, x27, #0x1\n"
+ "fmla z16.h, p4/M, z6.h, z2.h\n"
+ "fmla z20.h, p4/M, z6.h, z3.h\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "cmp x27, x19\n"
+ "fmla z9.h, p4/M, z7.h, z0.h\n"
+ "fmla z13.h, p4/M, z7.h, z1.h\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z17.h, p4/M, z7.h, z2.h\n"
+ "fmla z21.h, p4/M, z7.h, z3.h\n"
+ "ld1h { z7.h }, p4/Z, [x28]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z10.h, p4/M, z6.h, z0.h\n"
+ "fmla z14.h, p4/M, z6.h, z1.h\n"
+ "addvl x28, x28, #1\n"
+ "fmla z18.h, p4/M, z6.h, z2.h\n"
+ "fmla z22.h, p4/M, z6.h, z3.h\n"
+ "fmla z11.h, p4/M, z7.h, z0.h\n"
+ "fmla z15.h, p4/M, z7.h, z1.h\n"
+ "fmla z19.h, p4/M, z7.h, z2.h\n"
+ "fmla z23.h, p4/M, z7.h, z3.h\n"
+ "bne 46b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "tbz %x[flags], #1, 51f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p4/Z, [x19]\n"
+ "fmin z8.h, p4/M, z8.h, z1.h\n"
+ "fmin z9.h, p4/M, z9.h, z1.h\n"
+ "fmin z10.h, p4/M, z10.h, z1.h\n"
+ "fmin z11.h, p4/M, z11.h, z1.h\n"
+ "fmin z12.h, p4/M, z12.h, z1.h\n"
+ "fmin z13.h, p4/M, z13.h, z1.h\n"
+ "fmin z14.h, p4/M, z14.h, z1.h\n"
+ "fmin z15.h, p4/M, z15.h, z1.h\n"
+ "fmin z16.h, p4/M, z16.h, z1.h\n"
+ "fmin z17.h, p4/M, z17.h, z1.h\n"
+ "fmin z18.h, p4/M, z18.h, z1.h\n"
+ "fmin z19.h, p4/M, z19.h, z1.h\n"
+ "fmin z20.h, p4/M, z20.h, z1.h\n"
+ "fmin z21.h, p4/M, z21.h, z1.h\n"
+ "fmin z22.h, p4/M, z22.h, z1.h\n"
+ "fmin z23.h, p4/M, z23.h, z1.h\n"
+ "fmax z8.h, p4/M, z8.h, z0.h\n"
+ "fmax z9.h, p4/M, z9.h, z0.h\n"
+ "fmax z10.h, p4/M, z10.h, z0.h\n"
+ "fmax z11.h, p4/M, z11.h, z0.h\n"
+ "fmax z12.h, p4/M, z12.h, z0.h\n"
+ "fmax z13.h, p4/M, z13.h, z0.h\n"
+ "fmax z14.h, p4/M, z14.h, z0.h\n"
+ "fmax z15.h, p4/M, z15.h, z0.h\n"
+ "fmax z16.h, p4/M, z16.h, z0.h\n"
+ "fmax z17.h, p4/M, z17.h, z0.h\n"
+ "fmax z18.h, p4/M, z18.h, z0.h\n"
+ "fmax z19.h, p4/M, z19.h, z0.h\n"
+ "fmax z20.h, p4/M, z20.h, z0.h\n"
+ "fmax z21.h, p4/M, z21.h, z0.h\n"
+ "fmax z22.h, p4/M, z22.h, z0.h\n"
+ "fmax z23.h, p4/M, z23.h, z0.h\n"
+ "51:" // Height 4: No activation
+ "st1h { z8.h }, p3, [x12]\n"
+ "st1h { z9.h }, p2, [x12, #1, MUL VL]\n"
+ "st1h { z10.h }, p1, [x12, #2, MUL VL]\n"
+ "st1h { z11.h }, p0, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1h { z12.h }, p3, [x24]\n"
+ "st1h { z13.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p1, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p0, [x24, #3, MUL VL]\n"
+ "st1h { z16.h }, p3, [x23]\n"
+ "st1h { z17.h }, p2, [x23, #1, MUL VL]\n"
+ "st1h { z18.h }, p1, [x23, #2, MUL VL]\n"
+ "st1h { z19.h }, p0, [x23, #3, MUL VL]\n"
+ "st1h { z20.h }, p3, [x22]\n"
+ "st1h { z21.h }, p2, [x22, #1, MUL VL]\n"
+ "st1h { z22.h }, p1, [x22, #2, MUL VL]\n"
+ "st1h { z23.h }, p0, [x22, #3, MUL VL]\n"
+ "52:" // Height 4: Writeback done
+ "dech x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 41b\n"
+ "b 80f\n"
+ "53:" // Height 5
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "54:" // Height 5: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cnth x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 55f\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 55f\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 55f\n"
+ "mov x10, x11\n"
+ "55:" // Height 5: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p3.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p0.h, x19, x13\n"
+ "cbz x14, 56f\n"
+ "ld1h { z8.h }, p4/Z, [x14]\n"
+ "ld1h { z9.h }, p4/Z, [x14, #1, MUL VL]\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
+ "ld1h { z10.h }, p4/Z, [x14, #2, MUL VL]\n"
+ "ld1h { z11.h }, p4/Z, [x14, #3, MUL VL]\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
+ "mov z16.d, z8.d\n"
+ "mov z17.d, z9.d\n"
+ "addvl x14, x14, #4\n"
+ "mov z18.d, z10.d\n"
+ "mov z19.d, z11.d\n"
+ "mov z20.d, z8.d\n"
+ "mov z21.d, z9.d\n"
+ "mov z22.d, z10.d\n"
+ "mov z23.d, z11.d\n"
+ "mov z24.d, z8.d\n"
+ "mov z25.d, z9.d\n"
+ "mov z26.d, z10.d\n"
+ "mov z27.d, z11.d\n"
+ "b 58f\n"
+ "56:" // Height 5: no bias
+ "tbz %x[flags], #0, 57f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "ld1h { z8.h }, p3/Z, [x12]\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "ld1h { z9.h }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z10.h }, p1/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z11.h }, p0/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z12.h }, p3/Z, [x24]\n"
+ "ld1h { z13.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z16.h }, p3/Z, [x23]\n"
+ "ld1h { z17.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z18.h }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z19.h }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z20.h }, p3/Z, [x22]\n"
+ "ld1h { z21.h }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z22.h }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z23.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1h { z24.h }, p3/Z, [x21]\n"
+ "ld1h { z25.h }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1h { z26.h }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z27.h }, p0/Z, [x21, #3, MUL VL]\n"
+ "b 58f\n"
+ "57:" // Height 5: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "mov z19.b, #0x0\n"
+ "mov z20.b, #0x0\n"
+ "mov z21.b, #0x0\n"
+ "mov z22.b, #0x0\n"
+ "mov z23.b, #0x0\n"
+ "mov z24.b, #0x0\n"
+ "mov z25.b, #0x0\n"
+ "mov z26.b, #0x0\n"
+ "mov z27.b, #0x0\n"
+ "58:" // Height 5: setup done
+ "mov x27, #0x0\n"
+ "59:" // Height 5: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 60f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 61f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
+ "b 61f\n"
+ "60:" // Height 5: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "61:" // Height 5: input setup done
+ "subs x26, x26, #0x1\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
+ "ld1rh { z2.h }, p4/Z, [x23]\n"
+ "ld1rh { z3.h }, p4/Z, [x22]\n"
+ "ld1rh { z4.h }, p4/Z, [x21]\n"
+ "ld1h { z6.h }, p4/Z, [x11]\n"
+ "ld1h { z7.h }, p4/Z, [x10]\n"
+ "ble 63f\n"
+ "62:" // Height 5: Multiply loop: Main loop
+ "fmla z8.h, p4/M, z6.h, z0.h\n"
+ "fmla z12.h, p4/M, z6.h, z1.h\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z16.h, p4/M, z6.h, z2.h\n"
+ "fmla z20.h, p4/M, z6.h, z3.h\n"
+ "add x25, x25, #0x2\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z24.h, p4/M, z6.h, z4.h\n"
+ "fmla z9.h, p4/M, z7.h, z0.h\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "add x24, x24, #0x2\n"
+ "fmla z13.h, p4/M, z7.h, z1.h\n"
+ "fmla z17.h, p4/M, z7.h, z2.h\n"
+ "add x23, x23, #0x2\n"
+ "add x22, x22, #0x2\n"
+ "fmla z21.h, p4/M, z7.h, z3.h\n"
+ "fmla z25.h, p4/M, z7.h, z4.h\n"
+ "ld1h { z7.h }, p4/Z, [x28]\n"
+ "add x21, x21, #0x2\n"
+ "fmla z10.h, p4/M, z6.h, z0.h\n"
+ "fmla z14.h, p4/M, z6.h, z1.h\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "fmla z18.h, p4/M, z6.h, z2.h\n"
+ "fmla z22.h, p4/M, z6.h, z3.h\n"
+ "fmla z26.h, p4/M, z6.h, z4.h\n"
+ "fmla z11.h, p4/M, z7.h, z0.h\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1h { z6.h }, p4/Z, [x11]\n"
+ "fmla z15.h, p4/M, z7.h, z1.h\n"
+ "fmla z19.h, p4/M, z7.h, z2.h\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
+ "ld1rh { z2.h }, p4/Z, [x23]\n"
+ "fmla z23.h, p4/M, z7.h, z3.h\n"
+ "fmla z27.h, p4/M, z7.h, z4.h\n"
+ "ld1rh { z3.h }, p4/Z, [x22]\n"
+ "ld1rh { z4.h }, p4/Z, [x21]\n"
+ "ld1h { z7.h }, p4/Z, [x10]\n"
+ "bgt 62b\n"
+ "63:" // Height 5: Multiply loop: Main loop skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "fmla z8.h, p4/M, z6.h, z0.h\n"
+ "fmla z12.h, p4/M, z6.h, z1.h\n"
+ "add x27, x27, #0x1\n"
+ "fmla z16.h, p4/M, z6.h, z2.h\n"
+ "fmla z20.h, p4/M, z6.h, z3.h\n"
+ "cmp x27, x19\n"
+ "addvl x11, x11, #1\n"
+ "fmla z24.h, p4/M, z6.h, z4.h\n"
+ "fmla z9.h, p4/M, z7.h, z0.h\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z13.h, p4/M, z7.h, z1.h\n"
+ "fmla z17.h, p4/M, z7.h, z2.h\n"
+ "addvl x9, x9, #1\n"
+ "fmla z21.h, p4/M, z7.h, z3.h\n"
+ "fmla z25.h, p4/M, z7.h, z4.h\n"
+ "ld1h { z7.h }, p4/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.h, p4/M, z6.h, z0.h\n"
+ "fmla z14.h, p4/M, z6.h, z1.h\n"
+ "fmla z18.h, p4/M, z6.h, z2.h\n"
+ "fmla z22.h, p4/M, z6.h, z3.h\n"
+ "fmla z26.h, p4/M, z6.h, z4.h\n"
+ "fmla z11.h, p4/M, z7.h, z0.h\n"
+ "fmla z15.h, p4/M, z7.h, z1.h\n"
+ "fmla z19.h, p4/M, z7.h, z2.h\n"
+ "fmla z23.h, p4/M, z7.h, z3.h\n"
+ "fmla z27.h, p4/M, z7.h, z4.h\n"
+ "bne 59b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "tbz %x[flags], #1, 64f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p4/Z, [x19]\n"
+ "fmin z8.h, p4/M, z8.h, z1.h\n"
+ "fmin z9.h, p4/M, z9.h, z1.h\n"
+ "fmin z10.h, p4/M, z10.h, z1.h\n"
+ "fmin z11.h, p4/M, z11.h, z1.h\n"
+ "fmin z12.h, p4/M, z12.h, z1.h\n"
+ "fmin z13.h, p4/M, z13.h, z1.h\n"
+ "fmin z14.h, p4/M, z14.h, z1.h\n"
+ "fmin z15.h, p4/M, z15.h, z1.h\n"
+ "fmin z16.h, p4/M, z16.h, z1.h\n"
+ "fmin z17.h, p4/M, z17.h, z1.h\n"
+ "fmin z18.h, p4/M, z18.h, z1.h\n"
+ "fmin z19.h, p4/M, z19.h, z1.h\n"
+ "fmin z20.h, p4/M, z20.h, z1.h\n"
+ "fmin z21.h, p4/M, z21.h, z1.h\n"
+ "fmin z22.h, p4/M, z22.h, z1.h\n"
+ "fmin z23.h, p4/M, z23.h, z1.h\n"
+ "fmin z24.h, p4/M, z24.h, z1.h\n"
+ "fmin z25.h, p4/M, z25.h, z1.h\n"
+ "fmin z26.h, p4/M, z26.h, z1.h\n"
+ "fmin z27.h, p4/M, z27.h, z1.h\n"
+ "fmax z8.h, p4/M, z8.h, z0.h\n"
+ "fmax z9.h, p4/M, z9.h, z0.h\n"
+ "fmax z10.h, p4/M, z10.h, z0.h\n"
+ "fmax z11.h, p4/M, z11.h, z0.h\n"
+ "fmax z12.h, p4/M, z12.h, z0.h\n"
+ "fmax z13.h, p4/M, z13.h, z0.h\n"
+ "fmax z14.h, p4/M, z14.h, z0.h\n"
+ "fmax z15.h, p4/M, z15.h, z0.h\n"
+ "fmax z16.h, p4/M, z16.h, z0.h\n"
+ "fmax z17.h, p4/M, z17.h, z0.h\n"
+ "fmax z18.h, p4/M, z18.h, z0.h\n"
+ "fmax z19.h, p4/M, z19.h, z0.h\n"
+ "fmax z20.h, p4/M, z20.h, z0.h\n"
+ "fmax z21.h, p4/M, z21.h, z0.h\n"
+ "fmax z22.h, p4/M, z22.h, z0.h\n"
+ "fmax z23.h, p4/M, z23.h, z0.h\n"
+ "fmax z24.h, p4/M, z24.h, z0.h\n"
+ "fmax z25.h, p4/M, z25.h, z0.h\n"
+ "fmax z26.h, p4/M, z26.h, z0.h\n"
+ "fmax z27.h, p4/M, z27.h, z0.h\n"
+ "64:" // Height 5: No activation
+ "st1h { z8.h }, p3, [x12]\n"
+ "st1h { z9.h }, p2, [x12, #1, MUL VL]\n"
+ "st1h { z10.h }, p1, [x12, #2, MUL VL]\n"
+ "st1h { z11.h }, p0, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1h { z12.h }, p3, [x24]\n"
+ "st1h { z13.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p1, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p0, [x24, #3, MUL VL]\n"
+ "st1h { z16.h }, p3, [x23]\n"
+ "st1h { z17.h }, p2, [x23, #1, MUL VL]\n"
+ "st1h { z18.h }, p1, [x23, #2, MUL VL]\n"
+ "st1h { z19.h }, p0, [x23, #3, MUL VL]\n"
+ "st1h { z20.h }, p3, [x22]\n"
+ "st1h { z21.h }, p2, [x22, #1, MUL VL]\n"
+ "st1h { z22.h }, p1, [x22, #2, MUL VL]\n"
+ "st1h { z23.h }, p0, [x22, #3, MUL VL]\n"
+ "st1h { z24.h }, p3, [x21]\n"
+ "st1h { z25.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z26.h }, p1, [x21, #2, MUL VL]\n"
+ "st1h { z27.h }, p0, [x21, #3, MUL VL]\n"
+ "65:" // Height 5: Writeback done
+ "dech x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 54b\n"
+ "b 80f\n"
+ "66:" // Height 6
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x20, #0xc\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x14, %x[bias]\n"
+ "mov x12, %x[output_ptr]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
+ "67:" // Height 6: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cnth x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 68f\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 68f\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 68f\n"
+ "mov x10, x11\n"
+ "68:" // Height 6: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p3.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p0.h, x19, x13\n"
+ "cbz x14, 69f\n"
+ "ld1h { z8.h }, p4/Z, [x14]\n"
+ "ld1h { z9.h }, p4/Z, [x14, #1, MUL VL]\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
+ "ld1h { z10.h }, p4/Z, [x14, #2, MUL VL]\n"
+ "ld1h { z11.h }, p4/Z, [x14, #3, MUL VL]\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
+ "mov z16.d, z8.d\n"
+ "mov z17.d, z9.d\n"
+ "addvl x14, x14, #4\n"
+ "mov z18.d, z10.d\n"
+ "mov z19.d, z11.d\n"
+ "mov z20.d, z8.d\n"
+ "mov z21.d, z9.d\n"
+ "mov z22.d, z10.d\n"
+ "mov z23.d, z11.d\n"
+ "mov z24.d, z8.d\n"
+ "mov z25.d, z9.d\n"
+ "mov z26.d, z10.d\n"
+ "mov z27.d, z11.d\n"
+ "mov z28.d, z8.d\n"
+ "mov z29.d, z9.d\n"
+ "mov z30.d, z10.d\n"
+ "mov z31.d, z11.d\n"
+ "b 71f\n"
+ "69:" // Height 6: no bias
+ "tbz %x[flags], #0, 70f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "ld1h { z8.h }, p3/Z, [x12]\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "ld1h { z9.h }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z10.h }, p1/Z, [x12, #2, MUL VL]\n"
+ "add x20, x21, x19, LSL #1\n"
+ "ld1h { z11.h }, p0/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z12.h }, p3/Z, [x24]\n"
+ "ld1h { z13.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z16.h }, p3/Z, [x23]\n"
+ "ld1h { z17.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z18.h }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z19.h }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z20.h }, p3/Z, [x22]\n"
+ "ld1h { z21.h }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z22.h }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z23.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1h { z24.h }, p3/Z, [x21]\n"
+ "ld1h { z25.h }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1h { z26.h }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z27.h }, p0/Z, [x21, #3, MUL VL]\n"
+ "ld1h { z28.h }, p3/Z, [x20]\n"
+ "ld1h { z29.h }, p2/Z, [x20, #1, MUL VL]\n"
+ "ld1h { z30.h }, p1/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z31.h }, p0/Z, [x20, #3, MUL VL]\n"
+ "b 71f\n"
+ "70:" // Height 6: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "mov z19.b, #0x0\n"
+ "mov z20.b, #0x0\n"
+ "mov z21.b, #0x0\n"
+ "mov z22.b, #0x0\n"
+ "mov z23.b, #0x0\n"
+ "mov z24.b, #0x0\n"
+ "mov z25.b, #0x0\n"
+ "mov z26.b, #0x0\n"
+ "mov z27.b, #0x0\n"
+ "mov z28.b, #0x0\n"
+ "mov z29.b, #0x0\n"
+ "mov z30.b, #0x0\n"
+ "mov z31.b, #0x0\n"
+ "71:" // Height 6: setup done
+ "mov x27, #0x0\n"
+ "72:" // Height 6: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 73f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 74f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
+ "add x20, x20, x19, LSL #1\n"
+ "b 74f\n"
+ "73:" // Height 6: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
+ "74:" // Height 6: input setup done
+ "subs x26, x26, #0x1\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
+ "ld1rh { z2.h }, p4/Z, [x23]\n"
+ "ld1rh { z3.h }, p4/Z, [x22]\n"
+ "ld1rh { z4.h }, p4/Z, [x21]\n"
+ "ld1rh { z5.h }, p4/Z, [x20]\n"
+ "ld1h { z6.h }, p4/Z, [x11]\n"
+ "ld1h { z7.h }, p4/Z, [x10]\n"
+ "ble 76f\n"
+ "75:" // Height 6: Multiply loop: Main loop
+ "fmla z8.h, p4/M, z6.h, z0.h\n"
+ "fmla z12.h, p4/M, z6.h, z1.h\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z16.h, p4/M, z6.h, z2.h\n"
+ "fmla z20.h, p4/M, z6.h, z3.h\n"
+ "add x25, x25, #0x2\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z24.h, p4/M, z6.h, z4.h\n"
+ "fmla z28.h, p4/M, z6.h, z5.h\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "add x24, x24, #0x2\n"
+ "fmla z9.h, p4/M, z7.h, z0.h\n"
+ "fmla z13.h, p4/M, z7.h, z1.h\n"
+ "add x23, x23, #0x2\n"
+ "add x22, x22, #0x2\n"
+ "fmla z17.h, p4/M, z7.h, z2.h\n"
+ "fmla z21.h, p4/M, z7.h, z3.h\n"
+ "add x21, x21, #0x2\n"
+ "add x20, x20, #0x2\n"
+ "fmla z25.h, p4/M, z7.h, z4.h\n"
+ "fmla z29.h, p4/M, z7.h, z5.h\n"
+ "ld1h { z7.h }, p4/Z, [x28]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z10.h, p4/M, z6.h, z0.h\n"
+ "fmla z14.h, p4/M, z6.h, z1.h\n"
+ "addvl x28, x28, #1\n"
+ "fmla z18.h, p4/M, z6.h, z2.h\n"
+ "fmla z22.h, p4/M, z6.h, z3.h\n"
+ "fmla z26.h, p4/M, z6.h, z4.h\n"
+ "fmla z30.h, p4/M, z6.h, z5.h\n"
+ "ld1h { z6.h }, p4/Z, [x11]\n"
+ "fmla z11.h, p4/M, z7.h, z0.h\n"
+ "fmla z15.h, p4/M, z7.h, z1.h\n"
+ "ld1rh { z0.h }, p4/Z, [x25]\n"
+ "ld1rh { z1.h }, p4/Z, [x24]\n"
+ "fmla z19.h, p4/M, z7.h, z2.h\n"
+ "fmla z23.h, p4/M, z7.h, z3.h\n"
+ "ld1rh { z2.h }, p4/Z, [x23]\n"
+ "ld1rh { z3.h }, p4/Z, [x22]\n"
+ "fmla z27.h, p4/M, z7.h, z4.h\n"
+ "fmla z31.h, p4/M, z7.h, z5.h\n"
+ "ld1rh { z4.h }, p4/Z, [x21]\n"
+ "ld1rh { z5.h }, p4/Z, [x20]\n"
+ "ld1h { z7.h }, p4/Z, [x10]\n"
+ "bgt 75b\n"
+ "76:" // Height 6: Multiply loop: Main loop skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "fmla z8.h, p4/M, z6.h, z0.h\n"
+ "fmla z12.h, p4/M, z6.h, z1.h\n"
+ "add x27, x27, #0x1\n"
+ "fmla z16.h, p4/M, z6.h, z2.h\n"
+ "fmla z20.h, p4/M, z6.h, z3.h\n"
+ "cmp x27, x19\n"
+ "addvl x11, x11, #1\n"
+ "fmla z24.h, p4/M, z6.h, z4.h\n"
+ "fmla z28.h, p4/M, z6.h, z5.h\n"
+ "ld1h { z6.h }, p4/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z9.h, p4/M, z7.h, z0.h\n"
+ "fmla z13.h, p4/M, z7.h, z1.h\n"
+ "addvl x9, x9, #1\n"
+ "fmla z17.h, p4/M, z7.h, z2.h\n"
+ "fmla z21.h, p4/M, z7.h, z3.h\n"
+ "fmla z25.h, p4/M, z7.h, z4.h\n"
+ "fmla z29.h, p4/M, z7.h, z5.h\n"
+ "ld1h { z7.h }, p4/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.h, p4/M, z6.h, z0.h\n"
+ "fmla z14.h, p4/M, z6.h, z1.h\n"
+ "fmla z18.h, p4/M, z6.h, z2.h\n"
+ "fmla z22.h, p4/M, z6.h, z3.h\n"
+ "fmla z26.h, p4/M, z6.h, z4.h\n"
+ "fmla z30.h, p4/M, z6.h, z5.h\n"
+ "fmla z11.h, p4/M, z7.h, z0.h\n"
+ "fmla z15.h, p4/M, z7.h, z1.h\n"
+ "fmla z19.h, p4/M, z7.h, z2.h\n"
+ "fmla z23.h, p4/M, z7.h, z3.h\n"
+ "fmla z27.h, p4/M, z7.h, z4.h\n"
+ "fmla z31.h, p4/M, z7.h, z5.h\n"
+ "bne 72b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
+ "tbz %x[flags], #1, 77f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p4/Z, [x19]\n"
+ "fmin z8.h, p4/M, z8.h, z1.h\n"
+ "fmin z9.h, p4/M, z9.h, z1.h\n"
+ "fmin z10.h, p4/M, z10.h, z1.h\n"
+ "fmin z11.h, p4/M, z11.h, z1.h\n"
+ "fmin z12.h, p4/M, z12.h, z1.h\n"
+ "fmin z13.h, p4/M, z13.h, z1.h\n"
+ "fmin z14.h, p4/M, z14.h, z1.h\n"
+ "fmin z15.h, p4/M, z15.h, z1.h\n"
+ "fmin z16.h, p4/M, z16.h, z1.h\n"
+ "fmin z17.h, p4/M, z17.h, z1.h\n"
+ "fmin z18.h, p4/M, z18.h, z1.h\n"
+ "fmin z19.h, p4/M, z19.h, z1.h\n"
+ "fmin z20.h, p4/M, z20.h, z1.h\n"
+ "fmin z21.h, p4/M, z21.h, z1.h\n"
+ "fmin z22.h, p4/M, z22.h, z1.h\n"
+ "fmin z23.h, p4/M, z23.h, z1.h\n"
+ "fmin z24.h, p4/M, z24.h, z1.h\n"
+ "fmin z25.h, p4/M, z25.h, z1.h\n"
+ "fmin z26.h, p4/M, z26.h, z1.h\n"
+ "fmin z27.h, p4/M, z27.h, z1.h\n"
+ "fmin z28.h, p4/M, z28.h, z1.h\n"
+ "fmin z29.h, p4/M, z29.h, z1.h\n"
+ "fmin z30.h, p4/M, z30.h, z1.h\n"
+ "fmin z31.h, p4/M, z31.h, z1.h\n"
+ "fmax z8.h, p4/M, z8.h, z0.h\n"
+ "fmax z9.h, p4/M, z9.h, z0.h\n"
+ "fmax z10.h, p4/M, z10.h, z0.h\n"
+ "fmax z11.h, p4/M, z11.h, z0.h\n"
+ "fmax z12.h, p4/M, z12.h, z0.h\n"
+ "fmax z13.h, p4/M, z13.h, z0.h\n"
+ "fmax z14.h, p4/M, z14.h, z0.h\n"
+ "fmax z15.h, p4/M, z15.h, z0.h\n"
+ "fmax z16.h, p4/M, z16.h, z0.h\n"
+ "fmax z17.h, p4/M, z17.h, z0.h\n"
+ "fmax z18.h, p4/M, z18.h, z0.h\n"
+ "fmax z19.h, p4/M, z19.h, z0.h\n"
+ "fmax z20.h, p4/M, z20.h, z0.h\n"
+ "fmax z21.h, p4/M, z21.h, z0.h\n"
+ "fmax z22.h, p4/M, z22.h, z0.h\n"
+ "fmax z23.h, p4/M, z23.h, z0.h\n"
+ "fmax z24.h, p4/M, z24.h, z0.h\n"
+ "fmax z25.h, p4/M, z25.h, z0.h\n"
+ "fmax z26.h, p4/M, z26.h, z0.h\n"
+ "fmax z27.h, p4/M, z27.h, z0.h\n"
+ "fmax z28.h, p4/M, z28.h, z0.h\n"
+ "fmax z29.h, p4/M, z29.h, z0.h\n"
+ "fmax z30.h, p4/M, z30.h, z0.h\n"
+ "fmax z31.h, p4/M, z31.h, z0.h\n"
+ "77:" // Height 6: No activation
+ "st1h { z8.h }, p3, [x12]\n"
+ "st1h { z9.h }, p2, [x12, #1, MUL VL]\n"
+ "st1h { z10.h }, p1, [x12, #2, MUL VL]\n"
+ "st1h { z11.h }, p0, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1h { z12.h }, p3, [x24]\n"
+ "st1h { z13.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p1, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p0, [x24, #3, MUL VL]\n"
+ "st1h { z16.h }, p3, [x23]\n"
+ "st1h { z17.h }, p2, [x23, #1, MUL VL]\n"
+ "st1h { z18.h }, p1, [x23, #2, MUL VL]\n"
+ "st1h { z19.h }, p0, [x23, #3, MUL VL]\n"
+ "st1h { z20.h }, p3, [x22]\n"
+ "st1h { z21.h }, p2, [x22, #1, MUL VL]\n"
+ "st1h { z22.h }, p1, [x22, #2, MUL VL]\n"
+ "st1h { z23.h }, p0, [x22, #3, MUL VL]\n"
+ "st1h { z24.h }, p3, [x21]\n"
+ "st1h { z25.h }, p2, [x21, #1, MUL VL]\n"
+ "st1h { z26.h }, p1, [x21, #2, MUL VL]\n"
+ "st1h { z27.h }, p0, [x21, #3, MUL VL]\n"
+ "st1h { z28.h }, p3, [x20]\n"
+ "st1h { z29.h }, p2, [x20, #1, MUL VL]\n"
+ "st1h { z30.h }, p1, [x20, #2, MUL VL]\n"
+ "st1h { z31.h }, p0, [x20, #3, MUL VL]\n"
+ "78:" // Height 6: Writeback done
+ "dech x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 67b\n"
+ "subs %x[M], %x[M], #0x6\n"
+ "beq 80f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 79f\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "b 1b\n"
+ "79:" // Update direct input
+ "mov x19, #0xc\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
+ "b 1b\n"
+ "80:" // Exit
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
+ : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace arm_gemm
+#endif // ARM_COMPUTE_ENABLE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/generic.cpp
new file mode 100644
index 0000000000..0f995812d8
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/generic.cpp
@@ -0,0 +1,3318 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include "arm_gemm.hpp"
+#include "../../utils.hpp"
+
+#include <cassert>
+#include <limits>
+
+namespace arm_gemm {
+
+void sve_ffhybrid_fp16_mla_6x4VL (
+ unsigned int num_strings, const unsigned int *string_lengths, IndirectInputArg<__fp16> A_arg,
+ size_t M, size_t N, const __fp16 *B_ptr, size_t B_stride, IndirectOutputArg<__fp16> output_arg,
+ const __fp16 *bias, Activation act, bool accumulate
+)
+{
+ struct KernelArgs {
+ __fp16 maxval = static_cast<__fp16>(std::numeric_limits<float>::infinity());
+ __fp16 minval = - static_cast<__fp16>(std::numeric_limits<float>::infinity());
+ unsigned int num_strings = {};
+ const unsigned int *string_lengths = {};
+ size_t N = {};
+ const __fp16 *B_ptr = {};
+ const __fp16 *cur_B_ptr = {};
+ size_t B_stride = {};
+ size_t output_offset = {};
+ size_t input_initial_col = {};
+ size_t input_offset = {};
+ } ka;
+
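+ // Flag bits handed to the assembly in %x[flags]:
+ //   bit 0 (0x1): accumulate into the existing output
+ //   bit 1 (0x2): apply the min/max activation clamp
+ //   bit 2 (0x4): output rows are supplied indirectly
+ //   bit 3 (0x8): input rows are supplied indirectly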
+ unsigned long flags=0;
+ void *output_ptr;
+ void *input_ptr;
+
+ if (output_arg.is_indirect) {
+ output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_offset=output_arg.indirect.offset;
+ flags |= 0x4;
+ } else {
+ output_ptr=(void *)(output_arg.direct.base);
+ ka.output_offset=output_arg.direct.stride;
+ }
+
+ if (A_arg.is_indirect) {
+ input_ptr=(void *)(A_arg.indirect.ptr);
+ ka.input_offset=A_arg.indirect.start_row;
+ ka.input_initial_col=A_arg.indirect.start_col;
+ flags |= 0x8;
+ } else {
+ assert(num_strings==1);
+ input_ptr=(void *)(A_arg.direct.base);
+ ka.input_offset=A_arg.direct.stride;
+ }
+ if (accumulate) {
+ flags |= 0x1;
+ }
+ ka.num_strings = num_strings;
+ ka.string_lengths = string_lengths;
+ ka.N = N;
+ ka.B_ptr = B_ptr;
+ ka.B_stride = B_stride;
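+ // Translate the activation into clamp bounds: BoundedReLU sets maxval and
+ // deliberately falls through to ReLU, which sets minval to 0 and enables
+ // the clamp (flag bit 1); Type::None keeps the +/-infinity defaults.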
+ switch(act.type) {
+ default:
+ case Activation::Type::None:
+ break;
+ case Activation::Type::BoundedReLU:
+ ka.maxval = static_cast<__fp16>(act.param1);
+ /* fall through */
+ case Activation::Type::ReLU:
+ ka.minval = 0;
+ flags |= 0x2;
+ break;
+ }
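+ // The assembly below dispatches on M to one of six height specializations
+ // (1-6 rows of A), each iterating the column loop over N in groups of up
+ // to four vector lengths. B is walked through four column-block pointers
+ // spaced B_stride elements apart (the fixed weight format), with
+ // predicates masking any partial block at the edge of N.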
+ __asm__ __volatile__(
+ "ptrue p5.b\n"
+ "1:" // Row loop
+ "cmp %x[M], #0x6\n"
+ "bge 71f\n"
+ "cmp %x[M], #0x4\n"
+ "bgt 57f\n"
+ "beq 43f\n"
+ "cmp %x[M], #0x2\n"
+ "bgt 29f\n"
+ "beq 15f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "2:" // Height 1: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cnth x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 3f\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 3f\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 3f\n"
+ "mov x10, x11\n"
+ "3:" // Height 1: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p4.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p3.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x13\n"
+ "cbz x14, 4f\n"
+ "ld1h { z8.h }, p5/Z, [x14]\n"
+ "ld1h { z9.h }, p5/Z, [x14, #1, MUL VL]\n"
+ "ld1h { z10.h }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1h { z11.h }, p5/Z, [x14, #3, MUL VL]\n"
+ "addvl x14, x14, #4\n"
+ "b 6f\n"
+ "4:" // Height 1: no bias
+ "tbz %x[flags], #0, 5f\n"
+ "ld1h { z8.h }, p4/Z, [x12]\n"
+ "ld1h { z9.h }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z10.h }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z11.h }, p1/Z, [x12, #3, MUL VL]\n"
+ "b 6f\n"
+ "5:" // Height 1: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "6:" // Height 1: setup done
+ "mov x27, #0x0\n"
+ "7:" // Height 1: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 8f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 9f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "b 9f\n"
+ "8:" // Height 1: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "9:" // Height 1: input setup done
+ "cmp x26, #0x8\n"
+ "ble 11f\n"
+ "10:" // Height 1: Multiply loop: Main loop head
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "fmla z8.h, z6.h, z0.h[0]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z9.h, z7.h, z0.h[0]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "fmla z10.h, z6.h, z0.h[0]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "fmla z11.h, z7.h, z0.h[0]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[1]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #1, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[1]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #2, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[2]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #2, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[2]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #3, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[3]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #3, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #3, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[3]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #4, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[4]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[4]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #4, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[4]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #4, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[4]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #5, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[5]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[5]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #5, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[5]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #5, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[5]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #6, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[6]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[6]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #6, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[6]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #6, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[6]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #7, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[7]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[7]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #7, MUL VL]\n"
+ "sub x26, x26, #0x8\n"
+ "ld1h { z7.h }, p5/Z, [x28, #7, MUL VL]\n"
+ "cmp x26, #0x8\n"
+ "fmla z10.h, z6.h, z0.h[7]\n"
+ "fmla z11.h, z7.h, z0.h[7]\n"
+ "add x25, x25, #0x10\n"
+ "addvl x11, x11, #8\n"
+ "addvl x10, x10, #8\n"
+ "addvl x9, x9, #8\n"
+ "addvl x28, x28, #8\n"
+ "bgt 10b\n"
+ "11:" // Height 1: Multiply loop: Single iteration only
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "fmla z8.h, z6.h, z0.h[0]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z9.h, z7.h, z0.h[0]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "fmla z10.h, z6.h, z0.h[0]\n"
+ "fmla z11.h, z7.h, z0.h[0]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "ble 12f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[1]\n"
+ "fmla z9.h, z7.h, z0.h[1]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z10.h, z6.h, z0.h[1]\n"
+ "fmla z11.h, z7.h, z0.h[1]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "ble 12f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[2]\n"
+ "fmla z9.h, z7.h, z0.h[2]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z10.h, z6.h, z0.h[2]\n"
+ "fmla z11.h, z7.h, z0.h[2]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "ble 12f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[3]\n"
+ "fmla z9.h, z7.h, z0.h[3]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z10.h, z6.h, z0.h[3]\n"
+ "fmla z11.h, z7.h, z0.h[3]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "ble 12f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[4]\n"
+ "fmla z9.h, z7.h, z0.h[4]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z10.h, z6.h, z0.h[4]\n"
+ "fmla z11.h, z7.h, z0.h[4]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "ble 12f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[5]\n"
+ "fmla z9.h, z7.h, z0.h[5]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z10.h, z6.h, z0.h[5]\n"
+ "fmla z11.h, z7.h, z0.h[5]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "ble 12f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[6]\n"
+ "fmla z9.h, z7.h, z0.h[6]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z10.h, z6.h, z0.h[6]\n"
+ "fmla z11.h, z7.h, z0.h[6]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "ble 12f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[7]\n"
+ "fmla z9.h, z7.h, z0.h[7]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "fmla z10.h, z6.h, z0.h[7]\n"
+ "fmla z11.h, z7.h, z0.h[7]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "12:" // Height 1: Multiply loop: multiply skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 7b\n"
+ "tbz %x[flags], #1, 13f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p5/Z, [x19]\n"
+ "fmin z8.h, p5/M, z8.h, z1.h\n"
+ "fmin z9.h, p5/M, z9.h, z1.h\n"
+ "fmin z10.h, p5/M, z10.h, z1.h\n"
+ "fmin z11.h, p5/M, z11.h, z1.h\n"
+ "fmax z8.h, p5/M, z8.h, z0.h\n"
+ "fmax z9.h, p5/M, z9.h, z0.h\n"
+ "fmax z10.h, p5/M, z10.h, z0.h\n"
+ "fmax z11.h, p5/M, z11.h, z0.h\n"
+ "13:" // Height 1: No activation
+ "st1h { z8.h }, p4, [x12]\n"
+ "st1h { z9.h }, p3, [x12, #1, MUL VL]\n"
+ "st1h { z10.h }, p2, [x12, #2, MUL VL]\n"
+ "st1h { z11.h }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "14:" // Height 1: Writeback done
+ "dech x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 2b\n"
+ "b 86f\n"
+ "15:" // Height 2
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "16:" // Height 2: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cnth x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 17f\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 17f\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 17f\n"
+ "mov x10, x11\n"
+ "17:" // Height 2: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p4.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p3.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x13\n"
+ "cbz x14, 18f\n"
+ "ld1h { z8.h }, p5/Z, [x14]\n"
+ "ld1h { z9.h }, p5/Z, [x14, #1, MUL VL]\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
+ "ld1h { z10.h }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1h { z11.h }, p5/Z, [x14, #3, MUL VL]\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
+ "addvl x14, x14, #4\n"
+ "b 20f\n"
+ "18:" // Height 2: no bias
+ "tbz %x[flags], #0, 19f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "ld1h { z8.h }, p4/Z, [x12]\n"
+ "ld1h { z9.h }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z10.h }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z11.h }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z12.h }, p4/Z, [x24]\n"
+ "ld1h { z13.h }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p1/Z, [x24, #3, MUL VL]\n"
+ "b 20f\n"
+ "19:" // Height 2: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "20:" // Height 2: setup done
+ "mov x27, #0x0\n"
+ "21:" // Height 2: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 22f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 23f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "b 23f\n"
+ "22:" // Height 2: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "23:" // Height 2: input setup done
+ "cmp x26, #0x8\n"
+ "ble 25f\n"
+ "24:" // Height 2: Multiply loop: Main loop head
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "sub x26, x26, #0x8\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[0]\n"
+ "fmla z12.h, z6.h, z1.h[0]\n"
+ "fmla z9.h, z7.h, z0.h[0]\n"
+ "fmla z13.h, z7.h, z1.h[0]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "fmla z10.h, z6.h, z0.h[0]\n"
+ "fmla z14.h, z6.h, z1.h[0]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "cmp x26, #0x8\n"
+ "fmla z11.h, z7.h, z0.h[0]\n"
+ "fmla z15.h, z7.h, z1.h[0]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "add x25, x25, #0x10\n"
+ "fmla z8.h, z6.h, z0.h[1]\n"
+ "fmla z12.h, z6.h, z1.h[1]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "add x24, x24, #0x10\n"
+ "fmla z9.h, z7.h, z0.h[1]\n"
+ "fmla z13.h, z7.h, z1.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #1, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[1]\n"
+ "fmla z14.h, z6.h, z1.h[1]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #2, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[1]\n"
+ "fmla z15.h, z7.h, z1.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[2]\n"
+ "fmla z12.h, z6.h, z1.h[2]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[2]\n"
+ "fmla z13.h, z7.h, z1.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #2, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[2]\n"
+ "fmla z14.h, z6.h, z1.h[2]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #3, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[2]\n"
+ "fmla z15.h, z7.h, z1.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[3]\n"
+ "fmla z12.h, z6.h, z1.h[3]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #3, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[3]\n"
+ "fmla z13.h, z7.h, z1.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #3, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[3]\n"
+ "fmla z14.h, z6.h, z1.h[3]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #4, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[3]\n"
+ "fmla z15.h, z7.h, z1.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[4]\n"
+ "fmla z12.h, z6.h, z1.h[4]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #4, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[4]\n"
+ "fmla z13.h, z7.h, z1.h[4]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #4, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[4]\n"
+ "fmla z14.h, z6.h, z1.h[4]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #5, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[4]\n"
+ "fmla z15.h, z7.h, z1.h[4]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[5]\n"
+ "fmla z12.h, z6.h, z1.h[5]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #5, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[5]\n"
+ "fmla z13.h, z7.h, z1.h[5]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #5, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[5]\n"
+ "fmla z14.h, z6.h, z1.h[5]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #6, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[5]\n"
+ "fmla z15.h, z7.h, z1.h[5]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[6]\n"
+ "fmla z12.h, z6.h, z1.h[6]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #6, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[6]\n"
+ "fmla z13.h, z7.h, z1.h[6]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #6, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[6]\n"
+ "fmla z14.h, z6.h, z1.h[6]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #7, MUL VL]\n"
+ "addvl x11, x11, #8\n"
+ "fmla z11.h, z7.h, z0.h[6]\n"
+ "fmla z15.h, z7.h, z1.h[6]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
+ "fmla z8.h, z6.h, z0.h[7]\n"
+ "fmla z12.h, z6.h, z1.h[7]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
+ "fmla z9.h, z7.h, z0.h[7]\n"
+ "fmla z13.h, z7.h, z1.h[7]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
+ "fmla z10.h, z6.h, z0.h[7]\n"
+ "fmla z14.h, z6.h, z1.h[7]\n"
+ "fmla z11.h, z7.h, z0.h[7]\n"
+ "fmla z15.h, z7.h, z1.h[7]\n"
+ "bgt 24b\n"
+ "25:" // Height 2: Multiply loop: Single iteration only
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[0]\n"
+ "fmla z12.h, z6.h, z1.h[0]\n"
+ "fmla z9.h, z7.h, z0.h[0]\n"
+ "fmla z13.h, z7.h, z1.h[0]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "fmla z10.h, z6.h, z0.h[0]\n"
+ "fmla z14.h, z6.h, z1.h[0]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z11.h, z7.h, z0.h[0]\n"
+ "fmla z15.h, z7.h, z1.h[0]\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "ble 26f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[1]\n"
+ "fmla z12.h, z6.h, z1.h[1]\n"
+ "fmla z9.h, z7.h, z0.h[1]\n"
+ "fmla z13.h, z7.h, z1.h[1]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z10.h, z6.h, z0.h[1]\n"
+ "fmla z14.h, z6.h, z1.h[1]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z11.h, z7.h, z0.h[1]\n"
+ "fmla z15.h, z7.h, z1.h[1]\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "ble 26f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[2]\n"
+ "fmla z12.h, z6.h, z1.h[2]\n"
+ "fmla z9.h, z7.h, z0.h[2]\n"
+ "fmla z13.h, z7.h, z1.h[2]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z10.h, z6.h, z0.h[2]\n"
+ "fmla z14.h, z6.h, z1.h[2]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z11.h, z7.h, z0.h[2]\n"
+ "fmla z15.h, z7.h, z1.h[2]\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "ble 26f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[3]\n"
+ "fmla z12.h, z6.h, z1.h[3]\n"
+ "fmla z9.h, z7.h, z0.h[3]\n"
+ "fmla z13.h, z7.h, z1.h[3]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z10.h, z6.h, z0.h[3]\n"
+ "fmla z14.h, z6.h, z1.h[3]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z11.h, z7.h, z0.h[3]\n"
+ "fmla z15.h, z7.h, z1.h[3]\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "ble 26f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[4]\n"
+ "fmla z12.h, z6.h, z1.h[4]\n"
+ "fmla z9.h, z7.h, z0.h[4]\n"
+ "fmla z13.h, z7.h, z1.h[4]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z10.h, z6.h, z0.h[4]\n"
+ "fmla z14.h, z6.h, z1.h[4]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z11.h, z7.h, z0.h[4]\n"
+ "fmla z15.h, z7.h, z1.h[4]\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "ble 26f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[5]\n"
+ "fmla z12.h, z6.h, z1.h[5]\n"
+ "fmla z9.h, z7.h, z0.h[5]\n"
+ "fmla z13.h, z7.h, z1.h[5]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z10.h, z6.h, z0.h[5]\n"
+ "fmla z14.h, z6.h, z1.h[5]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z11.h, z7.h, z0.h[5]\n"
+ "fmla z15.h, z7.h, z1.h[5]\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "ble 26f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[6]\n"
+ "fmla z12.h, z6.h, z1.h[6]\n"
+ "fmla z9.h, z7.h, z0.h[6]\n"
+ "fmla z13.h, z7.h, z1.h[6]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z10.h, z6.h, z0.h[6]\n"
+ "fmla z14.h, z6.h, z1.h[6]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z11.h, z7.h, z0.h[6]\n"
+ "fmla z15.h, z7.h, z1.h[6]\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "ble 26f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[7]\n"
+ "fmla z12.h, z6.h, z1.h[7]\n"
+ "fmla z9.h, z7.h, z0.h[7]\n"
+ "fmla z13.h, z7.h, z1.h[7]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "fmla z10.h, z6.h, z0.h[7]\n"
+ "fmla z14.h, z6.h, z1.h[7]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z11.h, z7.h, z0.h[7]\n"
+ "fmla z15.h, z7.h, z1.h[7]\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "26:" // Height 2: Multiply loop: multiply skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 21b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "tbz %x[flags], #1, 27f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p5/Z, [x19]\n"
+ "fmin z8.h, p5/M, z8.h, z1.h\n"
+ "fmin z9.h, p5/M, z9.h, z1.h\n"
+ "fmin z10.h, p5/M, z10.h, z1.h\n"
+ "fmin z11.h, p5/M, z11.h, z1.h\n"
+ "fmin z12.h, p5/M, z12.h, z1.h\n"
+ "fmin z13.h, p5/M, z13.h, z1.h\n"
+ "fmin z14.h, p5/M, z14.h, z1.h\n"
+ "fmin z15.h, p5/M, z15.h, z1.h\n"
+ "fmax z8.h, p5/M, z8.h, z0.h\n"
+ "fmax z9.h, p5/M, z9.h, z0.h\n"
+ "fmax z10.h, p5/M, z10.h, z0.h\n"
+ "fmax z11.h, p5/M, z11.h, z0.h\n"
+ "fmax z12.h, p5/M, z12.h, z0.h\n"
+ "fmax z13.h, p5/M, z13.h, z0.h\n"
+ "fmax z14.h, p5/M, z14.h, z0.h\n"
+ "fmax z15.h, p5/M, z15.h, z0.h\n"
+ "27:" // Height 2: No activation
+ "st1h { z8.h }, p4, [x12]\n"
+ "st1h { z9.h }, p3, [x12, #1, MUL VL]\n"
+ "st1h { z10.h }, p2, [x12, #2, MUL VL]\n"
+ "st1h { z11.h }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1h { z12.h }, p4, [x24]\n"
+ "st1h { z13.h }, p3, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p2, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p1, [x24, #3, MUL VL]\n"
+ "28:" // Height 2: Writeback done
+ "dech x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 16b\n"
+ "b 86f\n"
+ "29:" // Height 3
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "30:" // Height 3: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cnth x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 31f\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 31f\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 31f\n"
+ "mov x10, x11\n"
+ "31:" // Height 3: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p4.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p3.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x13\n"
+ "cbz x14, 32f\n"
+ "ld1h { z8.h }, p5/Z, [x14]\n"
+ "ld1h { z9.h }, p5/Z, [x14, #1, MUL VL]\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
+ "ld1h { z10.h }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1h { z11.h }, p5/Z, [x14, #3, MUL VL]\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
+ "mov z16.d, z8.d\n"
+ "mov z17.d, z9.d\n"
+ "addvl x14, x14, #4\n"
+ "mov z18.d, z10.d\n"
+ "mov z19.d, z11.d\n"
+ "b 34f\n"
+ "32:" // Height 3: no bias
+ "tbz %x[flags], #0, 33f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "ld1h { z8.h }, p4/Z, [x12]\n"
+ "ld1h { z9.h }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z10.h }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z11.h }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z12.h }, p4/Z, [x24]\n"
+ "ld1h { z13.h }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z16.h }, p4/Z, [x23]\n"
+ "ld1h { z17.h }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z18.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z19.h }, p1/Z, [x23, #3, MUL VL]\n"
+ "b 34f\n"
+ "33:" // Height 3: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "mov z19.b, #0x0\n"
+ "34:" // Height 3: setup done
+ "mov x27, #0x0\n"
+ "35:" // Height 3: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 36f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 37f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "b 37f\n"
+ "36:" // Height 3: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "37:" // Height 3: input setup done
+ "cmp x26, #0x8\n"
+ "ble 39f\n"
+ "38:" // Height 3: Multiply loop: Main loop head
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "sub x26, x26, #0x8\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "fmla z8.h, z6.h, z0.h[0]\n"
+ "fmla z12.h, z6.h, z1.h[0]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z16.h, z6.h, z2.h[0]\n"
+ "fmla z9.h, z7.h, z0.h[0]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "fmla z13.h, z7.h, z1.h[0]\n"
+ "fmla z17.h, z7.h, z2.h[0]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "cmp x26, #0x8\n"
+ "fmla z10.h, z6.h, z0.h[0]\n"
+ "fmla z14.h, z6.h, z1.h[0]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "fmla z18.h, z6.h, z2.h[0]\n"
+ "fmla z11.h, z7.h, z0.h[0]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "add x23, x23, #0x10\n"
+ "fmla z15.h, z7.h, z1.h[0]\n"
+ "fmla z19.h, z7.h, z2.h[0]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[1]\n"
+ "fmla z12.h, z6.h, z1.h[1]\n"
+ "fmla z16.h, z6.h, z2.h[1]\n"
+ "fmla z9.h, z7.h, z0.h[1]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "fmla z13.h, z7.h, z1.h[1]\n"
+ "fmla z17.h, z7.h, z2.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #1, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[1]\n"
+ "fmla z14.h, z6.h, z1.h[1]\n"
+ "fmla z18.h, z6.h, z2.h[1]\n"
+ "fmla z11.h, z7.h, z0.h[1]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #2, MUL VL]\n"
+ "fmla z15.h, z7.h, z1.h[1]\n"
+ "fmla z19.h, z7.h, z2.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[2]\n"
+ "fmla z12.h, z6.h, z1.h[2]\n"
+ "fmla z16.h, z6.h, z2.h[2]\n"
+ "fmla z9.h, z7.h, z0.h[2]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "fmla z13.h, z7.h, z1.h[2]\n"
+ "fmla z17.h, z7.h, z2.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #2, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[2]\n"
+ "fmla z14.h, z6.h, z1.h[2]\n"
+ "fmla z18.h, z6.h, z2.h[2]\n"
+ "fmla z11.h, z7.h, z0.h[2]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #3, MUL VL]\n"
+ "fmla z15.h, z7.h, z1.h[2]\n"
+ "fmla z19.h, z7.h, z2.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[3]\n"
+ "fmla z12.h, z6.h, z1.h[3]\n"
+ "fmla z16.h, z6.h, z2.h[3]\n"
+ "fmla z9.h, z7.h, z0.h[3]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #3, MUL VL]\n"
+ "fmla z13.h, z7.h, z1.h[3]\n"
+ "fmla z17.h, z7.h, z2.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #3, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[3]\n"
+ "fmla z14.h, z6.h, z1.h[3]\n"
+ "fmla z18.h, z6.h, z2.h[3]\n"
+ "fmla z11.h, z7.h, z0.h[3]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #4, MUL VL]\n"
+ "fmla z15.h, z7.h, z1.h[3]\n"
+ "fmla z19.h, z7.h, z2.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[4]\n"
+ "fmla z12.h, z6.h, z1.h[4]\n"
+ "fmla z16.h, z6.h, z2.h[4]\n"
+ "fmla z9.h, z7.h, z0.h[4]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #4, MUL VL]\n"
+ "fmla z13.h, z7.h, z1.h[4]\n"
+ "fmla z17.h, z7.h, z2.h[4]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #4, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[4]\n"
+ "fmla z14.h, z6.h, z1.h[4]\n"
+ "fmla z18.h, z6.h, z2.h[4]\n"
+ "fmla z11.h, z7.h, z0.h[4]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #5, MUL VL]\n"
+ "fmla z15.h, z7.h, z1.h[4]\n"
+ "fmla z19.h, z7.h, z2.h[4]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[5]\n"
+ "fmla z12.h, z6.h, z1.h[5]\n"
+ "fmla z16.h, z6.h, z2.h[5]\n"
+ "fmla z9.h, z7.h, z0.h[5]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #5, MUL VL]\n"
+ "fmla z13.h, z7.h, z1.h[5]\n"
+ "fmla z17.h, z7.h, z2.h[5]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #5, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[5]\n"
+ "fmla z14.h, z6.h, z1.h[5]\n"
+ "fmla z18.h, z6.h, z2.h[5]\n"
+ "fmla z11.h, z7.h, z0.h[5]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #6, MUL VL]\n"
+ "fmla z15.h, z7.h, z1.h[5]\n"
+ "fmla z19.h, z7.h, z2.h[5]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[6]\n"
+ "fmla z12.h, z6.h, z1.h[6]\n"
+ "fmla z16.h, z6.h, z2.h[6]\n"
+ "fmla z9.h, z7.h, z0.h[6]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #6, MUL VL]\n"
+ "fmla z13.h, z7.h, z1.h[6]\n"
+ "fmla z17.h, z7.h, z2.h[6]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #6, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[6]\n"
+ "fmla z14.h, z6.h, z1.h[6]\n"
+ "fmla z18.h, z6.h, z2.h[6]\n"
+ "fmla z11.h, z7.h, z0.h[6]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #7, MUL VL]\n"
+ "addvl x11, x11, #8\n"
+ "fmla z15.h, z7.h, z1.h[6]\n"
+ "fmla z19.h, z7.h, z2.h[6]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
+ "fmla z8.h, z6.h, z0.h[7]\n"
+ "fmla z12.h, z6.h, z1.h[7]\n"
+ "fmla z16.h, z6.h, z2.h[7]\n"
+ "fmla z9.h, z7.h, z0.h[7]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
+ "fmla z13.h, z7.h, z1.h[7]\n"
+ "fmla z17.h, z7.h, z2.h[7]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
+ "fmla z10.h, z6.h, z0.h[7]\n"
+ "fmla z14.h, z6.h, z1.h[7]\n"
+ "fmla z18.h, z6.h, z2.h[7]\n"
+ "fmla z11.h, z7.h, z0.h[7]\n"
+ "fmla z15.h, z7.h, z1.h[7]\n"
+ "fmla z19.h, z7.h, z2.h[7]\n"
+ "bgt 38b\n"
+ "39:" // Height 3: Multiply loop: Single iteration only
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "fmla z8.h, z6.h, z0.h[0]\n"
+ "fmla z12.h, z6.h, z1.h[0]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z16.h, z6.h, z2.h[0]\n"
+ "fmla z9.h, z7.h, z0.h[0]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "fmla z13.h, z7.h, z1.h[0]\n"
+ "fmla z17.h, z7.h, z2.h[0]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z10.h, z6.h, z0.h[0]\n"
+ "fmla z14.h, z6.h, z1.h[0]\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "fmla z18.h, z6.h, z2.h[0]\n"
+ "fmla z11.h, z7.h, z0.h[0]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z15.h, z7.h, z1.h[0]\n"
+ "fmla z19.h, z7.h, z2.h[0]\n"
+ "ble 40f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[1]\n"
+ "fmla z12.h, z6.h, z1.h[1]\n"
+ "fmla z16.h, z6.h, z2.h[1]\n"
+ "fmla z9.h, z7.h, z0.h[1]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z13.h, z7.h, z1.h[1]\n"
+ "fmla z17.h, z7.h, z2.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z10.h, z6.h, z0.h[1]\n"
+ "fmla z14.h, z6.h, z1.h[1]\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "fmla z18.h, z6.h, z2.h[1]\n"
+ "fmla z11.h, z7.h, z0.h[1]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z15.h, z7.h, z1.h[1]\n"
+ "fmla z19.h, z7.h, z2.h[1]\n"
+ "ble 40f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[2]\n"
+ "fmla z12.h, z6.h, z1.h[2]\n"
+ "fmla z16.h, z6.h, z2.h[2]\n"
+ "fmla z9.h, z7.h, z0.h[2]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z13.h, z7.h, z1.h[2]\n"
+ "fmla z17.h, z7.h, z2.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z10.h, z6.h, z0.h[2]\n"
+ "fmla z14.h, z6.h, z1.h[2]\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "fmla z18.h, z6.h, z2.h[2]\n"
+ "fmla z11.h, z7.h, z0.h[2]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z15.h, z7.h, z1.h[2]\n"
+ "fmla z19.h, z7.h, z2.h[2]\n"
+ "ble 40f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[3]\n"
+ "fmla z12.h, z6.h, z1.h[3]\n"
+ "fmla z16.h, z6.h, z2.h[3]\n"
+ "fmla z9.h, z7.h, z0.h[3]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z13.h, z7.h, z1.h[3]\n"
+ "fmla z17.h, z7.h, z2.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z10.h, z6.h, z0.h[3]\n"
+ "fmla z14.h, z6.h, z1.h[3]\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "fmla z18.h, z6.h, z2.h[3]\n"
+ "fmla z11.h, z7.h, z0.h[3]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z15.h, z7.h, z1.h[3]\n"
+ "fmla z19.h, z7.h, z2.h[3]\n"
+ "ble 40f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[4]\n"
+ "fmla z12.h, z6.h, z1.h[4]\n"
+ "fmla z16.h, z6.h, z2.h[4]\n"
+ "fmla z9.h, z7.h, z0.h[4]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z13.h, z7.h, z1.h[4]\n"
+ "fmla z17.h, z7.h, z2.h[4]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z10.h, z6.h, z0.h[4]\n"
+ "fmla z14.h, z6.h, z1.h[4]\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "fmla z18.h, z6.h, z2.h[4]\n"
+ "fmla z11.h, z7.h, z0.h[4]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z15.h, z7.h, z1.h[4]\n"
+ "fmla z19.h, z7.h, z2.h[4]\n"
+ "ble 40f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[5]\n"
+ "fmla z12.h, z6.h, z1.h[5]\n"
+ "fmla z16.h, z6.h, z2.h[5]\n"
+ "fmla z9.h, z7.h, z0.h[5]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z13.h, z7.h, z1.h[5]\n"
+ "fmla z17.h, z7.h, z2.h[5]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z10.h, z6.h, z0.h[5]\n"
+ "fmla z14.h, z6.h, z1.h[5]\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "fmla z18.h, z6.h, z2.h[5]\n"
+ "fmla z11.h, z7.h, z0.h[5]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z15.h, z7.h, z1.h[5]\n"
+ "fmla z19.h, z7.h, z2.h[5]\n"
+ "ble 40f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[6]\n"
+ "fmla z12.h, z6.h, z1.h[6]\n"
+ "fmla z16.h, z6.h, z2.h[6]\n"
+ "fmla z9.h, z7.h, z0.h[6]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z13.h, z7.h, z1.h[6]\n"
+ "fmla z17.h, z7.h, z2.h[6]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z10.h, z6.h, z0.h[6]\n"
+ "fmla z14.h, z6.h, z1.h[6]\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "fmla z18.h, z6.h, z2.h[6]\n"
+ "fmla z11.h, z7.h, z0.h[6]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z15.h, z7.h, z1.h[6]\n"
+ "fmla z19.h, z7.h, z2.h[6]\n"
+ "ble 40f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[7]\n"
+ "fmla z12.h, z6.h, z1.h[7]\n"
+ "fmla z16.h, z6.h, z2.h[7]\n"
+ "fmla z9.h, z7.h, z0.h[7]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z13.h, z7.h, z1.h[7]\n"
+ "fmla z17.h, z7.h, z2.h[7]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z10.h, z6.h, z0.h[7]\n"
+ "fmla z14.h, z6.h, z1.h[7]\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "fmla z18.h, z6.h, z2.h[7]\n"
+ "fmla z11.h, z7.h, z0.h[7]\n"
+ "fmla z15.h, z7.h, z1.h[7]\n"
+ "fmla z19.h, z7.h, z2.h[7]\n"
+ "40:" // Height 3: Multiply loop: multiply skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 35b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "tbz %x[flags], #1, 41f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p5/Z, [x19]\n"
+ "fmin z8.h, p5/M, z8.h, z1.h\n"
+ "fmin z9.h, p5/M, z9.h, z1.h\n"
+ "fmin z10.h, p5/M, z10.h, z1.h\n"
+ "fmin z11.h, p5/M, z11.h, z1.h\n"
+ "fmin z12.h, p5/M, z12.h, z1.h\n"
+ "fmin z13.h, p5/M, z13.h, z1.h\n"
+ "fmin z14.h, p5/M, z14.h, z1.h\n"
+ "fmin z15.h, p5/M, z15.h, z1.h\n"
+ "fmin z16.h, p5/M, z16.h, z1.h\n"
+ "fmin z17.h, p5/M, z17.h, z1.h\n"
+ "fmin z18.h, p5/M, z18.h, z1.h\n"
+ "fmin z19.h, p5/M, z19.h, z1.h\n"
+ "fmax z8.h, p5/M, z8.h, z0.h\n"
+ "fmax z9.h, p5/M, z9.h, z0.h\n"
+ "fmax z10.h, p5/M, z10.h, z0.h\n"
+ "fmax z11.h, p5/M, z11.h, z0.h\n"
+ "fmax z12.h, p5/M, z12.h, z0.h\n"
+ "fmax z13.h, p5/M, z13.h, z0.h\n"
+ "fmax z14.h, p5/M, z14.h, z0.h\n"
+ "fmax z15.h, p5/M, z15.h, z0.h\n"
+ "fmax z16.h, p5/M, z16.h, z0.h\n"
+ "fmax z17.h, p5/M, z17.h, z0.h\n"
+ "fmax z18.h, p5/M, z18.h, z0.h\n"
+ "fmax z19.h, p5/M, z19.h, z0.h\n"
+ "41:" // Height 3: No activation
+ "st1h { z8.h }, p4, [x12]\n"
+ "st1h { z9.h }, p3, [x12, #1, MUL VL]\n"
+ "st1h { z10.h }, p2, [x12, #2, MUL VL]\n"
+ "st1h { z11.h }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1h { z12.h }, p4, [x24]\n"
+ "st1h { z13.h }, p3, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p2, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p1, [x24, #3, MUL VL]\n"
+ "st1h { z16.h }, p4, [x23]\n"
+ "st1h { z17.h }, p3, [x23, #1, MUL VL]\n"
+ "st1h { z18.h }, p2, [x23, #2, MUL VL]\n"
+ "st1h { z19.h }, p1, [x23, #3, MUL VL]\n"
+ "42:" // Height 3: Writeback done
+ "dech x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 30b\n"
+ "b 86f\n"
+ "43:" // Height 4
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "44:" // Height 4: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cnth x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 45f\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 45f\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 45f\n"
+ "mov x10, x11\n"
+ "45:" // Height 4: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p4.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p3.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x13\n"
+ "cbz x14, 46f\n"
+ "ld1h { z8.h }, p5/Z, [x14]\n"
+ "ld1h { z9.h }, p5/Z, [x14, #1, MUL VL]\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
+ "ld1h { z10.h }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1h { z11.h }, p5/Z, [x14, #3, MUL VL]\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
+ "mov z16.d, z8.d\n"
+ "mov z17.d, z9.d\n"
+ "addvl x14, x14, #4\n"
+ "mov z18.d, z10.d\n"
+ "mov z19.d, z11.d\n"
+ "mov z20.d, z8.d\n"
+ "mov z21.d, z9.d\n"
+ "mov z22.d, z10.d\n"
+ "mov z23.d, z11.d\n"
+ "b 48f\n"
+ "46:" // Height 4: no bias
+ "tbz %x[flags], #0, 47f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "ld1h { z8.h }, p4/Z, [x12]\n"
+ "add x22, x23, x19, LSL #1\n"
+ "ld1h { z9.h }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z10.h }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z11.h }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z12.h }, p4/Z, [x24]\n"
+ "ld1h { z13.h }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z16.h }, p4/Z, [x23]\n"
+ "ld1h { z17.h }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z18.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z19.h }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z20.h }, p4/Z, [x22]\n"
+ "ld1h { z21.h }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z22.h }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z23.h }, p1/Z, [x22, #3, MUL VL]\n"
+ "b 48f\n"
+ "47:" // Height 4: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "mov z19.b, #0x0\n"
+ "mov z20.b, #0x0\n"
+ "mov z21.b, #0x0\n"
+ "mov z22.b, #0x0\n"
+ "mov z23.b, #0x0\n"
+ "48:" // Height 4: setup done
+ "mov x27, #0x0\n"
+ "49:" // Height 4: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 50f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 51f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "b 51f\n"
+ "50:" // Height 4: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "51:" // Height 4: input setup done
+ "cmp x26, #0x8\n"
+ "ble 53f\n"
+ "52:" // Height 4: Multiply loop: Main loop head
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "sub x26, x26, #0x8\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "ld1rqh { z3.h }, p0/Z, [x22]\n"
+ "cmp x26, #0x8\n"
+ "add x25, x25, #0x10\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[0]\n"
+ "fmla z12.h, z6.h, z1.h[0]\n"
+ "fmla z16.h, z6.h, z2.h[0]\n"
+ "fmla z20.h, z6.h, z3.h[0]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "add x24, x24, #0x10\n"
+ "fmla z9.h, z7.h, z0.h[0]\n"
+ "fmla z13.h, z7.h, z1.h[0]\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "fmla z17.h, z7.h, z2.h[0]\n"
+ "fmla z21.h, z7.h, z3.h[0]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "fmla z10.h, z6.h, z0.h[0]\n"
+ "fmla z14.h, z6.h, z1.h[0]\n"
+ "fmla z18.h, z6.h, z2.h[0]\n"
+ "fmla z22.h, z6.h, z3.h[0]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[0]\n"
+ "fmla z15.h, z7.h, z1.h[0]\n"
+ "fmla z19.h, z7.h, z2.h[0]\n"
+ "fmla z23.h, z7.h, z3.h[0]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[1]\n"
+ "fmla z12.h, z6.h, z1.h[1]\n"
+ "fmla z16.h, z6.h, z2.h[1]\n"
+ "fmla z20.h, z6.h, z3.h[1]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[1]\n"
+ "fmla z13.h, z7.h, z1.h[1]\n"
+ "fmla z17.h, z7.h, z2.h[1]\n"
+ "fmla z21.h, z7.h, z3.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #1, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[1]\n"
+ "fmla z14.h, z6.h, z1.h[1]\n"
+ "fmla z18.h, z6.h, z2.h[1]\n"
+ "fmla z22.h, z6.h, z3.h[1]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #2, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[1]\n"
+ "fmla z15.h, z7.h, z1.h[1]\n"
+ "fmla z19.h, z7.h, z2.h[1]\n"
+ "fmla z23.h, z7.h, z3.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[2]\n"
+ "fmla z12.h, z6.h, z1.h[2]\n"
+ "fmla z16.h, z6.h, z2.h[2]\n"
+ "fmla z20.h, z6.h, z3.h[2]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[2]\n"
+ "fmla z13.h, z7.h, z1.h[2]\n"
+ "fmla z17.h, z7.h, z2.h[2]\n"
+ "fmla z21.h, z7.h, z3.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #2, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[2]\n"
+ "fmla z14.h, z6.h, z1.h[2]\n"
+ "fmla z18.h, z6.h, z2.h[2]\n"
+ "fmla z22.h, z6.h, z3.h[2]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #3, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[2]\n"
+ "fmla z15.h, z7.h, z1.h[2]\n"
+ "fmla z19.h, z7.h, z2.h[2]\n"
+ "fmla z23.h, z7.h, z3.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[3]\n"
+ "fmla z12.h, z6.h, z1.h[3]\n"
+ "fmla z16.h, z6.h, z2.h[3]\n"
+ "fmla z20.h, z6.h, z3.h[3]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #3, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[3]\n"
+ "fmla z13.h, z7.h, z1.h[3]\n"
+ "fmla z17.h, z7.h, z2.h[3]\n"
+ "fmla z21.h, z7.h, z3.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #3, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[3]\n"
+ "fmla z14.h, z6.h, z1.h[3]\n"
+ "fmla z18.h, z6.h, z2.h[3]\n"
+ "fmla z22.h, z6.h, z3.h[3]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #4, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[3]\n"
+ "fmla z15.h, z7.h, z1.h[3]\n"
+ "fmla z19.h, z7.h, z2.h[3]\n"
+ "fmla z23.h, z7.h, z3.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[4]\n"
+ "fmla z12.h, z6.h, z1.h[4]\n"
+ "fmla z16.h, z6.h, z2.h[4]\n"
+ "fmla z20.h, z6.h, z3.h[4]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #4, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[4]\n"
+ "fmla z13.h, z7.h, z1.h[4]\n"
+ "fmla z17.h, z7.h, z2.h[4]\n"
+ "fmla z21.h, z7.h, z3.h[4]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #4, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[4]\n"
+ "fmla z14.h, z6.h, z1.h[4]\n"
+ "fmla z18.h, z6.h, z2.h[4]\n"
+ "fmla z22.h, z6.h, z3.h[4]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #5, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[4]\n"
+ "fmla z15.h, z7.h, z1.h[4]\n"
+ "fmla z19.h, z7.h, z2.h[4]\n"
+ "fmla z23.h, z7.h, z3.h[4]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[5]\n"
+ "fmla z12.h, z6.h, z1.h[5]\n"
+ "fmla z16.h, z6.h, z2.h[5]\n"
+ "fmla z20.h, z6.h, z3.h[5]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #5, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[5]\n"
+ "fmla z13.h, z7.h, z1.h[5]\n"
+ "fmla z17.h, z7.h, z2.h[5]\n"
+ "fmla z21.h, z7.h, z3.h[5]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #5, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[5]\n"
+ "fmla z14.h, z6.h, z1.h[5]\n"
+ "fmla z18.h, z6.h, z2.h[5]\n"
+ "fmla z22.h, z6.h, z3.h[5]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #6, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[5]\n"
+ "fmla z15.h, z7.h, z1.h[5]\n"
+ "fmla z19.h, z7.h, z2.h[5]\n"
+ "fmla z23.h, z7.h, z3.h[5]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[6]\n"
+ "fmla z12.h, z6.h, z1.h[6]\n"
+ "fmla z16.h, z6.h, z2.h[6]\n"
+ "fmla z20.h, z6.h, z3.h[6]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #6, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[6]\n"
+ "fmla z13.h, z7.h, z1.h[6]\n"
+ "fmla z17.h, z7.h, z2.h[6]\n"
+ "fmla z21.h, z7.h, z3.h[6]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #6, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[6]\n"
+ "fmla z14.h, z6.h, z1.h[6]\n"
+ "fmla z18.h, z6.h, z2.h[6]\n"
+ "fmla z22.h, z6.h, z3.h[6]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #7, MUL VL]\n"
+ "addvl x11, x11, #8\n"
+ "fmla z11.h, z7.h, z0.h[6]\n"
+ "fmla z15.h, z7.h, z1.h[6]\n"
+ "fmla z19.h, z7.h, z2.h[6]\n"
+ "fmla z23.h, z7.h, z3.h[6]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
+ "fmla z8.h, z6.h, z0.h[7]\n"
+ "fmla z12.h, z6.h, z1.h[7]\n"
+ "fmla z16.h, z6.h, z2.h[7]\n"
+ "fmla z20.h, z6.h, z3.h[7]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
+ "fmla z9.h, z7.h, z0.h[7]\n"
+ "fmla z13.h, z7.h, z1.h[7]\n"
+ "fmla z17.h, z7.h, z2.h[7]\n"
+ "fmla z21.h, z7.h, z3.h[7]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
+ "fmla z10.h, z6.h, z0.h[7]\n"
+ "fmla z14.h, z6.h, z1.h[7]\n"
+ "fmla z18.h, z6.h, z2.h[7]\n"
+ "fmla z22.h, z6.h, z3.h[7]\n"
+ "fmla z11.h, z7.h, z0.h[7]\n"
+ "fmla z15.h, z7.h, z1.h[7]\n"
+ "fmla z19.h, z7.h, z2.h[7]\n"
+ "fmla z23.h, z7.h, z3.h[7]\n"
+ "bgt 52b\n"
+ "53:" // Height 4: Multiply loop: Single iteration only
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "ld1rqh { z3.h }, p0/Z, [x22]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[0]\n"
+ "fmla z12.h, z6.h, z1.h[0]\n"
+ "fmla z16.h, z6.h, z2.h[0]\n"
+ "fmla z20.h, z6.h, z3.h[0]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z9.h, z7.h, z0.h[0]\n"
+ "fmla z13.h, z7.h, z1.h[0]\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "fmla z17.h, z7.h, z2.h[0]\n"
+ "fmla z21.h, z7.h, z3.h[0]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.h, z6.h, z0.h[0]\n"
+ "fmla z14.h, z6.h, z1.h[0]\n"
+ "fmla z18.h, z6.h, z2.h[0]\n"
+ "fmla z22.h, z6.h, z3.h[0]\n"
+ "fmla z11.h, z7.h, z0.h[0]\n"
+ "fmla z15.h, z7.h, z1.h[0]\n"
+ "fmla z19.h, z7.h, z2.h[0]\n"
+ "fmla z23.h, z7.h, z3.h[0]\n"
+ "ble 54f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[1]\n"
+ "fmla z12.h, z6.h, z1.h[1]\n"
+ "fmla z16.h, z6.h, z2.h[1]\n"
+ "fmla z20.h, z6.h, z3.h[1]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z9.h, z7.h, z0.h[1]\n"
+ "fmla z13.h, z7.h, z1.h[1]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z17.h, z7.h, z2.h[1]\n"
+ "fmla z21.h, z7.h, z3.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z10.h, z6.h, z0.h[1]\n"
+ "fmla z14.h, z6.h, z1.h[1]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z18.h, z6.h, z2.h[1]\n"
+ "fmla z22.h, z6.h, z3.h[1]\n"
+ "fmla z11.h, z7.h, z0.h[1]\n"
+ "fmla z15.h, z7.h, z1.h[1]\n"
+ "fmla z19.h, z7.h, z2.h[1]\n"
+ "fmla z23.h, z7.h, z3.h[1]\n"
+ "ble 54f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[2]\n"
+ "fmla z12.h, z6.h, z1.h[2]\n"
+ "fmla z16.h, z6.h, z2.h[2]\n"
+ "fmla z20.h, z6.h, z3.h[2]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z9.h, z7.h, z0.h[2]\n"
+ "fmla z13.h, z7.h, z1.h[2]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z17.h, z7.h, z2.h[2]\n"
+ "fmla z21.h, z7.h, z3.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z10.h, z6.h, z0.h[2]\n"
+ "fmla z14.h, z6.h, z1.h[2]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z18.h, z6.h, z2.h[2]\n"
+ "fmla z22.h, z6.h, z3.h[2]\n"
+ "fmla z11.h, z7.h, z0.h[2]\n"
+ "fmla z15.h, z7.h, z1.h[2]\n"
+ "fmla z19.h, z7.h, z2.h[2]\n"
+ "fmla z23.h, z7.h, z3.h[2]\n"
+ "ble 54f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[3]\n"
+ "fmla z12.h, z6.h, z1.h[3]\n"
+ "fmla z16.h, z6.h, z2.h[3]\n"
+ "fmla z20.h, z6.h, z3.h[3]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z9.h, z7.h, z0.h[3]\n"
+ "fmla z13.h, z7.h, z1.h[3]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z17.h, z7.h, z2.h[3]\n"
+ "fmla z21.h, z7.h, z3.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z10.h, z6.h, z0.h[3]\n"
+ "fmla z14.h, z6.h, z1.h[3]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z18.h, z6.h, z2.h[3]\n"
+ "fmla z22.h, z6.h, z3.h[3]\n"
+ "fmla z11.h, z7.h, z0.h[3]\n"
+ "fmla z15.h, z7.h, z1.h[3]\n"
+ "fmla z19.h, z7.h, z2.h[3]\n"
+ "fmla z23.h, z7.h, z3.h[3]\n"
+ "ble 54f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[4]\n"
+ "fmla z12.h, z6.h, z1.h[4]\n"
+ "fmla z16.h, z6.h, z2.h[4]\n"
+ "fmla z20.h, z6.h, z3.h[4]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z9.h, z7.h, z0.h[4]\n"
+ "fmla z13.h, z7.h, z1.h[4]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z17.h, z7.h, z2.h[4]\n"
+ "fmla z21.h, z7.h, z3.h[4]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z10.h, z6.h, z0.h[4]\n"
+ "fmla z14.h, z6.h, z1.h[4]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z18.h, z6.h, z2.h[4]\n"
+ "fmla z22.h, z6.h, z3.h[4]\n"
+ "fmla z11.h, z7.h, z0.h[4]\n"
+ "fmla z15.h, z7.h, z1.h[4]\n"
+ "fmla z19.h, z7.h, z2.h[4]\n"
+ "fmla z23.h, z7.h, z3.h[4]\n"
+ "ble 54f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[5]\n"
+ "fmla z12.h, z6.h, z1.h[5]\n"
+ "fmla z16.h, z6.h, z2.h[5]\n"
+ "fmla z20.h, z6.h, z3.h[5]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z9.h, z7.h, z0.h[5]\n"
+ "fmla z13.h, z7.h, z1.h[5]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z17.h, z7.h, z2.h[5]\n"
+ "fmla z21.h, z7.h, z3.h[5]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z10.h, z6.h, z0.h[5]\n"
+ "fmla z14.h, z6.h, z1.h[5]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z18.h, z6.h, z2.h[5]\n"
+ "fmla z22.h, z6.h, z3.h[5]\n"
+ "fmla z11.h, z7.h, z0.h[5]\n"
+ "fmla z15.h, z7.h, z1.h[5]\n"
+ "fmla z19.h, z7.h, z2.h[5]\n"
+ "fmla z23.h, z7.h, z3.h[5]\n"
+ "ble 54f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[6]\n"
+ "fmla z12.h, z6.h, z1.h[6]\n"
+ "fmla z16.h, z6.h, z2.h[6]\n"
+ "fmla z20.h, z6.h, z3.h[6]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z9.h, z7.h, z0.h[6]\n"
+ "fmla z13.h, z7.h, z1.h[6]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z17.h, z7.h, z2.h[6]\n"
+ "fmla z21.h, z7.h, z3.h[6]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z10.h, z6.h, z0.h[6]\n"
+ "fmla z14.h, z6.h, z1.h[6]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z18.h, z6.h, z2.h[6]\n"
+ "fmla z22.h, z6.h, z3.h[6]\n"
+ "fmla z11.h, z7.h, z0.h[6]\n"
+ "fmla z15.h, z7.h, z1.h[6]\n"
+ "fmla z19.h, z7.h, z2.h[6]\n"
+ "fmla z23.h, z7.h, z3.h[6]\n"
+ "ble 54f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[7]\n"
+ "fmla z12.h, z6.h, z1.h[7]\n"
+ "fmla z16.h, z6.h, z2.h[7]\n"
+ "fmla z20.h, z6.h, z3.h[7]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z9.h, z7.h, z0.h[7]\n"
+ "fmla z13.h, z7.h, z1.h[7]\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "fmla z17.h, z7.h, z2.h[7]\n"
+ "fmla z21.h, z7.h, z3.h[7]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.h, z6.h, z0.h[7]\n"
+ "fmla z14.h, z6.h, z1.h[7]\n"
+ "fmla z18.h, z6.h, z2.h[7]\n"
+ "fmla z22.h, z6.h, z3.h[7]\n"
+ "fmla z11.h, z7.h, z0.h[7]\n"
+ "fmla z15.h, z7.h, z1.h[7]\n"
+ "fmla z19.h, z7.h, z2.h[7]\n"
+ "fmla z23.h, z7.h, z3.h[7]\n"
+ "54:" // Height 4: Multiply loop: multiply skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 49b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "tbz %x[flags], #1, 55f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p5/Z, [x19]\n"
+ "fmin z8.h, p5/M, z8.h, z1.h\n"
+ "fmin z9.h, p5/M, z9.h, z1.h\n"
+ "fmin z10.h, p5/M, z10.h, z1.h\n"
+ "fmin z11.h, p5/M, z11.h, z1.h\n"
+ "fmin z12.h, p5/M, z12.h, z1.h\n"
+ "fmin z13.h, p5/M, z13.h, z1.h\n"
+ "fmin z14.h, p5/M, z14.h, z1.h\n"
+ "fmin z15.h, p5/M, z15.h, z1.h\n"
+ "fmin z16.h, p5/M, z16.h, z1.h\n"
+ "fmin z17.h, p5/M, z17.h, z1.h\n"
+ "fmin z18.h, p5/M, z18.h, z1.h\n"
+ "fmin z19.h, p5/M, z19.h, z1.h\n"
+ "fmin z20.h, p5/M, z20.h, z1.h\n"
+ "fmin z21.h, p5/M, z21.h, z1.h\n"
+ "fmin z22.h, p5/M, z22.h, z1.h\n"
+ "fmin z23.h, p5/M, z23.h, z1.h\n"
+ "fmax z8.h, p5/M, z8.h, z0.h\n"
+ "fmax z9.h, p5/M, z9.h, z0.h\n"
+ "fmax z10.h, p5/M, z10.h, z0.h\n"
+ "fmax z11.h, p5/M, z11.h, z0.h\n"
+ "fmax z12.h, p5/M, z12.h, z0.h\n"
+ "fmax z13.h, p5/M, z13.h, z0.h\n"
+ "fmax z14.h, p5/M, z14.h, z0.h\n"
+ "fmax z15.h, p5/M, z15.h, z0.h\n"
+ "fmax z16.h, p5/M, z16.h, z0.h\n"
+ "fmax z17.h, p5/M, z17.h, z0.h\n"
+ "fmax z18.h, p5/M, z18.h, z0.h\n"
+ "fmax z19.h, p5/M, z19.h, z0.h\n"
+ "fmax z20.h, p5/M, z20.h, z0.h\n"
+ "fmax z21.h, p5/M, z21.h, z0.h\n"
+ "fmax z22.h, p5/M, z22.h, z0.h\n"
+ "fmax z23.h, p5/M, z23.h, z0.h\n"
+ "55:" // Height 4: No activation
+ "st1h { z8.h }, p4, [x12]\n"
+ "st1h { z9.h }, p3, [x12, #1, MUL VL]\n"
+ "st1h { z10.h }, p2, [x12, #2, MUL VL]\n"
+ "st1h { z11.h }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1h { z12.h }, p4, [x24]\n"
+ "st1h { z13.h }, p3, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p2, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p1, [x24, #3, MUL VL]\n"
+ "st1h { z16.h }, p4, [x23]\n"
+ "st1h { z17.h }, p3, [x23, #1, MUL VL]\n"
+ "st1h { z18.h }, p2, [x23, #2, MUL VL]\n"
+ "st1h { z19.h }, p1, [x23, #3, MUL VL]\n"
+ "st1h { z20.h }, p4, [x22]\n"
+ "st1h { z21.h }, p3, [x22, #1, MUL VL]\n"
+ "st1h { z22.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z23.h }, p1, [x22, #3, MUL VL]\n"
+ "56:" // Height 4: Writeback done
+ "dech x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 44b\n"
+ "b 86f\n"
+ "57:" // Height 5
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "58:" // Height 5: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cnth x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 59f\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 59f\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 59f\n"
+ "mov x10, x11\n"
+ "59:" // Height 5: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p4.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p3.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x13\n"
+ "cbz x14, 60f\n"
+ "ld1h { z8.h }, p5/Z, [x14]\n"
+ "ld1h { z9.h }, p5/Z, [x14, #1, MUL VL]\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
+ "ld1h { z10.h }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1h { z11.h }, p5/Z, [x14, #3, MUL VL]\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
+ "mov z16.d, z8.d\n"
+ "mov z17.d, z9.d\n"
+ "addvl x14, x14, #4\n"
+ "mov z18.d, z10.d\n"
+ "mov z19.d, z11.d\n"
+ "mov z20.d, z8.d\n"
+ "mov z21.d, z9.d\n"
+ "mov z22.d, z10.d\n"
+ "mov z23.d, z11.d\n"
+ "mov z24.d, z8.d\n"
+ "mov z25.d, z9.d\n"
+ "mov z26.d, z10.d\n"
+ "mov z27.d, z11.d\n"
+ "b 62f\n"
+ "60:" // Height 5: no bias
+ "tbz %x[flags], #0, 61f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "ld1h { z8.h }, p4/Z, [x12]\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "ld1h { z9.h }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z10.h }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z11.h }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z12.h }, p4/Z, [x24]\n"
+ "ld1h { z13.h }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z16.h }, p4/Z, [x23]\n"
+ "ld1h { z17.h }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z18.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z19.h }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z20.h }, p4/Z, [x22]\n"
+ "ld1h { z21.h }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z22.h }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z23.h }, p1/Z, [x22, #3, MUL VL]\n"
+ "ld1h { z24.h }, p4/Z, [x21]\n"
+ "ld1h { z25.h }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1h { z26.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z27.h }, p1/Z, [x21, #3, MUL VL]\n"
+ "b 62f\n"
+ "61:" // Height 5: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "mov z19.b, #0x0\n"
+ "mov z20.b, #0x0\n"
+ "mov z21.b, #0x0\n"
+ "mov z22.b, #0x0\n"
+ "mov z23.b, #0x0\n"
+ "mov z24.b, #0x0\n"
+ "mov z25.b, #0x0\n"
+ "mov z26.b, #0x0\n"
+ "mov z27.b, #0x0\n"
+ "62:" // Height 5: setup done
+ "mov x27, #0x0\n"
+ "63:" // Height 5: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 64f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 65f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
+ "b 65f\n"
+ "64:" // Height 5: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "65:" // Height 5: input setup done
+ "cmp x26, #0x8\n"
+ "ble 67f\n"
+ "66:" // Height 5: Multiply loop: Main loop head
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "sub x26, x26, #0x8\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "ld1rqh { z3.h }, p0/Z, [x22]\n"
+ "cmp x26, #0x8\n"
+ "add x25, x25, #0x10\n"
+ "ld1rqh { z4.h }, p0/Z, [x21]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "fmla z8.h, z6.h, z0.h[0]\n"
+ "fmla z12.h, z6.h, z1.h[0]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z16.h, z6.h, z2.h[0]\n"
+ "fmla z20.h, z6.h, z3.h[0]\n"
+ "add x24, x24, #0x10\n"
+ "fmla z24.h, z6.h, z4.h[0]\n"
+ "fmla z9.h, z7.h, z0.h[0]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "add x23, x23, #0x10\n"
+ "fmla z13.h, z7.h, z1.h[0]\n"
+ "fmla z17.h, z7.h, z2.h[0]\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ "fmla z21.h, z7.h, z3.h[0]\n"
+ "fmla z25.h, z7.h, z4.h[0]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "fmla z10.h, z6.h, z0.h[0]\n"
+ "fmla z14.h, z6.h, z1.h[0]\n"
+ "fmla z18.h, z6.h, z2.h[0]\n"
+ "fmla z22.h, z6.h, z3.h[0]\n"
+ "fmla z26.h, z6.h, z4.h[0]\n"
+ "fmla z11.h, z7.h, z0.h[0]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "fmla z15.h, z7.h, z1.h[0]\n"
+ "fmla z19.h, z7.h, z2.h[0]\n"
+ "fmla z23.h, z7.h, z3.h[0]\n"
+ "fmla z27.h, z7.h, z4.h[0]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[1]\n"
+ "fmla z12.h, z6.h, z1.h[1]\n"
+ "fmla z16.h, z6.h, z2.h[1]\n"
+ "fmla z20.h, z6.h, z3.h[1]\n"
+ "fmla z24.h, z6.h, z4.h[1]\n"
+ "fmla z9.h, z7.h, z0.h[1]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "fmla z13.h, z7.h, z1.h[1]\n"
+ "fmla z17.h, z7.h, z2.h[1]\n"
+ "fmla z21.h, z7.h, z3.h[1]\n"
+ "fmla z25.h, z7.h, z4.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #1, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[1]\n"
+ "fmla z14.h, z6.h, z1.h[1]\n"
+ "fmla z18.h, z6.h, z2.h[1]\n"
+ "fmla z22.h, z6.h, z3.h[1]\n"
+ "fmla z26.h, z6.h, z4.h[1]\n"
+ "fmla z11.h, z7.h, z0.h[1]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #2, MUL VL]\n"
+ "fmla z15.h, z7.h, z1.h[1]\n"
+ "fmla z19.h, z7.h, z2.h[1]\n"
+ "fmla z23.h, z7.h, z3.h[1]\n"
+ "fmla z27.h, z7.h, z4.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[2]\n"
+ "fmla z12.h, z6.h, z1.h[2]\n"
+ "fmla z16.h, z6.h, z2.h[2]\n"
+ "fmla z20.h, z6.h, z3.h[2]\n"
+ "fmla z24.h, z6.h, z4.h[2]\n"
+ "fmla z9.h, z7.h, z0.h[2]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "fmla z13.h, z7.h, z1.h[2]\n"
+ "fmla z17.h, z7.h, z2.h[2]\n"
+ "fmla z21.h, z7.h, z3.h[2]\n"
+ "fmla z25.h, z7.h, z4.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #2, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[2]\n"
+ "fmla z14.h, z6.h, z1.h[2]\n"
+ "fmla z18.h, z6.h, z2.h[2]\n"
+ "fmla z22.h, z6.h, z3.h[2]\n"
+ "fmla z26.h, z6.h, z4.h[2]\n"
+ "fmla z11.h, z7.h, z0.h[2]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #3, MUL VL]\n"
+ "fmla z15.h, z7.h, z1.h[2]\n"
+ "fmla z19.h, z7.h, z2.h[2]\n"
+ "fmla z23.h, z7.h, z3.h[2]\n"
+ "fmla z27.h, z7.h, z4.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[3]\n"
+ "fmla z12.h, z6.h, z1.h[3]\n"
+ "fmla z16.h, z6.h, z2.h[3]\n"
+ "fmla z20.h, z6.h, z3.h[3]\n"
+ "fmla z24.h, z6.h, z4.h[3]\n"
+ "fmla z9.h, z7.h, z0.h[3]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #3, MUL VL]\n"
+ "fmla z13.h, z7.h, z1.h[3]\n"
+ "fmla z17.h, z7.h, z2.h[3]\n"
+ "fmla z21.h, z7.h, z3.h[3]\n"
+ "fmla z25.h, z7.h, z4.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #3, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[3]\n"
+ "fmla z14.h, z6.h, z1.h[3]\n"
+ "fmla z18.h, z6.h, z2.h[3]\n"
+ "fmla z22.h, z6.h, z3.h[3]\n"
+ "fmla z26.h, z6.h, z4.h[3]\n"
+ "fmla z11.h, z7.h, z0.h[3]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #4, MUL VL]\n"
+ "fmla z15.h, z7.h, z1.h[3]\n"
+ "fmla z19.h, z7.h, z2.h[3]\n"
+ "fmla z23.h, z7.h, z3.h[3]\n"
+ "fmla z27.h, z7.h, z4.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[4]\n"
+ "fmla z12.h, z6.h, z1.h[4]\n"
+ "fmla z16.h, z6.h, z2.h[4]\n"
+ "fmla z20.h, z6.h, z3.h[4]\n"
+ "fmla z24.h, z6.h, z4.h[4]\n"
+ "fmla z9.h, z7.h, z0.h[4]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #4, MUL VL]\n"
+ "fmla z13.h, z7.h, z1.h[4]\n"
+ "fmla z17.h, z7.h, z2.h[4]\n"
+ "fmla z21.h, z7.h, z3.h[4]\n"
+ "fmla z25.h, z7.h, z4.h[4]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #4, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[4]\n"
+ "fmla z14.h, z6.h, z1.h[4]\n"
+ "fmla z18.h, z6.h, z2.h[4]\n"
+ "fmla z22.h, z6.h, z3.h[4]\n"
+ "fmla z26.h, z6.h, z4.h[4]\n"
+ "fmla z11.h, z7.h, z0.h[4]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #5, MUL VL]\n"
+ "fmla z15.h, z7.h, z1.h[4]\n"
+ "fmla z19.h, z7.h, z2.h[4]\n"
+ "fmla z23.h, z7.h, z3.h[4]\n"
+ "fmla z27.h, z7.h, z4.h[4]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[5]\n"
+ "fmla z12.h, z6.h, z1.h[5]\n"
+ "fmla z16.h, z6.h, z2.h[5]\n"
+ "fmla z20.h, z6.h, z3.h[5]\n"
+ "fmla z24.h, z6.h, z4.h[5]\n"
+ "fmla z9.h, z7.h, z0.h[5]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #5, MUL VL]\n"
+ "fmla z13.h, z7.h, z1.h[5]\n"
+ "fmla z17.h, z7.h, z2.h[5]\n"
+ "fmla z21.h, z7.h, z3.h[5]\n"
+ "fmla z25.h, z7.h, z4.h[5]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #5, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[5]\n"
+ "fmla z14.h, z6.h, z1.h[5]\n"
+ "fmla z18.h, z6.h, z2.h[5]\n"
+ "fmla z22.h, z6.h, z3.h[5]\n"
+ "fmla z26.h, z6.h, z4.h[5]\n"
+ "fmla z11.h, z7.h, z0.h[5]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #6, MUL VL]\n"
+ "fmla z15.h, z7.h, z1.h[5]\n"
+ "fmla z19.h, z7.h, z2.h[5]\n"
+ "fmla z23.h, z7.h, z3.h[5]\n"
+ "fmla z27.h, z7.h, z4.h[5]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[6]\n"
+ "fmla z12.h, z6.h, z1.h[6]\n"
+ "fmla z16.h, z6.h, z2.h[6]\n"
+ "fmla z20.h, z6.h, z3.h[6]\n"
+ "fmla z24.h, z6.h, z4.h[6]\n"
+ "fmla z9.h, z7.h, z0.h[6]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #6, MUL VL]\n"
+ "fmla z13.h, z7.h, z1.h[6]\n"
+ "fmla z17.h, z7.h, z2.h[6]\n"
+ "fmla z21.h, z7.h, z3.h[6]\n"
+ "fmla z25.h, z7.h, z4.h[6]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #6, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[6]\n"
+ "fmla z14.h, z6.h, z1.h[6]\n"
+ "fmla z18.h, z6.h, z2.h[6]\n"
+ "fmla z22.h, z6.h, z3.h[6]\n"
+ "fmla z26.h, z6.h, z4.h[6]\n"
+ "fmla z11.h, z7.h, z0.h[6]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #7, MUL VL]\n"
+ "addvl x11, x11, #8\n"
+ "fmla z15.h, z7.h, z1.h[6]\n"
+ "fmla z19.h, z7.h, z2.h[6]\n"
+ "fmla z23.h, z7.h, z3.h[6]\n"
+ "fmla z27.h, z7.h, z4.h[6]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
+ "fmla z8.h, z6.h, z0.h[7]\n"
+ "fmla z12.h, z6.h, z1.h[7]\n"
+ "fmla z16.h, z6.h, z2.h[7]\n"
+ "fmla z20.h, z6.h, z3.h[7]\n"
+ "fmla z24.h, z6.h, z4.h[7]\n"
+ "fmla z9.h, z7.h, z0.h[7]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
+ "fmla z13.h, z7.h, z1.h[7]\n"
+ "fmla z17.h, z7.h, z2.h[7]\n"
+ "fmla z21.h, z7.h, z3.h[7]\n"
+ "fmla z25.h, z7.h, z4.h[7]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
+ "fmla z10.h, z6.h, z0.h[7]\n"
+ "fmla z14.h, z6.h, z1.h[7]\n"
+ "fmla z18.h, z6.h, z2.h[7]\n"
+ "fmla z22.h, z6.h, z3.h[7]\n"
+ "fmla z26.h, z6.h, z4.h[7]\n"
+ "fmla z11.h, z7.h, z0.h[7]\n"
+ "fmla z15.h, z7.h, z1.h[7]\n"
+ "fmla z19.h, z7.h, z2.h[7]\n"
+ "fmla z23.h, z7.h, z3.h[7]\n"
+ "fmla z27.h, z7.h, z4.h[7]\n"
+ "bgt 66b\n"
+ "67:" // Height 5: Multiply loop: Single iteration only
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "ld1rqh { z3.h }, p0/Z, [x22]\n"
+ "ld1rqh { z4.h }, p0/Z, [x21]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "fmla z8.h, z6.h, z0.h[0]\n"
+ "fmla z12.h, z6.h, z1.h[0]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z16.h, z6.h, z2.h[0]\n"
+ "fmla z20.h, z6.h, z3.h[0]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z24.h, z6.h, z4.h[0]\n"
+ "fmla z9.h, z7.h, z0.h[0]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z13.h, z7.h, z1.h[0]\n"
+ "fmla z17.h, z7.h, z2.h[0]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z21.h, z7.h, z3.h[0]\n"
+ "fmla z25.h, z7.h, z4.h[0]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.h, z6.h, z0.h[0]\n"
+ "fmla z14.h, z6.h, z1.h[0]\n"
+ "fmla z18.h, z6.h, z2.h[0]\n"
+ "fmla z22.h, z6.h, z3.h[0]\n"
+ "fmla z26.h, z6.h, z4.h[0]\n"
+ "fmla z11.h, z7.h, z0.h[0]\n"
+ "fmla z15.h, z7.h, z1.h[0]\n"
+ "fmla z19.h, z7.h, z2.h[0]\n"
+ "fmla z23.h, z7.h, z3.h[0]\n"
+ "fmla z27.h, z7.h, z4.h[0]\n"
+ "ble 68f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[1]\n"
+ "fmla z12.h, z6.h, z1.h[1]\n"
+ "fmla z16.h, z6.h, z2.h[1]\n"
+ "fmla z20.h, z6.h, z3.h[1]\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
+ "fmla z24.h, z6.h, z4.h[1]\n"
+ "fmla z9.h, z7.h, z0.h[1]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z13.h, z7.h, z1.h[1]\n"
+ "fmla z17.h, z7.h, z2.h[1]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z21.h, z7.h, z3.h[1]\n"
+ "fmla z25.h, z7.h, z4.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.h, z6.h, z0.h[1]\n"
+ "fmla z14.h, z6.h, z1.h[1]\n"
+ "fmla z18.h, z6.h, z2.h[1]\n"
+ "fmla z22.h, z6.h, z3.h[1]\n"
+ "fmla z26.h, z6.h, z4.h[1]\n"
+ "fmla z11.h, z7.h, z0.h[1]\n"
+ "fmla z15.h, z7.h, z1.h[1]\n"
+ "fmla z19.h, z7.h, z2.h[1]\n"
+ "fmla z23.h, z7.h, z3.h[1]\n"
+ "fmla z27.h, z7.h, z4.h[1]\n"
+ "ble 68f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[2]\n"
+ "fmla z12.h, z6.h, z1.h[2]\n"
+ "fmla z16.h, z6.h, z2.h[2]\n"
+ "fmla z20.h, z6.h, z3.h[2]\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
+ "fmla z24.h, z6.h, z4.h[2]\n"
+ "fmla z9.h, z7.h, z0.h[2]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z13.h, z7.h, z1.h[2]\n"
+ "fmla z17.h, z7.h, z2.h[2]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z21.h, z7.h, z3.h[2]\n"
+ "fmla z25.h, z7.h, z4.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.h, z6.h, z0.h[2]\n"
+ "fmla z14.h, z6.h, z1.h[2]\n"
+ "fmla z18.h, z6.h, z2.h[2]\n"
+ "fmla z22.h, z6.h, z3.h[2]\n"
+ "fmla z26.h, z6.h, z4.h[2]\n"
+ "fmla z11.h, z7.h, z0.h[2]\n"
+ "fmla z15.h, z7.h, z1.h[2]\n"
+ "fmla z19.h, z7.h, z2.h[2]\n"
+ "fmla z23.h, z7.h, z3.h[2]\n"
+ "fmla z27.h, z7.h, z4.h[2]\n"
+ "ble 68f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[3]\n"
+ "fmla z12.h, z6.h, z1.h[3]\n"
+ "fmla z16.h, z6.h, z2.h[3]\n"
+ "fmla z20.h, z6.h, z3.h[3]\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
+ "fmla z24.h, z6.h, z4.h[3]\n"
+ "fmla z9.h, z7.h, z0.h[3]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z13.h, z7.h, z1.h[3]\n"
+ "fmla z17.h, z7.h, z2.h[3]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z21.h, z7.h, z3.h[3]\n"
+ "fmla z25.h, z7.h, z4.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.h, z6.h, z0.h[3]\n"
+ "fmla z14.h, z6.h, z1.h[3]\n"
+ "fmla z18.h, z6.h, z2.h[3]\n"
+ "fmla z22.h, z6.h, z3.h[3]\n"
+ "fmla z26.h, z6.h, z4.h[3]\n"
+ "fmla z11.h, z7.h, z0.h[3]\n"
+ "fmla z15.h, z7.h, z1.h[3]\n"
+ "fmla z19.h, z7.h, z2.h[3]\n"
+ "fmla z23.h, z7.h, z3.h[3]\n"
+ "fmla z27.h, z7.h, z4.h[3]\n"
+ "ble 68f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[4]\n"
+ "fmla z12.h, z6.h, z1.h[4]\n"
+ "fmla z16.h, z6.h, z2.h[4]\n"
+ "fmla z20.h, z6.h, z3.h[4]\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
+ "fmla z24.h, z6.h, z4.h[4]\n"
+ "fmla z9.h, z7.h, z0.h[4]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z13.h, z7.h, z1.h[4]\n"
+ "fmla z17.h, z7.h, z2.h[4]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z21.h, z7.h, z3.h[4]\n"
+ "fmla z25.h, z7.h, z4.h[4]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.h, z6.h, z0.h[4]\n"
+ "fmla z14.h, z6.h, z1.h[4]\n"
+ "fmla z18.h, z6.h, z2.h[4]\n"
+ "fmla z22.h, z6.h, z3.h[4]\n"
+ "fmla z26.h, z6.h, z4.h[4]\n"
+ "fmla z11.h, z7.h, z0.h[4]\n"
+ "fmla z15.h, z7.h, z1.h[4]\n"
+ "fmla z19.h, z7.h, z2.h[4]\n"
+ "fmla z23.h, z7.h, z3.h[4]\n"
+ "fmla z27.h, z7.h, z4.h[4]\n"
+ "ble 68f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[5]\n"
+ "fmla z12.h, z6.h, z1.h[5]\n"
+ "fmla z16.h, z6.h, z2.h[5]\n"
+ "fmla z20.h, z6.h, z3.h[5]\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
+ "fmla z24.h, z6.h, z4.h[5]\n"
+ "fmla z9.h, z7.h, z0.h[5]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z13.h, z7.h, z1.h[5]\n"
+ "fmla z17.h, z7.h, z2.h[5]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z21.h, z7.h, z3.h[5]\n"
+ "fmla z25.h, z7.h, z4.h[5]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.h, z6.h, z0.h[5]\n"
+ "fmla z14.h, z6.h, z1.h[5]\n"
+ "fmla z18.h, z6.h, z2.h[5]\n"
+ "fmla z22.h, z6.h, z3.h[5]\n"
+ "fmla z26.h, z6.h, z4.h[5]\n"
+ "fmla z11.h, z7.h, z0.h[5]\n"
+ "fmla z15.h, z7.h, z1.h[5]\n"
+ "fmla z19.h, z7.h, z2.h[5]\n"
+ "fmla z23.h, z7.h, z3.h[5]\n"
+ "fmla z27.h, z7.h, z4.h[5]\n"
+ "ble 68f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[6]\n"
+ "fmla z12.h, z6.h, z1.h[6]\n"
+ "fmla z16.h, z6.h, z2.h[6]\n"
+ "fmla z20.h, z6.h, z3.h[6]\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
+ "fmla z24.h, z6.h, z4.h[6]\n"
+ "fmla z9.h, z7.h, z0.h[6]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z13.h, z7.h, z1.h[6]\n"
+ "fmla z17.h, z7.h, z2.h[6]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z21.h, z7.h, z3.h[6]\n"
+ "fmla z25.h, z7.h, z4.h[6]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.h, z6.h, z0.h[6]\n"
+ "fmla z14.h, z6.h, z1.h[6]\n"
+ "fmla z18.h, z6.h, z2.h[6]\n"
+ "fmla z22.h, z6.h, z3.h[6]\n"
+ "fmla z26.h, z6.h, z4.h[6]\n"
+ "fmla z11.h, z7.h, z0.h[6]\n"
+ "fmla z15.h, z7.h, z1.h[6]\n"
+ "fmla z19.h, z7.h, z2.h[6]\n"
+ "fmla z23.h, z7.h, z3.h[6]\n"
+ "fmla z27.h, z7.h, z4.h[6]\n"
+ "ble 68f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[7]\n"
+ "fmla z12.h, z6.h, z1.h[7]\n"
+ "fmla z16.h, z6.h, z2.h[7]\n"
+ "fmla z20.h, z6.h, z3.h[7]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z24.h, z6.h, z4.h[7]\n"
+ "fmla z9.h, z7.h, z0.h[7]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z13.h, z7.h, z1.h[7]\n"
+ "fmla z17.h, z7.h, z2.h[7]\n"
+ "fmla z21.h, z7.h, z3.h[7]\n"
+ "fmla z25.h, z7.h, z4.h[7]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.h, z6.h, z0.h[7]\n"
+ "fmla z14.h, z6.h, z1.h[7]\n"
+ "fmla z18.h, z6.h, z2.h[7]\n"
+ "fmla z22.h, z6.h, z3.h[7]\n"
+ "fmla z26.h, z6.h, z4.h[7]\n"
+ "fmla z11.h, z7.h, z0.h[7]\n"
+ "fmla z15.h, z7.h, z1.h[7]\n"
+ "fmla z19.h, z7.h, z2.h[7]\n"
+ "fmla z23.h, z7.h, z3.h[7]\n"
+ "fmla z27.h, z7.h, z4.h[7]\n"
+ "68:" // Height 5: Multiply loop: multiply skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 63b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "tbz %x[flags], #1, 69f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p5/Z, [x19]\n"
+ "fmin z8.h, p5/M, z8.h, z1.h\n"
+ "fmin z9.h, p5/M, z9.h, z1.h\n"
+ "fmin z10.h, p5/M, z10.h, z1.h\n"
+ "fmin z11.h, p5/M, z11.h, z1.h\n"
+ "fmin z12.h, p5/M, z12.h, z1.h\n"
+ "fmin z13.h, p5/M, z13.h, z1.h\n"
+ "fmin z14.h, p5/M, z14.h, z1.h\n"
+ "fmin z15.h, p5/M, z15.h, z1.h\n"
+ "fmin z16.h, p5/M, z16.h, z1.h\n"
+ "fmin z17.h, p5/M, z17.h, z1.h\n"
+ "fmin z18.h, p5/M, z18.h, z1.h\n"
+ "fmin z19.h, p5/M, z19.h, z1.h\n"
+ "fmin z20.h, p5/M, z20.h, z1.h\n"
+ "fmin z21.h, p5/M, z21.h, z1.h\n"
+ "fmin z22.h, p5/M, z22.h, z1.h\n"
+ "fmin z23.h, p5/M, z23.h, z1.h\n"
+ "fmin z24.h, p5/M, z24.h, z1.h\n"
+ "fmin z25.h, p5/M, z25.h, z1.h\n"
+ "fmin z26.h, p5/M, z26.h, z1.h\n"
+ "fmin z27.h, p5/M, z27.h, z1.h\n"
+ "fmax z8.h, p5/M, z8.h, z0.h\n"
+ "fmax z9.h, p5/M, z9.h, z0.h\n"
+ "fmax z10.h, p5/M, z10.h, z0.h\n"
+ "fmax z11.h, p5/M, z11.h, z0.h\n"
+ "fmax z12.h, p5/M, z12.h, z0.h\n"
+ "fmax z13.h, p5/M, z13.h, z0.h\n"
+ "fmax z14.h, p5/M, z14.h, z0.h\n"
+ "fmax z15.h, p5/M, z15.h, z0.h\n"
+ "fmax z16.h, p5/M, z16.h, z0.h\n"
+ "fmax z17.h, p5/M, z17.h, z0.h\n"
+ "fmax z18.h, p5/M, z18.h, z0.h\n"
+ "fmax z19.h, p5/M, z19.h, z0.h\n"
+ "fmax z20.h, p5/M, z20.h, z0.h\n"
+ "fmax z21.h, p5/M, z21.h, z0.h\n"
+ "fmax z22.h, p5/M, z22.h, z0.h\n"
+ "fmax z23.h, p5/M, z23.h, z0.h\n"
+ "fmax z24.h, p5/M, z24.h, z0.h\n"
+ "fmax z25.h, p5/M, z25.h, z0.h\n"
+ "fmax z26.h, p5/M, z26.h, z0.h\n"
+ "fmax z27.h, p5/M, z27.h, z0.h\n"
+ "69:" // Height 5: No activation
+ "st1h { z8.h }, p4, [x12]\n"
+ "st1h { z9.h }, p3, [x12, #1, MUL VL]\n"
+ "st1h { z10.h }, p2, [x12, #2, MUL VL]\n"
+ "st1h { z11.h }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1h { z12.h }, p4, [x24]\n"
+ "st1h { z13.h }, p3, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p2, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p1, [x24, #3, MUL VL]\n"
+ "st1h { z16.h }, p4, [x23]\n"
+ "st1h { z17.h }, p3, [x23, #1, MUL VL]\n"
+ "st1h { z18.h }, p2, [x23, #2, MUL VL]\n"
+ "st1h { z19.h }, p1, [x23, #3, MUL VL]\n"
+ "st1h { z20.h }, p4, [x22]\n"
+ "st1h { z21.h }, p3, [x22, #1, MUL VL]\n"
+ "st1h { z22.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z23.h }, p1, [x22, #3, MUL VL]\n"
+ "st1h { z24.h }, p4, [x21]\n"
+ "st1h { z25.h }, p3, [x21, #1, MUL VL]\n"
+ "st1h { z26.h }, p2, [x21, #2, MUL VL]\n"
+ "st1h { z27.h }, p1, [x21, #3, MUL VL]\n"
+ "70:" // Height 5: Writeback done
+ "dech x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 58b\n"
+ "b 86f\n"
+ "71:" // Height 6
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x20, #0xc\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x14, %x[bias]\n"
+ "mov x12, %x[output_ptr]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
+ "72:" // Height 6: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "cnth x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "add x19, x28, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 73f\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 73f\n"
+ "dech x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 73f\n"
+ "mov x10, x11\n"
+ "73:" // Height 6: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p4.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p3.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p2.h, x19, x13\n"
+ "inch x19\n"
+ "whilelt p1.h, x19, x13\n"
+ "cbz x14, 74f\n"
+ "ld1h { z8.h }, p5/Z, [x14]\n"
+ "ld1h { z9.h }, p5/Z, [x14, #1, MUL VL]\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
+ "ld1h { z10.h }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1h { z11.h }, p5/Z, [x14, #3, MUL VL]\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
+ "mov z16.d, z8.d\n"
+ "mov z17.d, z9.d\n"
+ "addvl x14, x14, #4\n"
+ "mov z18.d, z10.d\n"
+ "mov z19.d, z11.d\n"
+ "mov z20.d, z8.d\n"
+ "mov z21.d, z9.d\n"
+ "mov z22.d, z10.d\n"
+ "mov z23.d, z11.d\n"
+ "mov z24.d, z8.d\n"
+ "mov z25.d, z9.d\n"
+ "mov z26.d, z10.d\n"
+ "mov z27.d, z11.d\n"
+ "mov z28.d, z8.d\n"
+ "mov z29.d, z9.d\n"
+ "mov z30.d, z10.d\n"
+ "mov z31.d, z11.d\n"
+ "b 76f\n"
+ "74:" // Height 6: no bias
+ "tbz %x[flags], #0, 75f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "ld1h { z8.h }, p4/Z, [x12]\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "ld1h { z9.h }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z10.h }, p2/Z, [x12, #2, MUL VL]\n"
+ "add x20, x21, x19, LSL #1\n"
+ "ld1h { z11.h }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1h { z12.h }, p4/Z, [x24]\n"
+ "ld1h { z13.h }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z14.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z15.h }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z16.h }, p4/Z, [x23]\n"
+ "ld1h { z17.h }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z18.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z19.h }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z20.h }, p4/Z, [x22]\n"
+ "ld1h { z21.h }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z22.h }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z23.h }, p1/Z, [x22, #3, MUL VL]\n"
+ "ld1h { z24.h }, p4/Z, [x21]\n"
+ "ld1h { z25.h }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1h { z26.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z27.h }, p1/Z, [x21, #3, MUL VL]\n"
+ "ld1h { z28.h }, p4/Z, [x20]\n"
+ "ld1h { z29.h }, p3/Z, [x20, #1, MUL VL]\n"
+ "ld1h { z30.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z31.h }, p1/Z, [x20, #3, MUL VL]\n"
+ "b 76f\n"
+ "75:" // Height 6: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "mov z19.b, #0x0\n"
+ "mov z20.b, #0x0\n"
+ "mov z21.b, #0x0\n"
+ "mov z22.b, #0x0\n"
+ "mov z23.b, #0x0\n"
+ "mov z24.b, #0x0\n"
+ "mov z25.b, #0x0\n"
+ "mov z26.b, #0x0\n"
+ "mov z27.b, #0x0\n"
+ "mov z28.b, #0x0\n"
+ "mov z29.b, #0x0\n"
+ "mov z30.b, #0x0\n"
+ "mov z31.b, #0x0\n"
+ "76:" // Height 6: setup done
+ "mov x27, #0x0\n"
+ "77:" // Height 6: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 78f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 79f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #1\n"
+ "add x24, x24, x19, LSL #1\n"
+ "add x23, x23, x19, LSL #1\n"
+ "add x22, x22, x19, LSL #1\n"
+ "add x21, x21, x19, LSL #1\n"
+ "add x20, x20, x19, LSL #1\n"
+ "b 79f\n"
+ "78:" // Height 6: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
+ "79:" // Height 6: input setup done
+ "cmp x26, #0x8\n"
+ "ble 81f\n"
+ "80:" // Height 6: Multiply loop: Main loop head
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "sub x26, x26, #0x8\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "ld1rqh { z3.h }, p0/Z, [x22]\n"
+ "cmp x26, #0x8\n"
+ "add x25, x25, #0x10\n"
+ "ld1rqh { z4.h }, p0/Z, [x21]\n"
+ "ld1rqh { z5.h }, p0/Z, [x20]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[0]\n"
+ "fmla z12.h, z6.h, z1.h[0]\n"
+ "fmla z16.h, z6.h, z2.h[0]\n"
+ "fmla z20.h, z6.h, z3.h[0]\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ "fmla z24.h, z6.h, z4.h[0]\n"
+ "fmla z28.h, z6.h, z5.h[0]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "add x20, x20, #0x10\n"
+ "fmla z9.h, z7.h, z0.h[0]\n"
+ "fmla z13.h, z7.h, z1.h[0]\n"
+ "fmla z17.h, z7.h, z2.h[0]\n"
+ "fmla z21.h, z7.h, z3.h[0]\n"
+ "fmla z25.h, z7.h, z4.h[0]\n"
+ "fmla z29.h, z7.h, z5.h[0]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "fmla z10.h, z6.h, z0.h[0]\n"
+ "fmla z14.h, z6.h, z1.h[0]\n"
+ "fmla z18.h, z6.h, z2.h[0]\n"
+ "fmla z22.h, z6.h, z3.h[0]\n"
+ "fmla z26.h, z6.h, z4.h[0]\n"
+ "fmla z30.h, z6.h, z5.h[0]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[0]\n"
+ "fmla z15.h, z7.h, z1.h[0]\n"
+ "fmla z19.h, z7.h, z2.h[0]\n"
+ "fmla z23.h, z7.h, z3.h[0]\n"
+ "fmla z27.h, z7.h, z4.h[0]\n"
+ "fmla z31.h, z7.h, z5.h[0]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[1]\n"
+ "fmla z12.h, z6.h, z1.h[1]\n"
+ "fmla z16.h, z6.h, z2.h[1]\n"
+ "fmla z20.h, z6.h, z3.h[1]\n"
+ "fmla z24.h, z6.h, z4.h[1]\n"
+ "fmla z28.h, z6.h, z5.h[1]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[1]\n"
+ "fmla z13.h, z7.h, z1.h[1]\n"
+ "fmla z17.h, z7.h, z2.h[1]\n"
+ "fmla z21.h, z7.h, z3.h[1]\n"
+ "fmla z25.h, z7.h, z4.h[1]\n"
+ "fmla z29.h, z7.h, z5.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #1, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[1]\n"
+ "fmla z14.h, z6.h, z1.h[1]\n"
+ "fmla z18.h, z6.h, z2.h[1]\n"
+ "fmla z22.h, z6.h, z3.h[1]\n"
+ "fmla z26.h, z6.h, z4.h[1]\n"
+ "fmla z30.h, z6.h, z5.h[1]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #2, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[1]\n"
+ "fmla z15.h, z7.h, z1.h[1]\n"
+ "fmla z19.h, z7.h, z2.h[1]\n"
+ "fmla z23.h, z7.h, z3.h[1]\n"
+ "fmla z27.h, z7.h, z4.h[1]\n"
+ "fmla z31.h, z7.h, z5.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[2]\n"
+ "fmla z12.h, z6.h, z1.h[2]\n"
+ "fmla z16.h, z6.h, z2.h[2]\n"
+ "fmla z20.h, z6.h, z3.h[2]\n"
+ "fmla z24.h, z6.h, z4.h[2]\n"
+ "fmla z28.h, z6.h, z5.h[2]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[2]\n"
+ "fmla z13.h, z7.h, z1.h[2]\n"
+ "fmla z17.h, z7.h, z2.h[2]\n"
+ "fmla z21.h, z7.h, z3.h[2]\n"
+ "fmla z25.h, z7.h, z4.h[2]\n"
+ "fmla z29.h, z7.h, z5.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #2, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[2]\n"
+ "fmla z14.h, z6.h, z1.h[2]\n"
+ "fmla z18.h, z6.h, z2.h[2]\n"
+ "fmla z22.h, z6.h, z3.h[2]\n"
+ "fmla z26.h, z6.h, z4.h[2]\n"
+ "fmla z30.h, z6.h, z5.h[2]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #3, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[2]\n"
+ "fmla z15.h, z7.h, z1.h[2]\n"
+ "fmla z19.h, z7.h, z2.h[2]\n"
+ "fmla z23.h, z7.h, z3.h[2]\n"
+ "fmla z27.h, z7.h, z4.h[2]\n"
+ "fmla z31.h, z7.h, z5.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[3]\n"
+ "fmla z12.h, z6.h, z1.h[3]\n"
+ "fmla z16.h, z6.h, z2.h[3]\n"
+ "fmla z20.h, z6.h, z3.h[3]\n"
+ "fmla z24.h, z6.h, z4.h[3]\n"
+ "fmla z28.h, z6.h, z5.h[3]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #3, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[3]\n"
+ "fmla z13.h, z7.h, z1.h[3]\n"
+ "fmla z17.h, z7.h, z2.h[3]\n"
+ "fmla z21.h, z7.h, z3.h[3]\n"
+ "fmla z25.h, z7.h, z4.h[3]\n"
+ "fmla z29.h, z7.h, z5.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #3, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[3]\n"
+ "fmla z14.h, z6.h, z1.h[3]\n"
+ "fmla z18.h, z6.h, z2.h[3]\n"
+ "fmla z22.h, z6.h, z3.h[3]\n"
+ "fmla z26.h, z6.h, z4.h[3]\n"
+ "fmla z30.h, z6.h, z5.h[3]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #4, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[3]\n"
+ "fmla z15.h, z7.h, z1.h[3]\n"
+ "fmla z19.h, z7.h, z2.h[3]\n"
+ "fmla z23.h, z7.h, z3.h[3]\n"
+ "fmla z27.h, z7.h, z4.h[3]\n"
+ "fmla z31.h, z7.h, z5.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[4]\n"
+ "fmla z12.h, z6.h, z1.h[4]\n"
+ "fmla z16.h, z6.h, z2.h[4]\n"
+ "fmla z20.h, z6.h, z3.h[4]\n"
+ "fmla z24.h, z6.h, z4.h[4]\n"
+ "fmla z28.h, z6.h, z5.h[4]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #4, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[4]\n"
+ "fmla z13.h, z7.h, z1.h[4]\n"
+ "fmla z17.h, z7.h, z2.h[4]\n"
+ "fmla z21.h, z7.h, z3.h[4]\n"
+ "fmla z25.h, z7.h, z4.h[4]\n"
+ "fmla z29.h, z7.h, z5.h[4]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #4, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[4]\n"
+ "fmla z14.h, z6.h, z1.h[4]\n"
+ "fmla z18.h, z6.h, z2.h[4]\n"
+ "fmla z22.h, z6.h, z3.h[4]\n"
+ "fmla z26.h, z6.h, z4.h[4]\n"
+ "fmla z30.h, z6.h, z5.h[4]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #5, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[4]\n"
+ "fmla z15.h, z7.h, z1.h[4]\n"
+ "fmla z19.h, z7.h, z2.h[4]\n"
+ "fmla z23.h, z7.h, z3.h[4]\n"
+ "fmla z27.h, z7.h, z4.h[4]\n"
+ "fmla z31.h, z7.h, z5.h[4]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #5, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[5]\n"
+ "fmla z12.h, z6.h, z1.h[5]\n"
+ "fmla z16.h, z6.h, z2.h[5]\n"
+ "fmla z20.h, z6.h, z3.h[5]\n"
+ "fmla z24.h, z6.h, z4.h[5]\n"
+ "fmla z28.h, z6.h, z5.h[5]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #5, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[5]\n"
+ "fmla z13.h, z7.h, z1.h[5]\n"
+ "fmla z17.h, z7.h, z2.h[5]\n"
+ "fmla z21.h, z7.h, z3.h[5]\n"
+ "fmla z25.h, z7.h, z4.h[5]\n"
+ "fmla z29.h, z7.h, z5.h[5]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #5, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[5]\n"
+ "fmla z14.h, z6.h, z1.h[5]\n"
+ "fmla z18.h, z6.h, z2.h[5]\n"
+ "fmla z22.h, z6.h, z3.h[5]\n"
+ "fmla z26.h, z6.h, z4.h[5]\n"
+ "fmla z30.h, z6.h, z5.h[5]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #6, MUL VL]\n"
+ "fmla z11.h, z7.h, z0.h[5]\n"
+ "fmla z15.h, z7.h, z1.h[5]\n"
+ "fmla z19.h, z7.h, z2.h[5]\n"
+ "fmla z23.h, z7.h, z3.h[5]\n"
+ "fmla z27.h, z7.h, z4.h[5]\n"
+ "fmla z31.h, z7.h, z5.h[5]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "fmla z8.h, z6.h, z0.h[6]\n"
+ "fmla z12.h, z6.h, z1.h[6]\n"
+ "fmla z16.h, z6.h, z2.h[6]\n"
+ "fmla z20.h, z6.h, z3.h[6]\n"
+ "fmla z24.h, z6.h, z4.h[6]\n"
+ "fmla z28.h, z6.h, z5.h[6]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #6, MUL VL]\n"
+ "fmla z9.h, z7.h, z0.h[6]\n"
+ "fmla z13.h, z7.h, z1.h[6]\n"
+ "fmla z17.h, z7.h, z2.h[6]\n"
+ "fmla z21.h, z7.h, z3.h[6]\n"
+ "fmla z25.h, z7.h, z4.h[6]\n"
+ "fmla z29.h, z7.h, z5.h[6]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #6, MUL VL]\n"
+ "fmla z10.h, z6.h, z0.h[6]\n"
+ "fmla z14.h, z6.h, z1.h[6]\n"
+ "fmla z18.h, z6.h, z2.h[6]\n"
+ "fmla z22.h, z6.h, z3.h[6]\n"
+ "fmla z26.h, z6.h, z4.h[6]\n"
+ "fmla z30.h, z6.h, z5.h[6]\n"
+ "ld1h { z6.h }, p5/Z, [x11, #7, MUL VL]\n"
+ "addvl x11, x11, #8\n"
+ "fmla z11.h, z7.h, z0.h[6]\n"
+ "fmla z15.h, z7.h, z1.h[6]\n"
+ "fmla z19.h, z7.h, z2.h[6]\n"
+ "fmla z23.h, z7.h, z3.h[6]\n"
+ "fmla z27.h, z7.h, z4.h[6]\n"
+ "fmla z31.h, z7.h, z5.h[6]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
+ "fmla z8.h, z6.h, z0.h[7]\n"
+ "fmla z12.h, z6.h, z1.h[7]\n"
+ "fmla z16.h, z6.h, z2.h[7]\n"
+ "fmla z20.h, z6.h, z3.h[7]\n"
+ "fmla z24.h, z6.h, z4.h[7]\n"
+ "fmla z28.h, z6.h, z5.h[7]\n"
+ "ld1h { z6.h }, p5/Z, [x9, #7, MUL VL]\n"
+ "addvl x9, x9, #8\n"
+ "fmla z9.h, z7.h, z0.h[7]\n"
+ "fmla z13.h, z7.h, z1.h[7]\n"
+ "fmla z17.h, z7.h, z2.h[7]\n"
+ "fmla z21.h, z7.h, z3.h[7]\n"
+ "fmla z25.h, z7.h, z4.h[7]\n"
+ "fmla z29.h, z7.h, z5.h[7]\n"
+ "ld1h { z7.h }, p5/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
+ "fmla z10.h, z6.h, z0.h[7]\n"
+ "fmla z14.h, z6.h, z1.h[7]\n"
+ "fmla z18.h, z6.h, z2.h[7]\n"
+ "fmla z22.h, z6.h, z3.h[7]\n"
+ "fmla z26.h, z6.h, z4.h[7]\n"
+ "fmla z30.h, z6.h, z5.h[7]\n"
+ "fmla z11.h, z7.h, z0.h[7]\n"
+ "fmla z15.h, z7.h, z1.h[7]\n"
+ "fmla z19.h, z7.h, z2.h[7]\n"
+ "fmla z23.h, z7.h, z3.h[7]\n"
+ "fmla z27.h, z7.h, z4.h[7]\n"
+ "fmla z31.h, z7.h, z5.h[7]\n"
+ "bgt 80b\n"
+ "81:" // Height 6: Multiply loop: Single iteration only
+ "whilelt p0.h, XZR, x26\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "ld1rqh { z3.h }, p0/Z, [x22]\n"
+ "ld1rqh { z4.h }, p0/Z, [x21]\n"
+ "ld1rqh { z5.h }, p0/Z, [x20]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[0]\n"
+ "fmla z12.h, z6.h, z1.h[0]\n"
+ "fmla z16.h, z6.h, z2.h[0]\n"
+ "fmla z20.h, z6.h, z3.h[0]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z24.h, z6.h, z4.h[0]\n"
+ "fmla z28.h, z6.h, z5.h[0]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z9.h, z7.h, z0.h[0]\n"
+ "fmla z13.h, z7.h, z1.h[0]\n"
+ "fmla z17.h, z7.h, z2.h[0]\n"
+ "fmla z21.h, z7.h, z3.h[0]\n"
+ "fmla z25.h, z7.h, z4.h[0]\n"
+ "fmla z29.h, z7.h, z5.h[0]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.h, z6.h, z0.h[0]\n"
+ "fmla z14.h, z6.h, z1.h[0]\n"
+ "fmla z18.h, z6.h, z2.h[0]\n"
+ "fmla z22.h, z6.h, z3.h[0]\n"
+ "fmla z26.h, z6.h, z4.h[0]\n"
+ "fmla z30.h, z6.h, z5.h[0]\n"
+ "fmla z11.h, z7.h, z0.h[0]\n"
+ "fmla z15.h, z7.h, z1.h[0]\n"
+ "fmla z19.h, z7.h, z2.h[0]\n"
+ "fmla z23.h, z7.h, z3.h[0]\n"
+ "fmla z27.h, z7.h, z4.h[0]\n"
+ "fmla z31.h, z7.h, z5.h[0]\n"
+ "ble 82f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[1]\n"
+ "fmla z12.h, z6.h, z1.h[1]\n"
+ "fmla z16.h, z6.h, z2.h[1]\n"
+ "fmla z20.h, z6.h, z3.h[1]\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
+ "fmla z24.h, z6.h, z4.h[1]\n"
+ "fmla z28.h, z6.h, z5.h[1]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z9.h, z7.h, z0.h[1]\n"
+ "fmla z13.h, z7.h, z1.h[1]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z17.h, z7.h, z2.h[1]\n"
+ "fmla z21.h, z7.h, z3.h[1]\n"
+ "fmla z25.h, z7.h, z4.h[1]\n"
+ "fmla z29.h, z7.h, z5.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.h, z6.h, z0.h[1]\n"
+ "fmla z14.h, z6.h, z1.h[1]\n"
+ "fmla z18.h, z6.h, z2.h[1]\n"
+ "fmla z22.h, z6.h, z3.h[1]\n"
+ "fmla z26.h, z6.h, z4.h[1]\n"
+ "fmla z30.h, z6.h, z5.h[1]\n"
+ "fmla z11.h, z7.h, z0.h[1]\n"
+ "fmla z15.h, z7.h, z1.h[1]\n"
+ "fmla z19.h, z7.h, z2.h[1]\n"
+ "fmla z23.h, z7.h, z3.h[1]\n"
+ "fmla z27.h, z7.h, z4.h[1]\n"
+ "fmla z31.h, z7.h, z5.h[1]\n"
+ "ble 82f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[2]\n"
+ "fmla z12.h, z6.h, z1.h[2]\n"
+ "fmla z16.h, z6.h, z2.h[2]\n"
+ "fmla z20.h, z6.h, z3.h[2]\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
+ "fmla z24.h, z6.h, z4.h[2]\n"
+ "fmla z28.h, z6.h, z5.h[2]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z9.h, z7.h, z0.h[2]\n"
+ "fmla z13.h, z7.h, z1.h[2]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z17.h, z7.h, z2.h[2]\n"
+ "fmla z21.h, z7.h, z3.h[2]\n"
+ "fmla z25.h, z7.h, z4.h[2]\n"
+ "fmla z29.h, z7.h, z5.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.h, z6.h, z0.h[2]\n"
+ "fmla z14.h, z6.h, z1.h[2]\n"
+ "fmla z18.h, z6.h, z2.h[2]\n"
+ "fmla z22.h, z6.h, z3.h[2]\n"
+ "fmla z26.h, z6.h, z4.h[2]\n"
+ "fmla z30.h, z6.h, z5.h[2]\n"
+ "fmla z11.h, z7.h, z0.h[2]\n"
+ "fmla z15.h, z7.h, z1.h[2]\n"
+ "fmla z19.h, z7.h, z2.h[2]\n"
+ "fmla z23.h, z7.h, z3.h[2]\n"
+ "fmla z27.h, z7.h, z4.h[2]\n"
+ "fmla z31.h, z7.h, z5.h[2]\n"
+ "ble 82f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[3]\n"
+ "fmla z12.h, z6.h, z1.h[3]\n"
+ "fmla z16.h, z6.h, z2.h[3]\n"
+ "fmla z20.h, z6.h, z3.h[3]\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
+ "fmla z24.h, z6.h, z4.h[3]\n"
+ "fmla z28.h, z6.h, z5.h[3]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z9.h, z7.h, z0.h[3]\n"
+ "fmla z13.h, z7.h, z1.h[3]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z17.h, z7.h, z2.h[3]\n"
+ "fmla z21.h, z7.h, z3.h[3]\n"
+ "fmla z25.h, z7.h, z4.h[3]\n"
+ "fmla z29.h, z7.h, z5.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.h, z6.h, z0.h[3]\n"
+ "fmla z14.h, z6.h, z1.h[3]\n"
+ "fmla z18.h, z6.h, z2.h[3]\n"
+ "fmla z22.h, z6.h, z3.h[3]\n"
+ "fmla z26.h, z6.h, z4.h[3]\n"
+ "fmla z30.h, z6.h, z5.h[3]\n"
+ "fmla z11.h, z7.h, z0.h[3]\n"
+ "fmla z15.h, z7.h, z1.h[3]\n"
+ "fmla z19.h, z7.h, z2.h[3]\n"
+ "fmla z23.h, z7.h, z3.h[3]\n"
+ "fmla z27.h, z7.h, z4.h[3]\n"
+ "fmla z31.h, z7.h, z5.h[3]\n"
+ "ble 82f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[4]\n"
+ "fmla z12.h, z6.h, z1.h[4]\n"
+ "fmla z16.h, z6.h, z2.h[4]\n"
+ "fmla z20.h, z6.h, z3.h[4]\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
+ "fmla z24.h, z6.h, z4.h[4]\n"
+ "fmla z28.h, z6.h, z5.h[4]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z9.h, z7.h, z0.h[4]\n"
+ "fmla z13.h, z7.h, z1.h[4]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z17.h, z7.h, z2.h[4]\n"
+ "fmla z21.h, z7.h, z3.h[4]\n"
+ "fmla z25.h, z7.h, z4.h[4]\n"
+ "fmla z29.h, z7.h, z5.h[4]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.h, z6.h, z0.h[4]\n"
+ "fmla z14.h, z6.h, z1.h[4]\n"
+ "fmla z18.h, z6.h, z2.h[4]\n"
+ "fmla z22.h, z6.h, z3.h[4]\n"
+ "fmla z26.h, z6.h, z4.h[4]\n"
+ "fmla z30.h, z6.h, z5.h[4]\n"
+ "fmla z11.h, z7.h, z0.h[4]\n"
+ "fmla z15.h, z7.h, z1.h[4]\n"
+ "fmla z19.h, z7.h, z2.h[4]\n"
+ "fmla z23.h, z7.h, z3.h[4]\n"
+ "fmla z27.h, z7.h, z4.h[4]\n"
+ "fmla z31.h, z7.h, z5.h[4]\n"
+ "ble 82f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[5]\n"
+ "fmla z12.h, z6.h, z1.h[5]\n"
+ "fmla z16.h, z6.h, z2.h[5]\n"
+ "fmla z20.h, z6.h, z3.h[5]\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
+ "fmla z24.h, z6.h, z4.h[5]\n"
+ "fmla z28.h, z6.h, z5.h[5]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z9.h, z7.h, z0.h[5]\n"
+ "fmla z13.h, z7.h, z1.h[5]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z17.h, z7.h, z2.h[5]\n"
+ "fmla z21.h, z7.h, z3.h[5]\n"
+ "fmla z25.h, z7.h, z4.h[5]\n"
+ "fmla z29.h, z7.h, z5.h[5]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.h, z6.h, z0.h[5]\n"
+ "fmla z14.h, z6.h, z1.h[5]\n"
+ "fmla z18.h, z6.h, z2.h[5]\n"
+ "fmla z22.h, z6.h, z3.h[5]\n"
+ "fmla z26.h, z6.h, z4.h[5]\n"
+ "fmla z30.h, z6.h, z5.h[5]\n"
+ "fmla z11.h, z7.h, z0.h[5]\n"
+ "fmla z15.h, z7.h, z1.h[5]\n"
+ "fmla z19.h, z7.h, z2.h[5]\n"
+ "fmla z23.h, z7.h, z3.h[5]\n"
+ "fmla z27.h, z7.h, z4.h[5]\n"
+ "fmla z31.h, z7.h, z5.h[5]\n"
+ "ble 82f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[6]\n"
+ "fmla z12.h, z6.h, z1.h[6]\n"
+ "fmla z16.h, z6.h, z2.h[6]\n"
+ "fmla z20.h, z6.h, z3.h[6]\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
+ "fmla z24.h, z6.h, z4.h[6]\n"
+ "fmla z28.h, z6.h, z5.h[6]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z9.h, z7.h, z0.h[6]\n"
+ "fmla z13.h, z7.h, z1.h[6]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z17.h, z7.h, z2.h[6]\n"
+ "fmla z21.h, z7.h, z3.h[6]\n"
+ "fmla z25.h, z7.h, z4.h[6]\n"
+ "fmla z29.h, z7.h, z5.h[6]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.h, z6.h, z0.h[6]\n"
+ "fmla z14.h, z6.h, z1.h[6]\n"
+ "fmla z18.h, z6.h, z2.h[6]\n"
+ "fmla z22.h, z6.h, z3.h[6]\n"
+ "fmla z26.h, z6.h, z4.h[6]\n"
+ "fmla z30.h, z6.h, z5.h[6]\n"
+ "fmla z11.h, z7.h, z0.h[6]\n"
+ "fmla z15.h, z7.h, z1.h[6]\n"
+ "fmla z19.h, z7.h, z2.h[6]\n"
+ "fmla z23.h, z7.h, z3.h[6]\n"
+ "fmla z27.h, z7.h, z4.h[6]\n"
+ "fmla z31.h, z7.h, z5.h[6]\n"
+ "ble 82f\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "fmla z8.h, z6.h, z0.h[7]\n"
+ "fmla z12.h, z6.h, z1.h[7]\n"
+ "fmla z16.h, z6.h, z2.h[7]\n"
+ "fmla z20.h, z6.h, z3.h[7]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z24.h, z6.h, z4.h[7]\n"
+ "fmla z28.h, z6.h, z5.h[7]\n"
+ "ld1h { z6.h }, p5/Z, [x9]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z9.h, z7.h, z0.h[7]\n"
+ "fmla z13.h, z7.h, z1.h[7]\n"
+ "fmla z17.h, z7.h, z2.h[7]\n"
+ "fmla z21.h, z7.h, z3.h[7]\n"
+ "fmla z25.h, z7.h, z4.h[7]\n"
+ "fmla z29.h, z7.h, z5.h[7]\n"
+ "ld1h { z7.h }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.h, z6.h, z0.h[7]\n"
+ "fmla z14.h, z6.h, z1.h[7]\n"
+ "fmla z18.h, z6.h, z2.h[7]\n"
+ "fmla z22.h, z6.h, z3.h[7]\n"
+ "fmla z26.h, z6.h, z4.h[7]\n"
+ "fmla z30.h, z6.h, z5.h[7]\n"
+ "fmla z11.h, z7.h, z0.h[7]\n"
+ "fmla z15.h, z7.h, z1.h[7]\n"
+ "fmla z19.h, z7.h, z2.h[7]\n"
+ "fmla z23.h, z7.h, z3.h[7]\n"
+ "fmla z27.h, z7.h, z4.h[7]\n"
+ "fmla z31.h, z7.h, z5.h[7]\n"
+ "82:" // Height 6: Multiply loop: multiply skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 77b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #1\n"
+ "add x23, x24, x19, LSL #1\n"
+ "add x22, x23, x19, LSL #1\n"
+ "add x21, x22, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
+ "tbz %x[flags], #1, 83f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rh { z1.h }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z0.h }, p5/Z, [x19]\n"
+ "fmin z8.h, p5/M, z8.h, z1.h\n"
+ "fmin z9.h, p5/M, z9.h, z1.h\n"
+ "fmin z10.h, p5/M, z10.h, z1.h\n"
+ "fmin z11.h, p5/M, z11.h, z1.h\n"
+ "fmin z12.h, p5/M, z12.h, z1.h\n"
+ "fmin z13.h, p5/M, z13.h, z1.h\n"
+ "fmin z14.h, p5/M, z14.h, z1.h\n"
+ "fmin z15.h, p5/M, z15.h, z1.h\n"
+ "fmin z16.h, p5/M, z16.h, z1.h\n"
+ "fmin z17.h, p5/M, z17.h, z1.h\n"
+ "fmin z18.h, p5/M, z18.h, z1.h\n"
+ "fmin z19.h, p5/M, z19.h, z1.h\n"
+ "fmin z20.h, p5/M, z20.h, z1.h\n"
+ "fmin z21.h, p5/M, z21.h, z1.h\n"
+ "fmin z22.h, p5/M, z22.h, z1.h\n"
+ "fmin z23.h, p5/M, z23.h, z1.h\n"
+ "fmin z24.h, p5/M, z24.h, z1.h\n"
+ "fmin z25.h, p5/M, z25.h, z1.h\n"
+ "fmin z26.h, p5/M, z26.h, z1.h\n"
+ "fmin z27.h, p5/M, z27.h, z1.h\n"
+ "fmin z28.h, p5/M, z28.h, z1.h\n"
+ "fmin z29.h, p5/M, z29.h, z1.h\n"
+ "fmin z30.h, p5/M, z30.h, z1.h\n"
+ "fmin z31.h, p5/M, z31.h, z1.h\n"
+ "fmax z8.h, p5/M, z8.h, z0.h\n"
+ "fmax z9.h, p5/M, z9.h, z0.h\n"
+ "fmax z10.h, p5/M, z10.h, z0.h\n"
+ "fmax z11.h, p5/M, z11.h, z0.h\n"
+ "fmax z12.h, p5/M, z12.h, z0.h\n"
+ "fmax z13.h, p5/M, z13.h, z0.h\n"
+ "fmax z14.h, p5/M, z14.h, z0.h\n"
+ "fmax z15.h, p5/M, z15.h, z0.h\n"
+ "fmax z16.h, p5/M, z16.h, z0.h\n"
+ "fmax z17.h, p5/M, z17.h, z0.h\n"
+ "fmax z18.h, p5/M, z18.h, z0.h\n"
+ "fmax z19.h, p5/M, z19.h, z0.h\n"
+ "fmax z20.h, p5/M, z20.h, z0.h\n"
+ "fmax z21.h, p5/M, z21.h, z0.h\n"
+ "fmax z22.h, p5/M, z22.h, z0.h\n"
+ "fmax z23.h, p5/M, z23.h, z0.h\n"
+ "fmax z24.h, p5/M, z24.h, z0.h\n"
+ "fmax z25.h, p5/M, z25.h, z0.h\n"
+ "fmax z26.h, p5/M, z26.h, z0.h\n"
+ "fmax z27.h, p5/M, z27.h, z0.h\n"
+ "fmax z28.h, p5/M, z28.h, z0.h\n"
+ "fmax z29.h, p5/M, z29.h, z0.h\n"
+ "fmax z30.h, p5/M, z30.h, z0.h\n"
+ "fmax z31.h, p5/M, z31.h, z0.h\n"
+ "83:" // Height 6: No activation
+ "st1h { z8.h }, p4, [x12]\n"
+ "st1h { z9.h }, p3, [x12, #1, MUL VL]\n"
+ "st1h { z10.h }, p2, [x12, #2, MUL VL]\n"
+ "st1h { z11.h }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1h { z12.h }, p4, [x24]\n"
+ "st1h { z13.h }, p3, [x24, #1, MUL VL]\n"
+ "st1h { z14.h }, p2, [x24, #2, MUL VL]\n"
+ "st1h { z15.h }, p1, [x24, #3, MUL VL]\n"
+ "st1h { z16.h }, p4, [x23]\n"
+ "st1h { z17.h }, p3, [x23, #1, MUL VL]\n"
+ "st1h { z18.h }, p2, [x23, #2, MUL VL]\n"
+ "st1h { z19.h }, p1, [x23, #3, MUL VL]\n"
+ "st1h { z20.h }, p4, [x22]\n"
+ "st1h { z21.h }, p3, [x22, #1, MUL VL]\n"
+ "st1h { z22.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z23.h }, p1, [x22, #3, MUL VL]\n"
+ "st1h { z24.h }, p4, [x21]\n"
+ "st1h { z25.h }, p3, [x21, #1, MUL VL]\n"
+ "st1h { z26.h }, p2, [x21, #2, MUL VL]\n"
+ "st1h { z27.h }, p1, [x21, #3, MUL VL]\n"
+ "st1h { z28.h }, p4, [x20]\n"
+ "st1h { z29.h }, p3, [x20, #1, MUL VL]\n"
+ "st1h { z30.h }, p2, [x20, #2, MUL VL]\n"
+ "st1h { z31.h }, p1, [x20, #3, MUL VL]\n"
+ "84:" // Height 6: Writeback done
+ "dech x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 72b\n"
+ "subs %x[M], %x[M], #0x6\n"
+ "beq 86f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 85f\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "b 1b\n"
+ "85:" // Update direct input
+ "mov x19, #0xc\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
+ "b 1b\n"
+ "86:" // Exit
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
+ : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace arm_gemm
+#endif // ARM_COMPUTE_ENABLE_SVE
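
For orientation only (this paragraph and sketch are not part of the patch): the generated kernel above repeats one scheme per row-block height (1 to 6) — initialise the z8..z31 accumulators from bias, from the existing output, or to zero; run a main K loop unrolled over 8 LHS elements; fall through to a one-element tail loop; optionally clamp with the fmin/fmax pair; then write four predicated vectors back per row. A minimal scalar reference of the same loop nest, assuming plain row-major operands and using float for portability (the real kernel computes in fp16 and reads B in the panelled fixed-format layout, so this is an illustrative model, not the kernel's algorithm), could look like:

    // Hypothetical scalar reference for the loop structure of the kernel above.
    // A, B, C are assumed plain row-major here; the actual fixed-format B is
    // panelled, and the asm keeps the accumulator tile in z8..z31.
    #include <algorithm>
    #include <cstddef>

    void ffhybrid_reference(std::size_t M, std::size_t N, std::size_t K,
                            const float *A, std::size_t lda,
                            const float *B, std::size_t ldb,
                            float *C, std::size_t ldc,
                            const float *bias,          // may be nullptr ("no bias" path)
                            float minval, float maxval) // the fmin/fmax clamp bounds
    {
        for (std::size_t i = 0; i < M; ++i) {
            for (std::size_t j = 0; j < N; ++j) {
                float acc = bias ? bias[j] : 0.0f;      // bias / zero-init paths
                for (std::size_t k = 0; k < K; ++k)
                    acc += A[i * lda + k] * B[k * ldb + j]; // the fmla chain
                C[i * ldc + j] = std::min(std::max(acc, minval), maxval);
            }
        }
    }
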
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL.hpp
new file mode 100644
index 0000000000..b4c124c1e3
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL.hpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#pragma once
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include "../std_transforms_sve.hpp"
+#include "../kernel_weight_format.hpp"
+#include "../performance_parameters.hpp"
+
+#define ARGLIST \
+ unsigned int, const unsigned int *, \
+ IndirectInputArg<float>, \
+ size_t, size_t, \
+ const float *, \
+ size_t, \
+ IndirectOutputArg<float>, \
+ const float *, Activation, bool
+
+namespace arm_gemm
+{
+// Actual kernel implementations
+void sve_ffhybrid_fp32_mla_6x4VL( ARGLIST );
+void sve_ffhybrid_fp32_mla_6x4VL_a64fx( ARGLIST );
+
+class cls_sve_ffhybrid_fp32_mla_6x4VL
+{
+public:
+ typedef float lhs_operand_type;
+ typedef float rhs_operand_type;
+ typedef float result_type;
+
+ typedef void (*kern_type)( ARGLIST );
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 6;
+ }
+ static unsigned int stripe_width()
+ {
+ return get_vector_length<float>() * 1;
+ }
+
+ static KernelWeightFormat kernel_weight_format()
+ {
+ return KernelWeightFormat::VL1VL_BL32;
+ }
+
+ static unsigned int out_width()
+ {
+ return get_vector_length<float>() * 4;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 1;
+ }
+
+ static constexpr bool supports_accumulate()
+ {
+ return true;
+ }
+
+ StdTransformsSVE<rhs_operand_type, result_type, 6, 4, 1> transforms = {};
+ template<typename T>
+ static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
+ {
+ if (std::is_same<T, float>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 15.27 };
+ }
+ }
+
+ return { 1.0 };
+ }
+
+ // Default to the generic kernel
+ kern_type kernel=sve_ffhybrid_fp32_mla_6x4VL;
+ cls_sve_ffhybrid_fp32_mla_6x4VL(const CPUInfo *ci)
+ {
+ switch(ci->get_cpu_model()) {
+ default:
+ break;
+ case CPUModel::A64FX:
+ kernel=sve_ffhybrid_fp32_mla_6x4VL_a64fx;
+ break;
+ }
+ }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+#endif // ARM_COMPUTE_ENABLE_SVE
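[Editorial note, not part of the patch: a minimal sketch of how a caller might consume the descriptor class above. The exact namespace of CPUInfo/CPUModel is assumed from the surrounding arm_gemm headers; everything else is taken from the header itself:]

// Sketch: querying the kernel class's blocking metadata and letting the
// constructor pick the CPU-specific variant.
void describe_kernel(const CPUInfo *ci) {
    using cls = arm_gemm::cls_sve_ffhybrid_fp32_mla_6x4VL;
    cls k(ci);                               // selects the A64FX variant on CPUModel::A64FX
    unsigned int mr = cls::out_height();     // 6 rows per block
    unsigned int nr = cls::out_width();      // 4 SVE vectors of floats
    auto fmt = cls::kernel_weight_format();  // KernelWeightFormat::VL1VL_BL32
    (void)mr; (void)nr; (void)fmt;
    (void)k.kernel;                          // the function pointer invoked with the ARGLIST arguments
}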
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/a64fx.cpp
new file mode 100644
index 0000000000..7dd4e234d5
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/a64fx.cpp
@@ -0,0 +1,1530 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include "arm_gemm.hpp"
+#include "../../utils.hpp"
+
+#include <cassert>
+#include <limits>
+
+namespace arm_gemm {
+
+void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
+ unsigned int num_strings, const unsigned int *string_lengths, IndirectInputArg<float> A_arg,
+ size_t M, size_t N, const float *B_ptr, size_t B_stride, IndirectOutputArg<float> output_arg,
+ const float *bias, Activation act, bool accumulate
+)
+{
+ struct KernelArgs {
+ float maxval = static_cast<float>(std::numeric_limits<float>::infinity());
+ float minval = - static_cast<float>(std::numeric_limits<float>::infinity());
+ unsigned int num_strings = {};
+ const unsigned int *string_lengths = {};
+ size_t N = {};
+ const float *B_ptr = {};
+ const float *cur_B_ptr = {};
+ size_t B_stride = {};
+ size_t output_offset = {};
+ size_t input_initial_col = {};
+ size_t input_offset = {};
+ } ka;
+
+ unsigned long flags=0;
+ void *output_ptr;
+ void *input_ptr;
+
+ if (output_arg.is_indirect) {
+ output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_offset=output_arg.indirect.offset;
+ flags |= 0x4;
+ } else {
+ output_ptr=(void *)(output_arg.direct.base);
+ ka.output_offset=output_arg.direct.stride;
+ }
+
+ if (A_arg.is_indirect) {
+ input_ptr=(void *)(A_arg.indirect.ptr);
+ ka.input_offset=A_arg.indirect.start_row;
+ ka.input_initial_col=A_arg.indirect.start_col;
+ flags |= 0x8;
+ } else {
+ assert(num_strings==1);
+ input_ptr=(void *)(A_arg.direct.base);
+ ka.input_offset=A_arg.direct.stride;
+ }
+ if (accumulate) {
+ flags |= 0x1;
+ }
+ ka.num_strings = num_strings;
+ ka.string_lengths = string_lengths;
+ ka.N = N;
+ ka.B_ptr = B_ptr;
+ ka.B_stride = B_stride;
+ switch(act.type) {
+ default:
+ case Activation::Type::None:
+ break;
+ case Activation::Type::BoundedReLU:
+ ka.maxval = static_cast<float>(act.param1);
+ /* fall through */
+ case Activation::Type::ReLU:
+ ka.minval = 0;
+ flags |= 0x2;
+ break;
+ }
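+ // Flag bits: 0x1 = accumulate into existing output, 0x2 = apply the
+ // min/max activation clamp, 0x4 = indirect output, 0x8 = indirect
+ // (string-based) input; the assembly below tests bits 0, 1 and 3 via tbz.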
+ __asm__ __volatile__(
+ "ptrue p4.b\n"
+ "1:" // Row loop
+ "cmp %x[M], #0x6\n"
+ "bge 66f\n"
+ "cmp %x[M], #0x4\n"
+ "bgt 53f\n"
+ "beq 40f\n"
+ "cmp %x[M], #0x2\n"
+ "bgt 27f\n"
+ "beq 14f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "2:" // Height 1: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 3f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 3f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 3f\n"
+ "mov x10, x11\n"
+ "3:" // Height 1: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x13\n"
+ "cbz x14, 4f\n"
+ "ld1w { z8.s }, p4/Z, [x14]\n"
+ "ld1w { z9.s }, p4/Z, [x14, #1, MUL VL]\n"
+ "ld1w { z10.s }, p4/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x14, #3, MUL VL]\n"
+ "addvl x14, x14, #4\n"
+ "b 6f\n"
+ "4:" // Height 1: no bias
+ "tbz %x[flags], #0, 5f\n"
+ "ld1w { z8.s }, p3/Z, [x12]\n"
+ "ld1w { z9.s }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z10.s }, p1/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z11.s }, p0/Z, [x12, #3, MUL VL]\n"
+ "b 6f\n"
+ "5:" // Height 1: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "6:" // Height 1: setup done
+ "mov x27, #0x0\n"
+ "7:" // Height 1: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 8f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 9f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "b 9f\n"
+ "8:" // Height 1: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "9:" // Height 1: input setup done
+ "subs x26, x26, #0x1\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1w { z6.s }, p4/Z, [x11]\n"
+ "ld1w { z7.s }, p4/Z, [x10]\n"
+ "ble 11f\n"
+ "10:" // Height 1: Multiply loop: Main loop
+ "fmla z8.s, p4/M, z6.s, z0.s\n"
+ "fmla z9.s, p4/M, z7.s, z0.s\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "ld1w { z7.s }, p4/Z, [x28]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z10.s, p4/M, z6.s, z0.s\n"
+ "fmla z11.s, p4/M, z7.s, z0.s\n"
+ "add x25, x25, #0x4\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1w { z6.s }, p4/Z, [x11]\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "ld1w { z7.s }, p4/Z, [x10]\n"
+ "bgt 10b\n"
+ "11:" // Height 1: Multiply loop: Main loop skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "fmla z8.s, p4/M, z6.s, z0.s\n"
+ "fmla z9.s, p4/M, z7.s, z0.s\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "ld1w { z7.s }, p4/Z, [x28]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "fmla z10.s, p4/M, z6.s, z0.s\n"
+ "fmla z11.s, p4/M, z7.s, z0.s\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "bne 7b\n"
+ "tbz %x[flags], #1, 12f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p4/Z, [x19]\n"
+ "fmin z8.s, p4/M, z8.s, z1.s\n"
+ "fmin z9.s, p4/M, z9.s, z1.s\n"
+ "fmin z10.s, p4/M, z10.s, z1.s\n"
+ "fmin z11.s, p4/M, z11.s, z1.s\n"
+ "fmax z8.s, p4/M, z8.s, z0.s\n"
+ "fmax z9.s, p4/M, z9.s, z0.s\n"
+ "fmax z10.s, p4/M, z10.s, z0.s\n"
+ "fmax z11.s, p4/M, z11.s, z0.s\n"
+ "12:" // Height 1: No activation
+ "st1w { z8.s }, p3, [x12]\n"
+ "st1w { z9.s }, p2, [x12, #1, MUL VL]\n"
+ "st1w { z10.s }, p1, [x12, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "13:" // Height 1: Writeback done
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 2b\n"
+ "b 80f\n"
+ "14:" // Height 2
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "15:" // Height 2: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 16f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 16f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 16f\n"
+ "mov x10, x11\n"
+ "16:" // Height 2: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x13\n"
+ "cbz x14, 17f\n"
+ "ld1w { z8.s }, p4/Z, [x14]\n"
+ "ld1w { z9.s }, p4/Z, [x14, #1, MUL VL]\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
+ "ld1w { z10.s }, p4/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x14, #3, MUL VL]\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
+ "addvl x14, x14, #4\n"
+ "b 19f\n"
+ "17:" // Height 2: no bias
+ "tbz %x[flags], #0, 18f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "ld1w { z8.s }, p3/Z, [x12]\n"
+ "ld1w { z9.s }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z10.s }, p1/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z11.s }, p0/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x24]\n"
+ "ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
+ "b 19f\n"
+ "18:" // Height 2: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "19:" // Height 2: setup done
+ "mov x27, #0x0\n"
+ "20:" // Height 2: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 21f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 22f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "b 22f\n"
+ "21:" // Height 2: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "22:" // Height 2: input setup done
+ "subs x26, x26, #0x1\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1w { z6.s }, p4/Z, [x11]\n"
+ "ld1w { z7.s }, p4/Z, [x10]\n"
+ "ble 24f\n"
+ "23:" // Height 2: Multiply loop: Main loop
+ "fmla z8.s, p4/M, z6.s, z0.s\n"
+ "fmla z12.s, p4/M, z6.s, z1.s\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z9.s, p4/M, z7.s, z0.s\n"
+ "fmla z13.s, p4/M, z7.s, z1.s\n"
+ "ld1w { z7.s }, p4/Z, [x28]\n"
+ "addvl x10, x10, #1\n"
+ "add x25, x25, #0x4\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z10.s, p4/M, z6.s, z0.s\n"
+ "fmla z14.s, p4/M, z6.s, z1.s\n"
+ "add x24, x24, #0x4\n"
+ "fmla z11.s, p4/M, z7.s, z0.s\n"
+ "fmla z15.s, p4/M, z7.s, z1.s\n"
+ "addvl x9, x9, #1\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "addvl x28, x28, #1\n"
+ "ld1w { z6.s }, p4/Z, [x11]\n"
+ "ld1w { z7.s }, p4/Z, [x10]\n"
+ "bgt 23b\n"
+ "24:" // Height 2: Multiply loop: Main loop skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "fmla z8.s, p4/M, z6.s, z0.s\n"
+ "fmla z12.s, p4/M, z6.s, z1.s\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "fmla z9.s, p4/M, z7.s, z0.s\n"
+ "fmla z13.s, p4/M, z7.s, z1.s\n"
+ "ld1w { z7.s }, p4/Z, [x28]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "fmla z10.s, p4/M, z6.s, z0.s\n"
+ "fmla z14.s, p4/M, z6.s, z1.s\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z11.s, p4/M, z7.s, z0.s\n"
+ "fmla z15.s, p4/M, z7.s, z1.s\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "bne 20b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "tbz %x[flags], #1, 25f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p4/Z, [x19]\n"
+ "fmin z8.s, p4/M, z8.s, z1.s\n"
+ "fmin z9.s, p4/M, z9.s, z1.s\n"
+ "fmin z10.s, p4/M, z10.s, z1.s\n"
+ "fmin z11.s, p4/M, z11.s, z1.s\n"
+ "fmin z12.s, p4/M, z12.s, z1.s\n"
+ "fmin z13.s, p4/M, z13.s, z1.s\n"
+ "fmin z14.s, p4/M, z14.s, z1.s\n"
+ "fmin z15.s, p4/M, z15.s, z1.s\n"
+ "fmax z8.s, p4/M, z8.s, z0.s\n"
+ "fmax z9.s, p4/M, z9.s, z0.s\n"
+ "fmax z10.s, p4/M, z10.s, z0.s\n"
+ "fmax z11.s, p4/M, z11.s, z0.s\n"
+ "fmax z12.s, p4/M, z12.s, z0.s\n"
+ "fmax z13.s, p4/M, z13.s, z0.s\n"
+ "fmax z14.s, p4/M, z14.s, z0.s\n"
+ "fmax z15.s, p4/M, z15.s, z0.s\n"
+ "25:" // Height 2: No activation
+ "st1w { z8.s }, p3, [x12]\n"
+ "st1w { z9.s }, p2, [x12, #1, MUL VL]\n"
+ "st1w { z10.s }, p1, [x12, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z12.s }, p3, [x24]\n"
+ "st1w { z13.s }, p2, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x24, #3, MUL VL]\n"
+ "26:" // Height 2: Writeback done
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 15b\n"
+ "b 80f\n"
+ "27:" // Height 3
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "28:" // Height 3: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 29f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 29f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 29f\n"
+ "mov x10, x11\n"
+ "29:" // Height 3: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x13\n"
+ "cbz x14, 30f\n"
+ "ld1w { z8.s }, p4/Z, [x14]\n"
+ "ld1w { z9.s }, p4/Z, [x14, #1, MUL VL]\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
+ "ld1w { z10.s }, p4/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x14, #3, MUL VL]\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
+ "mov z16.d, z8.d\n"
+ "mov z17.d, z9.d\n"
+ "addvl x14, x14, #4\n"
+ "mov z18.d, z10.d\n"
+ "mov z19.d, z11.d\n"
+ "b 32f\n"
+ "30:" // Height 3: no bias
+ "tbz %x[flags], #0, 31f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z8.s }, p3/Z, [x12]\n"
+ "ld1w { z9.s }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z10.s }, p1/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z11.s }, p0/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x24]\n"
+ "ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x23]\n"
+ "ld1w { z17.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "b 32f\n"
+ "31:" // Height 3: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "mov z19.b, #0x0\n"
+ "32:" // Height 3: setup done
+ "mov x27, #0x0\n"
+ "33:" // Height 3: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 34f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 35f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "b 35f\n"
+ "34:" // Height 3: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "35:" // Height 3: input setup done
+ "subs x26, x26, #0x1\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1w { z6.s }, p4/Z, [x11]\n"
+ "ld1w { z7.s }, p4/Z, [x10]\n"
+ "ble 37f\n"
+ "36:" // Height 3: Multiply loop: Main loop
+ "fmla z8.s, p4/M, z6.s, z0.s\n"
+ "fmla z12.s, p4/M, z6.s, z1.s\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z16.s, p4/M, z6.s, z2.s\n"
+ "fmla z9.s, p4/M, z7.s, z0.s\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "add x25, x25, #0x4\n"
+ "fmla z13.s, p4/M, z7.s, z1.s\n"
+ "fmla z17.s, p4/M, z7.s, z2.s\n"
+ "ld1w { z7.s }, p4/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
+ "add x24, x24, #0x4\n"
+ "add x23, x23, #0x4\n"
+ "fmla z10.s, p4/M, z6.s, z0.s\n"
+ "fmla z14.s, p4/M, z6.s, z1.s\n"
+ "fmla z18.s, p4/M, z6.s, z2.s\n"
+ "fmla z11.s, p4/M, z7.s, z0.s\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "fmla z15.s, p4/M, z7.s, z1.s\n"
+ "fmla z19.s, p4/M, z7.s, z2.s\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1w { z6.s }, p4/Z, [x11]\n"
+ "ld1w { z7.s }, p4/Z, [x10]\n"
+ "bgt 36b\n"
+ "37:" // Height 3: Multiply loop: Main loop skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "fmla z8.s, p4/M, z6.s, z0.s\n"
+ "fmla z12.s, p4/M, z6.s, z1.s\n"
+ "add x27, x27, #0x1\n"
+ "fmla z16.s, p4/M, z6.s, z2.s\n"
+ "fmla z9.s, p4/M, z7.s, z0.s\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "cmp x27, x19\n"
+ "fmla z13.s, p4/M, z7.s, z1.s\n"
+ "fmla z17.s, p4/M, z7.s, z2.s\n"
+ "ld1w { z7.s }, p4/Z, [x28]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z10.s, p4/M, z6.s, z0.s\n"
+ "fmla z14.s, p4/M, z6.s, z1.s\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "fmla z18.s, p4/M, z6.s, z2.s\n"
+ "fmla z11.s, p4/M, z7.s, z0.s\n"
+ "addvl x28, x28, #1\n"
+ "fmla z15.s, p4/M, z7.s, z1.s\n"
+ "fmla z19.s, p4/M, z7.s, z2.s\n"
+ "bne 33b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "tbz %x[flags], #1, 38f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p4/Z, [x19]\n"
+ "fmin z8.s, p4/M, z8.s, z1.s\n"
+ "fmin z9.s, p4/M, z9.s, z1.s\n"
+ "fmin z10.s, p4/M, z10.s, z1.s\n"
+ "fmin z11.s, p4/M, z11.s, z1.s\n"
+ "fmin z12.s, p4/M, z12.s, z1.s\n"
+ "fmin z13.s, p4/M, z13.s, z1.s\n"
+ "fmin z14.s, p4/M, z14.s, z1.s\n"
+ "fmin z15.s, p4/M, z15.s, z1.s\n"
+ "fmin z16.s, p4/M, z16.s, z1.s\n"
+ "fmin z17.s, p4/M, z17.s, z1.s\n"
+ "fmin z18.s, p4/M, z18.s, z1.s\n"
+ "fmin z19.s, p4/M, z19.s, z1.s\n"
+ "fmax z8.s, p4/M, z8.s, z0.s\n"
+ "fmax z9.s, p4/M, z9.s, z0.s\n"
+ "fmax z10.s, p4/M, z10.s, z0.s\n"
+ "fmax z11.s, p4/M, z11.s, z0.s\n"
+ "fmax z12.s, p4/M, z12.s, z0.s\n"
+ "fmax z13.s, p4/M, z13.s, z0.s\n"
+ "fmax z14.s, p4/M, z14.s, z0.s\n"
+ "fmax z15.s, p4/M, z15.s, z0.s\n"
+ "fmax z16.s, p4/M, z16.s, z0.s\n"
+ "fmax z17.s, p4/M, z17.s, z0.s\n"
+ "fmax z18.s, p4/M, z18.s, z0.s\n"
+ "fmax z19.s, p4/M, z19.s, z0.s\n"
+ "38:" // Height 3: No activation
+ "st1w { z8.s }, p3, [x12]\n"
+ "st1w { z9.s }, p2, [x12, #1, MUL VL]\n"
+ "st1w { z10.s }, p1, [x12, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z12.s }, p3, [x24]\n"
+ "st1w { z13.s }, p2, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p3, [x23]\n"
+ "st1w { z17.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p0, [x23, #3, MUL VL]\n"
+ "39:" // Height 3: Writeback done
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 28b\n"
+ "b 80f\n"
+ "40:" // Height 4
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "41:" // Height 4: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 42f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 42f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 42f\n"
+ "mov x10, x11\n"
+ "42:" // Height 4: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x13\n"
+ "cbz x14, 43f\n"
+ "ld1w { z8.s }, p4/Z, [x14]\n"
+ "ld1w { z9.s }, p4/Z, [x14, #1, MUL VL]\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
+ "ld1w { z10.s }, p4/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x14, #3, MUL VL]\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
+ "mov z16.d, z8.d\n"
+ "mov z17.d, z9.d\n"
+ "addvl x14, x14, #4\n"
+ "mov z18.d, z10.d\n"
+ "mov z19.d, z11.d\n"
+ "mov z20.d, z8.d\n"
+ "mov z21.d, z9.d\n"
+ "mov z22.d, z10.d\n"
+ "mov z23.d, z11.d\n"
+ "b 45f\n"
+ "43:" // Height 4: no bias
+ "tbz %x[flags], #0, 44f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z8.s }, p3/Z, [x12]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z9.s }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z10.s }, p1/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z11.s }, p0/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x24]\n"
+ "ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x23]\n"
+ "ld1w { z17.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p3/Z, [x22]\n"
+ "ld1w { z21.s }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z23.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "b 45f\n"
+ "44:" // Height 4: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "mov z19.b, #0x0\n"
+ "mov z20.b, #0x0\n"
+ "mov z21.b, #0x0\n"
+ "mov z22.b, #0x0\n"
+ "mov z23.b, #0x0\n"
+ "45:" // Height 4: setup done
+ "mov x27, #0x0\n"
+ "46:" // Height 4: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 47f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 48f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "b 48f\n"
+ "47:" // Height 4: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "48:" // Height 4: input setup done
+ "subs x26, x26, #0x1\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
+ "ld1w { z6.s }, p4/Z, [x11]\n"
+ "ld1w { z7.s }, p4/Z, [x10]\n"
+ "ble 50f\n"
+ "49:" // Height 4: Multiply loop: Main loop
+ "fmla z8.s, p4/M, z6.s, z0.s\n"
+ "fmla z12.s, p4/M, z6.s, z1.s\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z16.s, p4/M, z6.s, z2.s\n"
+ "fmla z20.s, p4/M, z6.s, z3.s\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "add x25, x25, #0x4\n"
+ "fmla z9.s, p4/M, z7.s, z0.s\n"
+ "fmla z13.s, p4/M, z7.s, z1.s\n"
+ "subs x26, x26, #0x1\n"
+ "add x24, x24, #0x4\n"
+ "fmla z17.s, p4/M, z7.s, z2.s\n"
+ "fmla z21.s, p4/M, z7.s, z3.s\n"
+ "ld1w { z7.s }, p4/Z, [x28]\n"
+ "add x23, x23, #0x4\n"
+ "add x22, x22, #0x4\n"
+ "fmla z10.s, p4/M, z6.s, z0.s\n"
+ "fmla z14.s, p4/M, z6.s, z1.s\n"
+ "addvl x9, x9, #1\n"
+ "fmla z18.s, p4/M, z6.s, z2.s\n"
+ "fmla z22.s, p4/M, z6.s, z3.s\n"
+ "addvl x28, x28, #1\n"
+ "ld1w { z6.s }, p4/Z, [x11]\n"
+ "fmla z11.s, p4/M, z7.s, z0.s\n"
+ "fmla z15.s, p4/M, z7.s, z1.s\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "fmla z19.s, p4/M, z7.s, z2.s\n"
+ "fmla z23.s, p4/M, z7.s, z3.s\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
+ "ld1w { z7.s }, p4/Z, [x10]\n"
+ "bgt 49b\n"
+ "50:" // Height 4: Multiply loop: Main loop skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "fmla z8.s, p4/M, z6.s, z0.s\n"
+ "fmla z12.s, p4/M, z6.s, z1.s\n"
+ "add x27, x27, #0x1\n"
+ "fmla z16.s, p4/M, z6.s, z2.s\n"
+ "fmla z20.s, p4/M, z6.s, z3.s\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "cmp x27, x19\n"
+ "fmla z9.s, p4/M, z7.s, z0.s\n"
+ "fmla z13.s, p4/M, z7.s, z1.s\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z17.s, p4/M, z7.s, z2.s\n"
+ "fmla z21.s, p4/M, z7.s, z3.s\n"
+ "ld1w { z7.s }, p4/Z, [x28]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z10.s, p4/M, z6.s, z0.s\n"
+ "fmla z14.s, p4/M, z6.s, z1.s\n"
+ "addvl x28, x28, #1\n"
+ "fmla z18.s, p4/M, z6.s, z2.s\n"
+ "fmla z22.s, p4/M, z6.s, z3.s\n"
+ "fmla z11.s, p4/M, z7.s, z0.s\n"
+ "fmla z15.s, p4/M, z7.s, z1.s\n"
+ "fmla z19.s, p4/M, z7.s, z2.s\n"
+ "fmla z23.s, p4/M, z7.s, z3.s\n"
+ "bne 46b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "tbz %x[flags], #1, 51f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p4/Z, [x19]\n"
+ "fmin z8.s, p4/M, z8.s, z1.s\n"
+ "fmin z9.s, p4/M, z9.s, z1.s\n"
+ "fmin z10.s, p4/M, z10.s, z1.s\n"
+ "fmin z11.s, p4/M, z11.s, z1.s\n"
+ "fmin z12.s, p4/M, z12.s, z1.s\n"
+ "fmin z13.s, p4/M, z13.s, z1.s\n"
+ "fmin z14.s, p4/M, z14.s, z1.s\n"
+ "fmin z15.s, p4/M, z15.s, z1.s\n"
+ "fmin z16.s, p4/M, z16.s, z1.s\n"
+ "fmin z17.s, p4/M, z17.s, z1.s\n"
+ "fmin z18.s, p4/M, z18.s, z1.s\n"
+ "fmin z19.s, p4/M, z19.s, z1.s\n"
+ "fmin z20.s, p4/M, z20.s, z1.s\n"
+ "fmin z21.s, p4/M, z21.s, z1.s\n"
+ "fmin z22.s, p4/M, z22.s, z1.s\n"
+ "fmin z23.s, p4/M, z23.s, z1.s\n"
+ "fmax z8.s, p4/M, z8.s, z0.s\n"
+ "fmax z9.s, p4/M, z9.s, z0.s\n"
+ "fmax z10.s, p4/M, z10.s, z0.s\n"
+ "fmax z11.s, p4/M, z11.s, z0.s\n"
+ "fmax z12.s, p4/M, z12.s, z0.s\n"
+ "fmax z13.s, p4/M, z13.s, z0.s\n"
+ "fmax z14.s, p4/M, z14.s, z0.s\n"
+ "fmax z15.s, p4/M, z15.s, z0.s\n"
+ "fmax z16.s, p4/M, z16.s, z0.s\n"
+ "fmax z17.s, p4/M, z17.s, z0.s\n"
+ "fmax z18.s, p4/M, z18.s, z0.s\n"
+ "fmax z19.s, p4/M, z19.s, z0.s\n"
+ "fmax z20.s, p4/M, z20.s, z0.s\n"
+ "fmax z21.s, p4/M, z21.s, z0.s\n"
+ "fmax z22.s, p4/M, z22.s, z0.s\n"
+ "fmax z23.s, p4/M, z23.s, z0.s\n"
+ "51:" // Height 4: No activation
+ "st1w { z8.s }, p3, [x12]\n"
+ "st1w { z9.s }, p2, [x12, #1, MUL VL]\n"
+ "st1w { z10.s }, p1, [x12, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z12.s }, p3, [x24]\n"
+ "st1w { z13.s }, p2, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p3, [x23]\n"
+ "st1w { z17.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p0, [x23, #3, MUL VL]\n"
+ "st1w { z20.s }, p3, [x22]\n"
+ "st1w { z21.s }, p2, [x22, #1, MUL VL]\n"
+ "st1w { z22.s }, p1, [x22, #2, MUL VL]\n"
+ "st1w { z23.s }, p0, [x22, #3, MUL VL]\n"
+ "52:" // Height 4: Writeback done
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 41b\n"
+ "b 80f\n"
+ "53:" // Height 5
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "54:" // Height 5: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 55f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 55f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 55f\n"
+ "mov x10, x11\n"
+ "55:" // Height 5: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x13\n"
+ "cbz x14, 56f\n"
+ "ld1w { z8.s }, p4/Z, [x14]\n"
+ "ld1w { z9.s }, p4/Z, [x14, #1, MUL VL]\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
+ "ld1w { z10.s }, p4/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x14, #3, MUL VL]\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
+ "mov z16.d, z8.d\n"
+ "mov z17.d, z9.d\n"
+ "addvl x14, x14, #4\n"
+ "mov z18.d, z10.d\n"
+ "mov z19.d, z11.d\n"
+ "mov z20.d, z8.d\n"
+ "mov z21.d, z9.d\n"
+ "mov z22.d, z10.d\n"
+ "mov z23.d, z11.d\n"
+ "mov z24.d, z8.d\n"
+ "mov z25.d, z9.d\n"
+ "mov z26.d, z10.d\n"
+ "mov z27.d, z11.d\n"
+ "b 58f\n"
+ "56:" // Height 5: no bias
+ "tbz %x[flags], #0, 57f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z8.s }, p3/Z, [x12]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z9.s }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z10.s }, p1/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z11.s }, p0/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x24]\n"
+ "ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x23]\n"
+ "ld1w { z17.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p3/Z, [x22]\n"
+ "ld1w { z21.s }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z23.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z24.s }, p3/Z, [x21]\n"
+ "ld1w { z25.s }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z26.s }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z27.s }, p0/Z, [x21, #3, MUL VL]\n"
+ "b 58f\n"
+ "57:" // Height 5: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "mov z19.b, #0x0\n"
+ "mov z20.b, #0x0\n"
+ "mov z21.b, #0x0\n"
+ "mov z22.b, #0x0\n"
+ "mov z23.b, #0x0\n"
+ "mov z24.b, #0x0\n"
+ "mov z25.b, #0x0\n"
+ "mov z26.b, #0x0\n"
+ "mov z27.b, #0x0\n"
+ "58:" // Height 5: setup done
+ "mov x27, #0x0\n"
+ "59:" // Height 5: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 60f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 61f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "b 61f\n"
+ "60:" // Height 5: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "61:" // Height 5: input setup done
+ "subs x26, x26, #0x1\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
+ "ld1rw { z4.s }, p4/Z, [x21]\n"
+ "ld1w { z6.s }, p4/Z, [x11]\n"
+ "ld1w { z7.s }, p4/Z, [x10]\n"
+ "ble 63f\n"
+ "62:" // Height 5: Multiply loop: Main loop
+ "fmla z8.s, p4/M, z6.s, z0.s\n"
+ "fmla z12.s, p4/M, z6.s, z1.s\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z16.s, p4/M, z6.s, z2.s\n"
+ "fmla z20.s, p4/M, z6.s, z3.s\n"
+ "add x25, x25, #0x4\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z24.s, p4/M, z6.s, z4.s\n"
+ "fmla z9.s, p4/M, z7.s, z0.s\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "add x24, x24, #0x4\n"
+ "fmla z13.s, p4/M, z7.s, z1.s\n"
+ "fmla z17.s, p4/M, z7.s, z2.s\n"
+ "add x23, x23, #0x4\n"
+ "add x22, x22, #0x4\n"
+ "fmla z21.s, p4/M, z7.s, z3.s\n"
+ "fmla z25.s, p4/M, z7.s, z4.s\n"
+ "ld1w { z7.s }, p4/Z, [x28]\n"
+ "add x21, x21, #0x4\n"
+ "fmla z10.s, p4/M, z6.s, z0.s\n"
+ "fmla z14.s, p4/M, z6.s, z1.s\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "fmla z18.s, p4/M, z6.s, z2.s\n"
+ "fmla z22.s, p4/M, z6.s, z3.s\n"
+ "fmla z26.s, p4/M, z6.s, z4.s\n"
+ "fmla z11.s, p4/M, z7.s, z0.s\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1w { z6.s }, p4/Z, [x11]\n"
+ "fmla z15.s, p4/M, z7.s, z1.s\n"
+ "fmla z19.s, p4/M, z7.s, z2.s\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "fmla z23.s, p4/M, z7.s, z3.s\n"
+ "fmla z27.s, p4/M, z7.s, z4.s\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
+ "ld1rw { z4.s }, p4/Z, [x21]\n"
+ "ld1w { z7.s }, p4/Z, [x10]\n"
+ "bgt 62b\n"
+ "63:" // Height 5: Multiply loop: Main loop skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "fmla z8.s, p4/M, z6.s, z0.s\n"
+ "fmla z12.s, p4/M, z6.s, z1.s\n"
+ "add x27, x27, #0x1\n"
+ "fmla z16.s, p4/M, z6.s, z2.s\n"
+ "fmla z20.s, p4/M, z6.s, z3.s\n"
+ "cmp x27, x19\n"
+ "addvl x11, x11, #1\n"
+ "fmla z24.s, p4/M, z6.s, z4.s\n"
+ "fmla z9.s, p4/M, z7.s, z0.s\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z13.s, p4/M, z7.s, z1.s\n"
+ "fmla z17.s, p4/M, z7.s, z2.s\n"
+ "addvl x9, x9, #1\n"
+ "fmla z21.s, p4/M, z7.s, z3.s\n"
+ "fmla z25.s, p4/M, z7.s, z4.s\n"
+ "ld1w { z7.s }, p4/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.s, p4/M, z6.s, z0.s\n"
+ "fmla z14.s, p4/M, z6.s, z1.s\n"
+ "fmla z18.s, p4/M, z6.s, z2.s\n"
+ "fmla z22.s, p4/M, z6.s, z3.s\n"
+ "fmla z26.s, p4/M, z6.s, z4.s\n"
+ "fmla z11.s, p4/M, z7.s, z0.s\n"
+ "fmla z15.s, p4/M, z7.s, z1.s\n"
+ "fmla z19.s, p4/M, z7.s, z2.s\n"
+ "fmla z23.s, p4/M, z7.s, z3.s\n"
+ "fmla z27.s, p4/M, z7.s, z4.s\n"
+ "bne 59b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "tbz %x[flags], #1, 64f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p4/Z, [x19]\n"
+ "fmin z8.s, p4/M, z8.s, z1.s\n"
+ "fmin z9.s, p4/M, z9.s, z1.s\n"
+ "fmin z10.s, p4/M, z10.s, z1.s\n"
+ "fmin z11.s, p4/M, z11.s, z1.s\n"
+ "fmin z12.s, p4/M, z12.s, z1.s\n"
+ "fmin z13.s, p4/M, z13.s, z1.s\n"
+ "fmin z14.s, p4/M, z14.s, z1.s\n"
+ "fmin z15.s, p4/M, z15.s, z1.s\n"
+ "fmin z16.s, p4/M, z16.s, z1.s\n"
+ "fmin z17.s, p4/M, z17.s, z1.s\n"
+ "fmin z18.s, p4/M, z18.s, z1.s\n"
+ "fmin z19.s, p4/M, z19.s, z1.s\n"
+ "fmin z20.s, p4/M, z20.s, z1.s\n"
+ "fmin z21.s, p4/M, z21.s, z1.s\n"
+ "fmin z22.s, p4/M, z22.s, z1.s\n"
+ "fmin z23.s, p4/M, z23.s, z1.s\n"
+ "fmin z24.s, p4/M, z24.s, z1.s\n"
+ "fmin z25.s, p4/M, z25.s, z1.s\n"
+ "fmin z26.s, p4/M, z26.s, z1.s\n"
+ "fmin z27.s, p4/M, z27.s, z1.s\n"
+ "fmax z8.s, p4/M, z8.s, z0.s\n"
+ "fmax z9.s, p4/M, z9.s, z0.s\n"
+ "fmax z10.s, p4/M, z10.s, z0.s\n"
+ "fmax z11.s, p4/M, z11.s, z0.s\n"
+ "fmax z12.s, p4/M, z12.s, z0.s\n"
+ "fmax z13.s, p4/M, z13.s, z0.s\n"
+ "fmax z14.s, p4/M, z14.s, z0.s\n"
+ "fmax z15.s, p4/M, z15.s, z0.s\n"
+ "fmax z16.s, p4/M, z16.s, z0.s\n"
+ "fmax z17.s, p4/M, z17.s, z0.s\n"
+ "fmax z18.s, p4/M, z18.s, z0.s\n"
+ "fmax z19.s, p4/M, z19.s, z0.s\n"
+ "fmax z20.s, p4/M, z20.s, z0.s\n"
+ "fmax z21.s, p4/M, z21.s, z0.s\n"
+ "fmax z22.s, p4/M, z22.s, z0.s\n"
+ "fmax z23.s, p4/M, z23.s, z0.s\n"
+ "fmax z24.s, p4/M, z24.s, z0.s\n"
+ "fmax z25.s, p4/M, z25.s, z0.s\n"
+ "fmax z26.s, p4/M, z26.s, z0.s\n"
+ "fmax z27.s, p4/M, z27.s, z0.s\n"
+ "64:" // Height 5: No activation
+ "st1w { z8.s }, p3, [x12]\n"
+ "st1w { z9.s }, p2, [x12, #1, MUL VL]\n"
+ "st1w { z10.s }, p1, [x12, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z12.s }, p3, [x24]\n"
+ "st1w { z13.s }, p2, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p3, [x23]\n"
+ "st1w { z17.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p0, [x23, #3, MUL VL]\n"
+ "st1w { z20.s }, p3, [x22]\n"
+ "st1w { z21.s }, p2, [x22, #1, MUL VL]\n"
+ "st1w { z22.s }, p1, [x22, #2, MUL VL]\n"
+ "st1w { z23.s }, p0, [x22, #3, MUL VL]\n"
+ "st1w { z24.s }, p3, [x21]\n"
+ "st1w { z25.s }, p2, [x21, #1, MUL VL]\n"
+ "st1w { z26.s }, p1, [x21, #2, MUL VL]\n"
+ "st1w { z27.s }, p0, [x21, #3, MUL VL]\n"
+ "65:" // Height 5: Writeback done
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 54b\n"
+ "b 80f\n"
+ "66:" // Height 6
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x20, #0x18\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x14, %x[bias]\n"
+ "mov x12, %x[output_ptr]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
+ "67:" // Height 6: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 68f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 68f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 68f\n"
+ "mov x10, x11\n"
+ "68:" // Height 6: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p0.s, x19, x13\n"
+ "cbz x14, 69f\n"
+ "ld1w { z8.s }, p4/Z, [x14]\n"
+ "ld1w { z9.s }, p4/Z, [x14, #1, MUL VL]\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
+ "ld1w { z10.s }, p4/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x14, #3, MUL VL]\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
+ "mov z16.d, z8.d\n"
+ "mov z17.d, z9.d\n"
+ "addvl x14, x14, #4\n"
+ "mov z18.d, z10.d\n"
+ "mov z19.d, z11.d\n"
+ "mov z20.d, z8.d\n"
+ "mov z21.d, z9.d\n"
+ "mov z22.d, z10.d\n"
+ "mov z23.d, z11.d\n"
+ "mov z24.d, z8.d\n"
+ "mov z25.d, z9.d\n"
+ "mov z26.d, z10.d\n"
+ "mov z27.d, z11.d\n"
+ "mov z28.d, z8.d\n"
+ "mov z29.d, z9.d\n"
+ "mov z30.d, z10.d\n"
+ "mov z31.d, z11.d\n"
+ "b 71f\n"
+ "69:" // Height 6: no bias
+ "tbz %x[flags], #0, 70f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z8.s }, p3/Z, [x12]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z9.s }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z10.s }, p1/Z, [x12, #2, MUL VL]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "ld1w { z11.s }, p0/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x24]\n"
+ "ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x23]\n"
+ "ld1w { z17.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p3/Z, [x22]\n"
+ "ld1w { z21.s }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z23.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z24.s }, p3/Z, [x21]\n"
+ "ld1w { z25.s }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z26.s }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z27.s }, p0/Z, [x21, #3, MUL VL]\n"
+ "ld1w { z28.s }, p3/Z, [x20]\n"
+ "ld1w { z29.s }, p2/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z30.s }, p1/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z31.s }, p0/Z, [x20, #3, MUL VL]\n"
+ "b 71f\n"
+ "70:" // Height 6: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "mov z19.b, #0x0\n"
+ "mov z20.b, #0x0\n"
+ "mov z21.b, #0x0\n"
+ "mov z22.b, #0x0\n"
+ "mov z23.b, #0x0\n"
+ "mov z24.b, #0x0\n"
+ "mov z25.b, #0x0\n"
+ "mov z26.b, #0x0\n"
+ "mov z27.b, #0x0\n"
+ "mov z28.b, #0x0\n"
+ "mov z29.b, #0x0\n"
+ "mov z30.b, #0x0\n"
+ "mov z31.b, #0x0\n"
+ "71:" // Height 6: setup done
+ "mov x27, #0x0\n"
+ "72:" // Height 6: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 73f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 74f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "add x20, x20, x19, LSL #2\n"
+ "b 74f\n"
+ "73:" // Height 6: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "74:" // Height 6: input setup done
+ "subs x26, x26, #0x1\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
+ "ld1rw { z4.s }, p4/Z, [x21]\n"
+ "ld1rw { z5.s }, p4/Z, [x20]\n"
+ "ld1w { z6.s }, p4/Z, [x11]\n"
+ "ld1w { z7.s }, p4/Z, [x10]\n"
+ "ble 76f\n"
+ "75:" // Height 6: Multiply loop: Main loop
+ "fmla z8.s, p4/M, z6.s, z0.s\n"
+ "fmla z12.s, p4/M, z6.s, z1.s\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z16.s, p4/M, z6.s, z2.s\n"
+ "fmla z20.s, p4/M, z6.s, z3.s\n"
+ "add x25, x25, #0x4\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z24.s, p4/M, z6.s, z4.s\n"
+ "fmla z28.s, p4/M, z6.s, z5.s\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "add x24, x24, #0x4\n"
+ "fmla z9.s, p4/M, z7.s, z0.s\n"
+ "fmla z13.s, p4/M, z7.s, z1.s\n"
+ "add x23, x23, #0x4\n"
+ "add x22, x22, #0x4\n"
+ "fmla z17.s, p4/M, z7.s, z2.s\n"
+ "fmla z21.s, p4/M, z7.s, z3.s\n"
+ "add x21, x21, #0x4\n"
+ "add x20, x20, #0x4\n"
+ "fmla z25.s, p4/M, z7.s, z4.s\n"
+ "fmla z29.s, p4/M, z7.s, z5.s\n"
+ "ld1w { z7.s }, p4/Z, [x28]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z10.s, p4/M, z6.s, z0.s\n"
+ "fmla z14.s, p4/M, z6.s, z1.s\n"
+ "addvl x28, x28, #1\n"
+ "fmla z18.s, p4/M, z6.s, z2.s\n"
+ "fmla z22.s, p4/M, z6.s, z3.s\n"
+ "fmla z26.s, p4/M, z6.s, z4.s\n"
+ "fmla z30.s, p4/M, z6.s, z5.s\n"
+ "ld1w { z6.s }, p4/Z, [x11]\n"
+ "fmla z11.s, p4/M, z7.s, z0.s\n"
+ "fmla z15.s, p4/M, z7.s, z1.s\n"
+ "ld1rw { z0.s }, p4/Z, [x25]\n"
+ "ld1rw { z1.s }, p4/Z, [x24]\n"
+ "fmla z19.s, p4/M, z7.s, z2.s\n"
+ "fmla z23.s, p4/M, z7.s, z3.s\n"
+ "ld1rw { z2.s }, p4/Z, [x23]\n"
+ "ld1rw { z3.s }, p4/Z, [x22]\n"
+ "fmla z27.s, p4/M, z7.s, z4.s\n"
+ "fmla z31.s, p4/M, z7.s, z5.s\n"
+ "ld1rw { z4.s }, p4/Z, [x21]\n"
+ "ld1rw { z5.s }, p4/Z, [x20]\n"
+ "ld1w { z7.s }, p4/Z, [x10]\n"
+ "bgt 75b\n"
+ "76:" // Height 6: Multiply loop: Main loop skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "fmla z8.s, p4/M, z6.s, z0.s\n"
+ "fmla z12.s, p4/M, z6.s, z1.s\n"
+ "add x27, x27, #0x1\n"
+ "fmla z16.s, p4/M, z6.s, z2.s\n"
+ "fmla z20.s, p4/M, z6.s, z3.s\n"
+ "cmp x27, x19\n"
+ "addvl x11, x11, #1\n"
+ "fmla z24.s, p4/M, z6.s, z4.s\n"
+ "fmla z28.s, p4/M, z6.s, z5.s\n"
+ "ld1w { z6.s }, p4/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z9.s, p4/M, z7.s, z0.s\n"
+ "fmla z13.s, p4/M, z7.s, z1.s\n"
+ "addvl x9, x9, #1\n"
+ "fmla z17.s, p4/M, z7.s, z2.s\n"
+ "fmla z21.s, p4/M, z7.s, z3.s\n"
+ "fmla z25.s, p4/M, z7.s, z4.s\n"
+ "fmla z29.s, p4/M, z7.s, z5.s\n"
+ "ld1w { z7.s }, p4/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.s, p4/M, z6.s, z0.s\n"
+ "fmla z14.s, p4/M, z6.s, z1.s\n"
+ "fmla z18.s, p4/M, z6.s, z2.s\n"
+ "fmla z22.s, p4/M, z6.s, z3.s\n"
+ "fmla z26.s, p4/M, z6.s, z4.s\n"
+ "fmla z30.s, p4/M, z6.s, z5.s\n"
+ "fmla z11.s, p4/M, z7.s, z0.s\n"
+ "fmla z15.s, p4/M, z7.s, z1.s\n"
+ "fmla z19.s, p4/M, z7.s, z2.s\n"
+ "fmla z23.s, p4/M, z7.s, z3.s\n"
+ "fmla z27.s, p4/M, z7.s, z4.s\n"
+ "fmla z31.s, p4/M, z7.s, z5.s\n"
+ "bne 72b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "tbz %x[flags], #1, 77f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p4/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p4/Z, [x19]\n"
+ "fmin z8.s, p4/M, z8.s, z1.s\n"
+ "fmin z9.s, p4/M, z9.s, z1.s\n"
+ "fmin z10.s, p4/M, z10.s, z1.s\n"
+ "fmin z11.s, p4/M, z11.s, z1.s\n"
+ "fmin z12.s, p4/M, z12.s, z1.s\n"
+ "fmin z13.s, p4/M, z13.s, z1.s\n"
+ "fmin z14.s, p4/M, z14.s, z1.s\n"
+ "fmin z15.s, p4/M, z15.s, z1.s\n"
+ "fmin z16.s, p4/M, z16.s, z1.s\n"
+ "fmin z17.s, p4/M, z17.s, z1.s\n"
+ "fmin z18.s, p4/M, z18.s, z1.s\n"
+ "fmin z19.s, p4/M, z19.s, z1.s\n"
+ "fmin z20.s, p4/M, z20.s, z1.s\n"
+ "fmin z21.s, p4/M, z21.s, z1.s\n"
+ "fmin z22.s, p4/M, z22.s, z1.s\n"
+ "fmin z23.s, p4/M, z23.s, z1.s\n"
+ "fmin z24.s, p4/M, z24.s, z1.s\n"
+ "fmin z25.s, p4/M, z25.s, z1.s\n"
+ "fmin z26.s, p4/M, z26.s, z1.s\n"
+ "fmin z27.s, p4/M, z27.s, z1.s\n"
+ "fmin z28.s, p4/M, z28.s, z1.s\n"
+ "fmin z29.s, p4/M, z29.s, z1.s\n"
+ "fmin z30.s, p4/M, z30.s, z1.s\n"
+ "fmin z31.s, p4/M, z31.s, z1.s\n"
+ "fmax z8.s, p4/M, z8.s, z0.s\n"
+ "fmax z9.s, p4/M, z9.s, z0.s\n"
+ "fmax z10.s, p4/M, z10.s, z0.s\n"
+ "fmax z11.s, p4/M, z11.s, z0.s\n"
+ "fmax z12.s, p4/M, z12.s, z0.s\n"
+ "fmax z13.s, p4/M, z13.s, z0.s\n"
+ "fmax z14.s, p4/M, z14.s, z0.s\n"
+ "fmax z15.s, p4/M, z15.s, z0.s\n"
+ "fmax z16.s, p4/M, z16.s, z0.s\n"
+ "fmax z17.s, p4/M, z17.s, z0.s\n"
+ "fmax z18.s, p4/M, z18.s, z0.s\n"
+ "fmax z19.s, p4/M, z19.s, z0.s\n"
+ "fmax z20.s, p4/M, z20.s, z0.s\n"
+ "fmax z21.s, p4/M, z21.s, z0.s\n"
+ "fmax z22.s, p4/M, z22.s, z0.s\n"
+ "fmax z23.s, p4/M, z23.s, z0.s\n"
+ "fmax z24.s, p4/M, z24.s, z0.s\n"
+ "fmax z25.s, p4/M, z25.s, z0.s\n"
+ "fmax z26.s, p4/M, z26.s, z0.s\n"
+ "fmax z27.s, p4/M, z27.s, z0.s\n"
+ "fmax z28.s, p4/M, z28.s, z0.s\n"
+ "fmax z29.s, p4/M, z29.s, z0.s\n"
+ "fmax z30.s, p4/M, z30.s, z0.s\n"
+ "fmax z31.s, p4/M, z31.s, z0.s\n"
+ "77:" // Height 6: No activation
+ "st1w { z8.s }, p3, [x12]\n"
+ "st1w { z9.s }, p2, [x12, #1, MUL VL]\n"
+ "st1w { z10.s }, p1, [x12, #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z12.s }, p3, [x24]\n"
+ "st1w { z13.s }, p2, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p3, [x23]\n"
+ "st1w { z17.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p0, [x23, #3, MUL VL]\n"
+ "st1w { z20.s }, p3, [x22]\n"
+ "st1w { z21.s }, p2, [x22, #1, MUL VL]\n"
+ "st1w { z22.s }, p1, [x22, #2, MUL VL]\n"
+ "st1w { z23.s }, p0, [x22, #3, MUL VL]\n"
+ "st1w { z24.s }, p3, [x21]\n"
+ "st1w { z25.s }, p2, [x21, #1, MUL VL]\n"
+ "st1w { z26.s }, p1, [x21, #2, MUL VL]\n"
+ "st1w { z27.s }, p0, [x21, #3, MUL VL]\n"
+ "st1w { z28.s }, p3, [x20]\n"
+ "st1w { z29.s }, p2, [x20, #1, MUL VL]\n"
+ "st1w { z30.s }, p1, [x20, #2, MUL VL]\n"
+ "st1w { z31.s }, p0, [x20, #3, MUL VL]\n"
+ "78:" // Height 6: Writeback done
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 67b\n"
+ "subs %x[M], %x[M], #0x6\n"
+ "beq 80f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 79f\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "b 1b\n"
+ "79:" // Update direct input
+ "mov x19, #0x18\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
+ "b 1b\n"
+ "80:" // Exit
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
+ : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace arm_gemm
+#endif // ARM_COMPUTE_ENABLE_SVE
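[Editorial note, not part of the patch: the generic.cpp variant that follows computes the same result as the a64fx.cpp build above. They differ mainly in the inner loop: the A64FX version broadcasts one A element at a time with ld1rw and issues predicated fmla, while the generic version loads four A elements per ld1rqw and uses indexed fmla (z0.s[0]..z0.s[3]), unrolling K by four. A scalar sketch of the common semantics for the direct-input case (num_strings == 1); parameter names are illustrative, and the fixed-format B tensor is modelled here as an ordinary ldb-strided matrix for clarity:]

#include <algorithm>
#include <cstddef>

// Scalar reference for what both sve_ffhybrid_fp32_mla_6x4VL variants compute.
// Initialisation order matches the kernels: bias takes precedence, then the
// accumulate flag, otherwise zero; the clamp mirrors the fmin/fmax pairs
// applied before writeback (a no-op when minval/maxval stay at +/-infinity).
void reference_hybrid_fp32(size_t M, size_t N, size_t K,
                           const float *A, size_t lda,
                           const float *B, size_t ldb,
                           float *C, size_t ldc,
                           const float *bias, bool accumulate,
                           float minval, float maxval) {
    for (size_t m = 0; m < M; m++) {
        for (size_t n = 0; n < N; n++) {
            float acc = bias ? bias[n]
                             : (accumulate ? C[m * ldc + n] : 0.0f);
            for (size_t k = 0; k < K; k++) {
                acc += A[m * lda + k] * B[k * ldb + n];
            }
            C[m * ldc + n] = std::min(std::max(acc, minval), maxval);
        }
    }
}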
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/generic.cpp
new file mode 100644
index 0000000000..3c7e562c89
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/generic.cpp
@@ -0,0 +1,2310 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include "arm_gemm.hpp"
+#include "../../utils.hpp"
+
+#include <cassert>
+#include <limits>
+
+namespace arm_gemm {
+
+void sve_ffhybrid_fp32_mla_6x4VL (
+ unsigned int num_strings, const unsigned int *string_lengths, IndirectInputArg<float> A_arg,
+ size_t M, size_t N, const float *B_ptr, size_t B_stride, IndirectOutputArg<float> output_arg,
+ const float *bias, Activation act, bool accumulate
+)
+{
+ struct KernelArgs {
+ float maxval = static_cast<float>(std::numeric_limits<float>::infinity());
+ float minval = - static_cast<float>(std::numeric_limits<float>::infinity());
+ unsigned int num_strings = {};
+ const unsigned int *string_lengths = {};
+ size_t N = {};
+ const float *B_ptr = {};
+ const float *cur_B_ptr = {};
+ size_t B_stride = {};
+ size_t output_offset = {};
+ size_t input_initial_col = {};
+ size_t input_offset = {};
+ } ka;
+
+ unsigned long flags=0;
+ void *output_ptr;
+ void *input_ptr;
+
+ if (output_arg.is_indirect) {
+ output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_offset=output_arg.indirect.offset;
+ flags |= 0x4;
+ } else {
+ output_ptr=(void *)(output_arg.direct.base);
+ ka.output_offset=output_arg.direct.stride;
+ }
+
+ if (A_arg.is_indirect) {
+ input_ptr=(void *)(A_arg.indirect.ptr);
+ ka.input_offset=A_arg.indirect.start_row;
+ ka.input_initial_col=A_arg.indirect.start_col;
+ flags |= 0x8;
+ } else {
+ assert(num_strings==1);
+ input_ptr=(void *)(A_arg.direct.base);
+ ka.input_offset=A_arg.direct.stride;
+ }
+ if (accumulate) {
+ flags |= 0x1;
+ }
+ ka.num_strings = num_strings;
+ ka.string_lengths = string_lengths;
+ ka.N = N;
+ ka.B_ptr = B_ptr;
+ ka.B_stride = B_stride;
+ switch(act.type) {
+ default:
+ case Activation::Type::None:
+ break;
+ case Activation::Type::BoundedReLU:
+ ka.maxval = static_cast<float>(act.param1);
+ /* fall through */
+ case Activation::Type::ReLU:
+ ka.minval = 0;
+ flags |= 0x2;
+ break;
+ }
+ __asm__ __volatile__(
+ "ptrue p5.b\n"
+ "1:" // Row loop
+ "cmp %x[M], #0x6\n"
+ "bge 71f\n"
+ "cmp %x[M], #0x4\n"
+ "bgt 57f\n"
+ "beq 43f\n"
+ "cmp %x[M], #0x2\n"
+ "bgt 29f\n"
+ "beq 15f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "2:" // Height 1: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 3f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 3f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 3f\n"
+ "mov x10, x11\n"
+ "3:" // Height 1: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 4f\n"
+ "ld1w { z8.s }, p5/Z, [x14]\n"
+ "ld1w { z9.s }, p5/Z, [x14, #1, MUL VL]\n"
+ "ld1w { z10.s }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x14, #3, MUL VL]\n"
+ "addvl x14, x14, #4\n"
+ "b 6f\n"
+ "4:" // Height 1: no bias
+ "tbz %x[flags], #0, 5f\n"
+ "ld1w { z8.s }, p4/Z, [x12]\n"
+ "ld1w { z9.s }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z11.s }, p1/Z, [x12, #3, MUL VL]\n"
+ "b 6f\n"
+ "5:" // Height 1: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "6:" // Height 1: setup done
+ "mov x27, #0x0\n"
+ "7:" // Height 1: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 8f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "cbnz x27, 9f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "b 9f\n"
+ "8:" // Height 1: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "9:" // Height 1: input setup done
+ "cmp x26, #0x4\n"
+ "ble 11f\n"
+ "10:" // Height 1: Multiply loop: Main loop head
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "fmla z8.s, z6.s, z0.s[0]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z9.s, z7.s, z0.s[0]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "fmla z10.s, z6.s, z0.s[0]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "fmla z11.s, z7.s, z0.s[0]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #1, MUL VL]\n"
+ "fmla z8.s, z6.s, z0.s[1]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "fmla z9.s, z7.s, z0.s[1]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #1, MUL VL]\n"
+ "fmla z10.s, z6.s, z0.s[1]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #1, MUL VL]\n"
+ "fmla z11.s, z7.s, z0.s[1]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #2, MUL VL]\n"
+ "fmla z8.s, z6.s, z0.s[2]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.s, z7.s, z0.s[2]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "fmla z10.s, z6.s, z0.s[2]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #2, MUL VL]\n"
+ "fmla z11.s, z7.s, z0.s[2]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #3, MUL VL]\n"
+ "fmla z8.s, z6.s, z0.s[3]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
+ "fmla z9.s, z7.s, z0.s[3]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #3, MUL VL]\n"
+ "sub x26, x26, #0x4\n"
+ "ld1w { z7.s }, p5/Z, [x28, #3, MUL VL]\n"
+ "cmp x26, #0x4\n"
+ "fmla z10.s, z6.s, z0.s[3]\n"
+ "fmla z11.s, z7.s, z0.s[3]\n"
+ "add x25, x25, #0x10\n"
+ "addvl x11, x11, #4\n"
+ "addvl x10, x10, #4\n"
+ "addvl x9, x9, #4\n"
+ "addvl x28, x28, #4\n"
+ "bgt 10b\n"
+ "11:" // Height 1: Multiply loop: Single iteration only
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "fmla z8.s, z6.s, z0.s[0]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z9.s, z7.s, z0.s[0]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "fmla z10.s, z6.s, z0.s[0]\n"
+ "fmla z11.s, z7.s, z0.s[0]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "ble 12f\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z8.s, z6.s, z0.s[1]\n"
+ "fmla z9.s, z7.s, z0.s[1]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z10.s, z6.s, z0.s[1]\n"
+ "fmla z11.s, z7.s, z0.s[1]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "ble 12f\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z8.s, z6.s, z0.s[2]\n"
+ "fmla z9.s, z7.s, z0.s[2]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z10.s, z6.s, z0.s[2]\n"
+ "fmla z11.s, z7.s, z0.s[2]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "ble 12f\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z8.s, z6.s, z0.s[3]\n"
+ "fmla z9.s, z7.s, z0.s[3]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "fmla z10.s, z6.s, z0.s[3]\n"
+ "fmla z11.s, z7.s, z0.s[3]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "12:" // Height 1: Multiply loop: multiply skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 7b\n"
+ "tbz %x[flags], #1, 13f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z8.s, p5/M, z8.s, z1.s\n"
+ "fmin z9.s, p5/M, z9.s, z1.s\n"
+ "fmin z10.s, p5/M, z10.s, z1.s\n"
+ "fmin z11.s, p5/M, z11.s, z1.s\n"
+ "fmax z8.s, p5/M, z8.s, z0.s\n"
+ "fmax z9.s, p5/M, z9.s, z0.s\n"
+ "fmax z10.s, p5/M, z10.s, z0.s\n"
+ "fmax z11.s, p5/M, z11.s, z0.s\n"
+ "13:" // Height 1: No activation
+ "st1w { z8.s }, p4, [x12]\n"
+ "st1w { z9.s }, p3, [x12, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x12, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "14:" // Height 1: Writeback done
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 2b\n"
+ "b 86f\n"
+ "15:" // Height 2
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "16:" // Height 2: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 17f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 17f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 17f\n"
+ "mov x10, x11\n"
+ "17:" // Height 2: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 18f\n"
+ "ld1w { z8.s }, p5/Z, [x14]\n"
+ "ld1w { z9.s }, p5/Z, [x14, #1, MUL VL]\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
+ "ld1w { z10.s }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x14, #3, MUL VL]\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
+ "addvl x14, x14, #4\n"
+ "b 20f\n"
+ "18:" // Height 2: no bias
+ "tbz %x[flags], #0, 19f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "ld1w { z8.s }, p4/Z, [x12]\n"
+ "ld1w { z9.s }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z11.s }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "b 20f\n"
+ "19:" // Height 2: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "20:" // Height 2: setup done
+ "mov x27, #0x0\n"
+ "21:" // Height 2: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 22f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "cbnz x27, 23f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "b 23f\n"
+ "22:" // Height 2: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "23:" // Height 2: input setup done
+ "cmp x26, #0x4\n"
+ "ble 25f\n"
+ "24:" // Height 2: Multiply loop: Main loop head
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "sub x26, x26, #0x4\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z8.s, z6.s, z0.s[0]\n"
+ "fmla z12.s, z6.s, z1.s[0]\n"
+ "fmla z9.s, z7.s, z0.s[0]\n"
+ "fmla z13.s, z7.s, z1.s[0]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "fmla z10.s, z6.s, z0.s[0]\n"
+ "fmla z14.s, z6.s, z1.s[0]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #1, MUL VL]\n"
+ "cmp x26, #0x4\n"
+ "fmla z11.s, z7.s, z0.s[0]\n"
+ "fmla z15.s, z7.s, z1.s[0]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "add x25, x25, #0x10\n"
+ "fmla z8.s, z6.s, z0.s[1]\n"
+ "fmla z12.s, z6.s, z1.s[1]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #1, MUL VL]\n"
+ "add x24, x24, #0x10\n"
+ "fmla z9.s, z7.s, z0.s[1]\n"
+ "fmla z13.s, z7.s, z1.s[1]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #1, MUL VL]\n"
+ "fmla z10.s, z6.s, z0.s[1]\n"
+ "fmla z14.s, z6.s, z1.s[1]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #2, MUL VL]\n"
+ "fmla z11.s, z7.s, z0.s[1]\n"
+ "fmla z15.s, z7.s, z1.s[1]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z8.s, z6.s, z0.s[2]\n"
+ "fmla z12.s, z6.s, z1.s[2]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "fmla z9.s, z7.s, z0.s[2]\n"
+ "fmla z13.s, z7.s, z1.s[2]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #2, MUL VL]\n"
+ "fmla z10.s, z6.s, z0.s[2]\n"
+ "fmla z14.s, z6.s, z1.s[2]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #3, MUL VL]\n"
+ "addvl x11, x11, #4\n"
+ "fmla z11.s, z7.s, z0.s[2]\n"
+ "fmla z15.s, z7.s, z1.s[2]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
+ "fmla z8.s, z6.s, z0.s[3]\n"
+ "fmla z12.s, z6.s, z1.s[3]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ "fmla z9.s, z7.s, z0.s[3]\n"
+ "fmla z13.s, z7.s, z1.s[3]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "fmla z10.s, z6.s, z0.s[3]\n"
+ "fmla z14.s, z6.s, z1.s[3]\n"
+ "fmla z11.s, z7.s, z0.s[3]\n"
+ "fmla z15.s, z7.s, z1.s[3]\n"
+ "bgt 24b\n"
+ "25:" // Height 2: Multiply loop: Single iteration only
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z8.s, z6.s, z0.s[0]\n"
+ "fmla z12.s, z6.s, z1.s[0]\n"
+ "fmla z9.s, z7.s, z0.s[0]\n"
+ "fmla z13.s, z7.s, z1.s[0]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "fmla z10.s, z6.s, z0.s[0]\n"
+ "fmla z14.s, z6.s, z1.s[0]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z11.s, z7.s, z0.s[0]\n"
+ "fmla z15.s, z7.s, z1.s[0]\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "ble 26f\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z8.s, z6.s, z0.s[1]\n"
+ "fmla z12.s, z6.s, z1.s[1]\n"
+ "fmla z9.s, z7.s, z0.s[1]\n"
+ "fmla z13.s, z7.s, z1.s[1]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z10.s, z6.s, z0.s[1]\n"
+ "fmla z14.s, z6.s, z1.s[1]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z11.s, z7.s, z0.s[1]\n"
+ "fmla z15.s, z7.s, z1.s[1]\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "ble 26f\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z8.s, z6.s, z0.s[2]\n"
+ "fmla z12.s, z6.s, z1.s[2]\n"
+ "fmla z9.s, z7.s, z0.s[2]\n"
+ "fmla z13.s, z7.s, z1.s[2]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z10.s, z6.s, z0.s[2]\n"
+ "fmla z14.s, z6.s, z1.s[2]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z11.s, z7.s, z0.s[2]\n"
+ "fmla z15.s, z7.s, z1.s[2]\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "ble 26f\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z8.s, z6.s, z0.s[3]\n"
+ "fmla z12.s, z6.s, z1.s[3]\n"
+ "fmla z9.s, z7.s, z0.s[3]\n"
+ "fmla z13.s, z7.s, z1.s[3]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "fmla z10.s, z6.s, z0.s[3]\n"
+ "fmla z14.s, z6.s, z1.s[3]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z11.s, z7.s, z0.s[3]\n"
+ "fmla z15.s, z7.s, z1.s[3]\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "26:" // Height 2: Multiply loop: multiply skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 21b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "tbz %x[flags], #1, 27f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z8.s, p5/M, z8.s, z1.s\n"
+ "fmin z9.s, p5/M, z9.s, z1.s\n"
+ "fmin z10.s, p5/M, z10.s, z1.s\n"
+ "fmin z11.s, p5/M, z11.s, z1.s\n"
+ "fmin z12.s, p5/M, z12.s, z1.s\n"
+ "fmin z13.s, p5/M, z13.s, z1.s\n"
+ "fmin z14.s, p5/M, z14.s, z1.s\n"
+ "fmin z15.s, p5/M, z15.s, z1.s\n"
+ "fmax z8.s, p5/M, z8.s, z0.s\n"
+ "fmax z9.s, p5/M, z9.s, z0.s\n"
+ "fmax z10.s, p5/M, z10.s, z0.s\n"
+ "fmax z11.s, p5/M, z11.s, z0.s\n"
+ "fmax z12.s, p5/M, z12.s, z0.s\n"
+ "fmax z13.s, p5/M, z13.s, z0.s\n"
+ "fmax z14.s, p5/M, z14.s, z0.s\n"
+ "fmax z15.s, p5/M, z15.s, z0.s\n"
+ "27:" // Height 2: No activation
+ "st1w { z8.s }, p4, [x12]\n"
+ "st1w { z9.s }, p3, [x12, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x12, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z12.s }, p4, [x24]\n"
+ "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
+ "28:" // Height 2: Writeback done
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 16b\n"
+ "b 86f\n"
+ "29:" // Height 3
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "30:" // Height 3: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 31f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 31f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 31f\n"
+ "mov x10, x11\n"
+ "31:" // Height 3: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 32f\n"
+ "ld1w { z8.s }, p5/Z, [x14]\n"
+ "ld1w { z9.s }, p5/Z, [x14, #1, MUL VL]\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
+ "ld1w { z10.s }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x14, #3, MUL VL]\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
+ "mov z16.d, z8.d\n"
+ "mov z17.d, z9.d\n"
+ "addvl x14, x14, #4\n"
+ "mov z18.d, z10.d\n"
+ "mov z19.d, z11.d\n"
+ "b 34f\n"
+ "32:" // Height 3: no bias
+ "tbz %x[flags], #0, 33f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z8.s }, p4/Z, [x12]\n"
+ "ld1w { z9.s }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z11.s }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x23]\n"
+ "ld1w { z17.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "b 34f\n"
+ "33:" // Height 3: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "mov z19.b, #0x0\n"
+ "34:" // Height 3: setup done
+ "mov x27, #0x0\n"
+ "35:" // Height 3: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 36f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "cbnz x27, 37f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "b 37f\n"
+ "36:" // Height 3: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "37:" // Height 3: input setup done
+ "cmp x26, #0x4\n"
+ "ble 39f\n"
+ "38:" // Height 3: Multiply loop: Main loop head
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "sub x26, x26, #0x4\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "fmla z8.s, z6.s, z0.s[0]\n"
+ "fmla z12.s, z6.s, z1.s[0]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z16.s, z6.s, z2.s[0]\n"
+ "fmla z9.s, z7.s, z0.s[0]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "fmla z13.s, z7.s, z1.s[0]\n"
+ "fmla z17.s, z7.s, z2.s[0]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "cmp x26, #0x4\n"
+ "fmla z10.s, z6.s, z0.s[0]\n"
+ "fmla z14.s, z6.s, z1.s[0]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "fmla z18.s, z6.s, z2.s[0]\n"
+ "fmla z11.s, z7.s, z0.s[0]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #1, MUL VL]\n"
+ "add x23, x23, #0x10\n"
+ "fmla z15.s, z7.s, z1.s[0]\n"
+ "fmla z19.s, z7.s, z2.s[0]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "fmla z8.s, z6.s, z0.s[1]\n"
+ "fmla z12.s, z6.s, z1.s[1]\n"
+ "fmla z16.s, z6.s, z2.s[1]\n"
+ "fmla z9.s, z7.s, z0.s[1]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #1, MUL VL]\n"
+ "fmla z13.s, z7.s, z1.s[1]\n"
+ "fmla z17.s, z7.s, z2.s[1]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #1, MUL VL]\n"
+ "fmla z10.s, z6.s, z0.s[1]\n"
+ "fmla z14.s, z6.s, z1.s[1]\n"
+ "fmla z18.s, z6.s, z2.s[1]\n"
+ "fmla z11.s, z7.s, z0.s[1]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #2, MUL VL]\n"
+ "fmla z15.s, z7.s, z1.s[1]\n"
+ "fmla z19.s, z7.s, z2.s[1]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z8.s, z6.s, z0.s[2]\n"
+ "fmla z12.s, z6.s, z1.s[2]\n"
+ "fmla z16.s, z6.s, z2.s[2]\n"
+ "fmla z9.s, z7.s, z0.s[2]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "fmla z13.s, z7.s, z1.s[2]\n"
+ "fmla z17.s, z7.s, z2.s[2]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #2, MUL VL]\n"
+ "fmla z10.s, z6.s, z0.s[2]\n"
+ "fmla z14.s, z6.s, z1.s[2]\n"
+ "fmla z18.s, z6.s, z2.s[2]\n"
+ "fmla z11.s, z7.s, z0.s[2]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #3, MUL VL]\n"
+ "addvl x11, x11, #4\n"
+ "fmla z15.s, z7.s, z1.s[2]\n"
+ "fmla z19.s, z7.s, z2.s[2]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
+ "fmla z8.s, z6.s, z0.s[3]\n"
+ "fmla z12.s, z6.s, z1.s[3]\n"
+ "fmla z16.s, z6.s, z2.s[3]\n"
+ "fmla z9.s, z7.s, z0.s[3]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ "fmla z13.s, z7.s, z1.s[3]\n"
+ "fmla z17.s, z7.s, z2.s[3]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "fmla z10.s, z6.s, z0.s[3]\n"
+ "fmla z14.s, z6.s, z1.s[3]\n"
+ "fmla z18.s, z6.s, z2.s[3]\n"
+ "fmla z11.s, z7.s, z0.s[3]\n"
+ "fmla z15.s, z7.s, z1.s[3]\n"
+ "fmla z19.s, z7.s, z2.s[3]\n"
+ "bgt 38b\n"
+ "39:" // Height 3: Multiply loop: Single iteration only
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "fmla z8.s, z6.s, z0.s[0]\n"
+ "fmla z12.s, z6.s, z1.s[0]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z16.s, z6.s, z2.s[0]\n"
+ "fmla z9.s, z7.s, z0.s[0]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "fmla z13.s, z7.s, z1.s[0]\n"
+ "fmla z17.s, z7.s, z2.s[0]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z10.s, z6.s, z0.s[0]\n"
+ "fmla z14.s, z6.s, z1.s[0]\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "fmla z18.s, z6.s, z2.s[0]\n"
+ "fmla z11.s, z7.s, z0.s[0]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z15.s, z7.s, z1.s[0]\n"
+ "fmla z19.s, z7.s, z2.s[0]\n"
+ "ble 40f\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z8.s, z6.s, z0.s[1]\n"
+ "fmla z12.s, z6.s, z1.s[1]\n"
+ "fmla z16.s, z6.s, z2.s[1]\n"
+ "fmla z9.s, z7.s, z0.s[1]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z13.s, z7.s, z1.s[1]\n"
+ "fmla z17.s, z7.s, z2.s[1]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z10.s, z6.s, z0.s[1]\n"
+ "fmla z14.s, z6.s, z1.s[1]\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "fmla z18.s, z6.s, z2.s[1]\n"
+ "fmla z11.s, z7.s, z0.s[1]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z15.s, z7.s, z1.s[1]\n"
+ "fmla z19.s, z7.s, z2.s[1]\n"
+ "ble 40f\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z8.s, z6.s, z0.s[2]\n"
+ "fmla z12.s, z6.s, z1.s[2]\n"
+ "fmla z16.s, z6.s, z2.s[2]\n"
+ "fmla z9.s, z7.s, z0.s[2]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z13.s, z7.s, z1.s[2]\n"
+ "fmla z17.s, z7.s, z2.s[2]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z10.s, z6.s, z0.s[2]\n"
+ "fmla z14.s, z6.s, z1.s[2]\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "fmla z18.s, z6.s, z2.s[2]\n"
+ "fmla z11.s, z7.s, z0.s[2]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z15.s, z7.s, z1.s[2]\n"
+ "fmla z19.s, z7.s, z2.s[2]\n"
+ "ble 40f\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z8.s, z6.s, z0.s[3]\n"
+ "fmla z12.s, z6.s, z1.s[3]\n"
+ "fmla z16.s, z6.s, z2.s[3]\n"
+ "fmla z9.s, z7.s, z0.s[3]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z13.s, z7.s, z1.s[3]\n"
+ "fmla z17.s, z7.s, z2.s[3]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z10.s, z6.s, z0.s[3]\n"
+ "fmla z14.s, z6.s, z1.s[3]\n"
+ "addvl x9, x9, #1\n"
+ "addvl x28, x28, #1\n"
+ "fmla z18.s, z6.s, z2.s[3]\n"
+ "fmla z11.s, z7.s, z0.s[3]\n"
+ "fmla z15.s, z7.s, z1.s[3]\n"
+ "fmla z19.s, z7.s, z2.s[3]\n"
+ "40:" // Height 3: Multiply loop: multiply skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 35b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "tbz %x[flags], #1, 41f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z8.s, p5/M, z8.s, z1.s\n"
+ "fmin z9.s, p5/M, z9.s, z1.s\n"
+ "fmin z10.s, p5/M, z10.s, z1.s\n"
+ "fmin z11.s, p5/M, z11.s, z1.s\n"
+ "fmin z12.s, p5/M, z12.s, z1.s\n"
+ "fmin z13.s, p5/M, z13.s, z1.s\n"
+ "fmin z14.s, p5/M, z14.s, z1.s\n"
+ "fmin z15.s, p5/M, z15.s, z1.s\n"
+ "fmin z16.s, p5/M, z16.s, z1.s\n"
+ "fmin z17.s, p5/M, z17.s, z1.s\n"
+ "fmin z18.s, p5/M, z18.s, z1.s\n"
+ "fmin z19.s, p5/M, z19.s, z1.s\n"
+ "fmax z8.s, p5/M, z8.s, z0.s\n"
+ "fmax z9.s, p5/M, z9.s, z0.s\n"
+ "fmax z10.s, p5/M, z10.s, z0.s\n"
+ "fmax z11.s, p5/M, z11.s, z0.s\n"
+ "fmax z12.s, p5/M, z12.s, z0.s\n"
+ "fmax z13.s, p5/M, z13.s, z0.s\n"
+ "fmax z14.s, p5/M, z14.s, z0.s\n"
+ "fmax z15.s, p5/M, z15.s, z0.s\n"
+ "fmax z16.s, p5/M, z16.s, z0.s\n"
+ "fmax z17.s, p5/M, z17.s, z0.s\n"
+ "fmax z18.s, p5/M, z18.s, z0.s\n"
+ "fmax z19.s, p5/M, z19.s, z0.s\n"
+ "41:" // Height 3: No activation
+ "st1w { z8.s }, p4, [x12]\n"
+ "st1w { z9.s }, p3, [x12, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x12, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z12.s }, p4, [x24]\n"
+ "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x23]\n"
+ "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
+ "42:" // Height 3: Writeback done
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 30b\n"
+ "b 86f\n"
+ "43:" // Height 4
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "44:" // Height 4: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 45f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 45f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 45f\n"
+ "mov x10, x11\n"
+ "45:" // Height 4: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 46f\n"
+ "ld1w { z8.s }, p5/Z, [x14]\n"
+ "ld1w { z9.s }, p5/Z, [x14, #1, MUL VL]\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
+ "ld1w { z10.s }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x14, #3, MUL VL]\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
+ "mov z16.d, z8.d\n"
+ "mov z17.d, z9.d\n"
+ "addvl x14, x14, #4\n"
+ "mov z18.d, z10.d\n"
+ "mov z19.d, z11.d\n"
+ "mov z20.d, z8.d\n"
+ "mov z21.d, z9.d\n"
+ "mov z22.d, z10.d\n"
+ "mov z23.d, z11.d\n"
+ "b 48f\n"
+ "46:" // Height 4: no bias
+ "tbz %x[flags], #0, 47f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z8.s }, p4/Z, [x12]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "ld1w { z9.s }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z11.s }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x23]\n"
+ "ld1w { z17.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p4/Z, [x22]\n"
+ "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "b 48f\n"
+ "47:" // Height 4: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "mov z19.b, #0x0\n"
+ "mov z20.b, #0x0\n"
+ "mov z21.b, #0x0\n"
+ "mov z22.b, #0x0\n"
+ "mov z23.b, #0x0\n"
+ "48:" // Height 4: setup done
+ "mov x27, #0x0\n"
+ "49:" // Height 4: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 50f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "cbnz x27, 51f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "b 51f\n"
+ "50:" // Height 4: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "51:" // Height 4: input setup done
+ "cmp x26, #0x4\n"
+ "ble 53f\n"
+ "52:" // Height 4: Multiply loop: Main loop head
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "sub x26, x26, #0x4\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "ld1rqw { z3.s }, p0/Z, [x22]\n"
+ "cmp x26, #0x4\n"
+ "add x25, x25, #0x10\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z8.s, z6.s, z0.s[0]\n"
+ "fmla z12.s, z6.s, z1.s[0]\n"
+ "fmla z16.s, z6.s, z2.s[0]\n"
+ "fmla z20.s, z6.s, z3.s[0]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "add x24, x24, #0x10\n"
+ "fmla z9.s, z7.s, z0.s[0]\n"
+ "fmla z13.s, z7.s, z1.s[0]\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "fmla z17.s, z7.s, z2.s[0]\n"
+ "fmla z21.s, z7.s, z3.s[0]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "fmla z10.s, z6.s, z0.s[0]\n"
+ "fmla z14.s, z6.s, z1.s[0]\n"
+ "fmla z18.s, z6.s, z2.s[0]\n"
+ "fmla z22.s, z6.s, z3.s[0]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #1, MUL VL]\n"
+ "fmla z11.s, z7.s, z0.s[0]\n"
+ "fmla z15.s, z7.s, z1.s[0]\n"
+ "fmla z19.s, z7.s, z2.s[0]\n"
+ "fmla z23.s, z7.s, z3.s[0]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "fmla z8.s, z6.s, z0.s[1]\n"
+ "fmla z12.s, z6.s, z1.s[1]\n"
+ "fmla z16.s, z6.s, z2.s[1]\n"
+ "fmla z20.s, z6.s, z3.s[1]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #1, MUL VL]\n"
+ "fmla z9.s, z7.s, z0.s[1]\n"
+ "fmla z13.s, z7.s, z1.s[1]\n"
+ "fmla z17.s, z7.s, z2.s[1]\n"
+ "fmla z21.s, z7.s, z3.s[1]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #1, MUL VL]\n"
+ "fmla z10.s, z6.s, z0.s[1]\n"
+ "fmla z14.s, z6.s, z1.s[1]\n"
+ "fmla z18.s, z6.s, z2.s[1]\n"
+ "fmla z22.s, z6.s, z3.s[1]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #2, MUL VL]\n"
+ "fmla z11.s, z7.s, z0.s[1]\n"
+ "fmla z15.s, z7.s, z1.s[1]\n"
+ "fmla z19.s, z7.s, z2.s[1]\n"
+ "fmla z23.s, z7.s, z3.s[1]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z8.s, z6.s, z0.s[2]\n"
+ "fmla z12.s, z6.s, z1.s[2]\n"
+ "fmla z16.s, z6.s, z2.s[2]\n"
+ "fmla z20.s, z6.s, z3.s[2]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "fmla z9.s, z7.s, z0.s[2]\n"
+ "fmla z13.s, z7.s, z1.s[2]\n"
+ "fmla z17.s, z7.s, z2.s[2]\n"
+ "fmla z21.s, z7.s, z3.s[2]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #2, MUL VL]\n"
+ "fmla z10.s, z6.s, z0.s[2]\n"
+ "fmla z14.s, z6.s, z1.s[2]\n"
+ "fmla z18.s, z6.s, z2.s[2]\n"
+ "fmla z22.s, z6.s, z3.s[2]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #3, MUL VL]\n"
+ "addvl x11, x11, #4\n"
+ "fmla z11.s, z7.s, z0.s[2]\n"
+ "fmla z15.s, z7.s, z1.s[2]\n"
+ "fmla z19.s, z7.s, z2.s[2]\n"
+ "fmla z23.s, z7.s, z3.s[2]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
+ "fmla z8.s, z6.s, z0.s[3]\n"
+ "fmla z12.s, z6.s, z1.s[3]\n"
+ "fmla z16.s, z6.s, z2.s[3]\n"
+ "fmla z20.s, z6.s, z3.s[3]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ "fmla z9.s, z7.s, z0.s[3]\n"
+ "fmla z13.s, z7.s, z1.s[3]\n"
+ "fmla z17.s, z7.s, z2.s[3]\n"
+ "fmla z21.s, z7.s, z3.s[3]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "fmla z10.s, z6.s, z0.s[3]\n"
+ "fmla z14.s, z6.s, z1.s[3]\n"
+ "fmla z18.s, z6.s, z2.s[3]\n"
+ "fmla z22.s, z6.s, z3.s[3]\n"
+ "fmla z11.s, z7.s, z0.s[3]\n"
+ "fmla z15.s, z7.s, z1.s[3]\n"
+ "fmla z19.s, z7.s, z2.s[3]\n"
+ "fmla z23.s, z7.s, z3.s[3]\n"
+ "bgt 52b\n"
+ "53:" // Height 4: Multiply loop: Single iteration only
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "ld1rqw { z3.s }, p0/Z, [x22]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z8.s, z6.s, z0.s[0]\n"
+ "fmla z12.s, z6.s, z1.s[0]\n"
+ "fmla z16.s, z6.s, z2.s[0]\n"
+ "fmla z20.s, z6.s, z3.s[0]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z9.s, z7.s, z0.s[0]\n"
+ "fmla z13.s, z7.s, z1.s[0]\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "fmla z17.s, z7.s, z2.s[0]\n"
+ "fmla z21.s, z7.s, z3.s[0]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.s, z6.s, z0.s[0]\n"
+ "fmla z14.s, z6.s, z1.s[0]\n"
+ "fmla z18.s, z6.s, z2.s[0]\n"
+ "fmla z22.s, z6.s, z3.s[0]\n"
+ "fmla z11.s, z7.s, z0.s[0]\n"
+ "fmla z15.s, z7.s, z1.s[0]\n"
+ "fmla z19.s, z7.s, z2.s[0]\n"
+ "fmla z23.s, z7.s, z3.s[0]\n"
+ "ble 54f\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z8.s, z6.s, z0.s[1]\n"
+ "fmla z12.s, z6.s, z1.s[1]\n"
+ "fmla z16.s, z6.s, z2.s[1]\n"
+ "fmla z20.s, z6.s, z3.s[1]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z9.s, z7.s, z0.s[1]\n"
+ "fmla z13.s, z7.s, z1.s[1]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z17.s, z7.s, z2.s[1]\n"
+ "fmla z21.s, z7.s, z3.s[1]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z10.s, z6.s, z0.s[1]\n"
+ "fmla z14.s, z6.s, z1.s[1]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z18.s, z6.s, z2.s[1]\n"
+ "fmla z22.s, z6.s, z3.s[1]\n"
+ "fmla z11.s, z7.s, z0.s[1]\n"
+ "fmla z15.s, z7.s, z1.s[1]\n"
+ "fmla z19.s, z7.s, z2.s[1]\n"
+ "fmla z23.s, z7.s, z3.s[1]\n"
+ "ble 54f\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z8.s, z6.s, z0.s[2]\n"
+ "fmla z12.s, z6.s, z1.s[2]\n"
+ "fmla z16.s, z6.s, z2.s[2]\n"
+ "fmla z20.s, z6.s, z3.s[2]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "subs x26, x26, #0x1\n"
+ "fmla z9.s, z7.s, z0.s[2]\n"
+ "fmla z13.s, z7.s, z1.s[2]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z17.s, z7.s, z2.s[2]\n"
+ "fmla z21.s, z7.s, z3.s[2]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z10.s, z6.s, z0.s[2]\n"
+ "fmla z14.s, z6.s, z1.s[2]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z18.s, z6.s, z2.s[2]\n"
+ "fmla z22.s, z6.s, z3.s[2]\n"
+ "fmla z11.s, z7.s, z0.s[2]\n"
+ "fmla z15.s, z7.s, z1.s[2]\n"
+ "fmla z19.s, z7.s, z2.s[2]\n"
+ "fmla z23.s, z7.s, z3.s[2]\n"
+ "ble 54f\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z8.s, z6.s, z0.s[3]\n"
+ "fmla z12.s, z6.s, z1.s[3]\n"
+ "fmla z16.s, z6.s, z2.s[3]\n"
+ "fmla z20.s, z6.s, z3.s[3]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z9.s, z7.s, z0.s[3]\n"
+ "fmla z13.s, z7.s, z1.s[3]\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "fmla z17.s, z7.s, z2.s[3]\n"
+ "fmla z21.s, z7.s, z3.s[3]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.s, z6.s, z0.s[3]\n"
+ "fmla z14.s, z6.s, z1.s[3]\n"
+ "fmla z18.s, z6.s, z2.s[3]\n"
+ "fmla z22.s, z6.s, z3.s[3]\n"
+ "fmla z11.s, z7.s, z0.s[3]\n"
+ "fmla z15.s, z7.s, z1.s[3]\n"
+ "fmla z19.s, z7.s, z2.s[3]\n"
+ "fmla z23.s, z7.s, z3.s[3]\n"
+ "54:" // Height 4: Multiply loop: multiply skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 49b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "tbz %x[flags], #1, 55f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z8.s, p5/M, z8.s, z1.s\n"
+ "fmin z9.s, p5/M, z9.s, z1.s\n"
+ "fmin z10.s, p5/M, z10.s, z1.s\n"
+ "fmin z11.s, p5/M, z11.s, z1.s\n"
+ "fmin z12.s, p5/M, z12.s, z1.s\n"
+ "fmin z13.s, p5/M, z13.s, z1.s\n"
+ "fmin z14.s, p5/M, z14.s, z1.s\n"
+ "fmin z15.s, p5/M, z15.s, z1.s\n"
+ "fmin z16.s, p5/M, z16.s, z1.s\n"
+ "fmin z17.s, p5/M, z17.s, z1.s\n"
+ "fmin z18.s, p5/M, z18.s, z1.s\n"
+ "fmin z19.s, p5/M, z19.s, z1.s\n"
+ "fmin z20.s, p5/M, z20.s, z1.s\n"
+ "fmin z21.s, p5/M, z21.s, z1.s\n"
+ "fmin z22.s, p5/M, z22.s, z1.s\n"
+ "fmin z23.s, p5/M, z23.s, z1.s\n"
+ "fmax z8.s, p5/M, z8.s, z0.s\n"
+ "fmax z9.s, p5/M, z9.s, z0.s\n"
+ "fmax z10.s, p5/M, z10.s, z0.s\n"
+ "fmax z11.s, p5/M, z11.s, z0.s\n"
+ "fmax z12.s, p5/M, z12.s, z0.s\n"
+ "fmax z13.s, p5/M, z13.s, z0.s\n"
+ "fmax z14.s, p5/M, z14.s, z0.s\n"
+ "fmax z15.s, p5/M, z15.s, z0.s\n"
+ "fmax z16.s, p5/M, z16.s, z0.s\n"
+ "fmax z17.s, p5/M, z17.s, z0.s\n"
+ "fmax z18.s, p5/M, z18.s, z0.s\n"
+ "fmax z19.s, p5/M, z19.s, z0.s\n"
+ "fmax z20.s, p5/M, z20.s, z0.s\n"
+ "fmax z21.s, p5/M, z21.s, z0.s\n"
+ "fmax z22.s, p5/M, z22.s, z0.s\n"
+ "fmax z23.s, p5/M, z23.s, z0.s\n"
+ "55:" // Height 4: No activation
+ "st1w { z8.s }, p4, [x12]\n"
+ "st1w { z9.s }, p3, [x12, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x12, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z12.s }, p4, [x24]\n"
+ "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x23]\n"
+ "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z20.s }, p4, [x22]\n"
+ "st1w { z21.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z22.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z23.s }, p1, [x22, #3, MUL VL]\n"
+ "56:" // Height 4: Writeback done
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 44b\n"
+ "b 86f\n"
+ "57:" // Height 5
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "58:" // Height 5: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 59f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 59f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 59f\n"
+ "mov x10, x11\n"
+ "59:" // Height 5: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 60f\n"
+ "ld1w { z8.s }, p5/Z, [x14]\n"
+ "ld1w { z9.s }, p5/Z, [x14, #1, MUL VL]\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
+ "ld1w { z10.s }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x14, #3, MUL VL]\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
+ "mov z16.d, z8.d\n"
+ "mov z17.d, z9.d\n"
+ "addvl x14, x14, #4\n"
+ "mov z18.d, z10.d\n"
+ "mov z19.d, z11.d\n"
+ "mov z20.d, z8.d\n"
+ "mov z21.d, z9.d\n"
+ "mov z22.d, z10.d\n"
+ "mov z23.d, z11.d\n"
+ "mov z24.d, z8.d\n"
+ "mov z25.d, z9.d\n"
+ "mov z26.d, z10.d\n"
+ "mov z27.d, z11.d\n"
+ "b 62f\n"
+ "60:" // Height 5: no bias
+ "tbz %x[flags], #0, 61f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z8.s }, p4/Z, [x12]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z9.s }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z11.s }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x23]\n"
+ "ld1w { z17.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p4/Z, [x22]\n"
+ "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z24.s }, p4/Z, [x21]\n"
+ "ld1w { z25.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z26.s }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z27.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "b 62f\n"
+ "61:" // Height 5: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "mov z19.b, #0x0\n"
+ "mov z20.b, #0x0\n"
+ "mov z21.b, #0x0\n"
+ "mov z22.b, #0x0\n"
+ "mov z23.b, #0x0\n"
+ "mov z24.b, #0x0\n"
+ "mov z25.b, #0x0\n"
+ "mov z26.b, #0x0\n"
+ "mov z27.b, #0x0\n"
+ "62:" // Height 5: setup done
+ "mov x27, #0x0\n"
+ "63:" // Height 5: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 64f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "cbnz x27, 65f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "b 65f\n"
+ "64:" // Height 5: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "65:" // Height 5: input setup done
+ "cmp x26, #0x4\n"
+ "ble 67f\n"
+ "66:" // Height 5: Multiply loop: Main loop head
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "sub x26, x26, #0x4\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "ld1rqw { z3.s }, p0/Z, [x22]\n"
+ "cmp x26, #0x4\n"
+ "add x25, x25, #0x10\n"
+ "ld1rqw { z4.s }, p0/Z, [x21]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "fmla z8.s, z6.s, z0.s[0]\n"
+ "fmla z12.s, z6.s, z1.s[0]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z16.s, z6.s, z2.s[0]\n"
+ "fmla z20.s, z6.s, z3.s[0]\n"
+ "add x24, x24, #0x10\n"
+ "fmla z24.s, z6.s, z4.s[0]\n"
+ "fmla z9.s, z7.s, z0.s[0]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "add x23, x23, #0x10\n"
+ "fmla z13.s, z7.s, z1.s[0]\n"
+ "fmla z17.s, z7.s, z2.s[0]\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ "fmla z21.s, z7.s, z3.s[0]\n"
+ "fmla z25.s, z7.s, z4.s[0]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "fmla z10.s, z6.s, z0.s[0]\n"
+ "fmla z14.s, z6.s, z1.s[0]\n"
+ "fmla z18.s, z6.s, z2.s[0]\n"
+ "fmla z22.s, z6.s, z3.s[0]\n"
+ "fmla z26.s, z6.s, z4.s[0]\n"
+ "fmla z11.s, z7.s, z0.s[0]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #1, MUL VL]\n"
+ "fmla z15.s, z7.s, z1.s[0]\n"
+ "fmla z19.s, z7.s, z2.s[0]\n"
+ "fmla z23.s, z7.s, z3.s[0]\n"
+ "fmla z27.s, z7.s, z4.s[0]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "fmla z8.s, z6.s, z0.s[1]\n"
+ "fmla z12.s, z6.s, z1.s[1]\n"
+ "fmla z16.s, z6.s, z2.s[1]\n"
+ "fmla z20.s, z6.s, z3.s[1]\n"
+ "fmla z24.s, z6.s, z4.s[1]\n"
+ "fmla z9.s, z7.s, z0.s[1]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #1, MUL VL]\n"
+ "fmla z13.s, z7.s, z1.s[1]\n"
+ "fmla z17.s, z7.s, z2.s[1]\n"
+ "fmla z21.s, z7.s, z3.s[1]\n"
+ "fmla z25.s, z7.s, z4.s[1]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #1, MUL VL]\n"
+ "fmla z10.s, z6.s, z0.s[1]\n"
+ "fmla z14.s, z6.s, z1.s[1]\n"
+ "fmla z18.s, z6.s, z2.s[1]\n"
+ "fmla z22.s, z6.s, z3.s[1]\n"
+ "fmla z26.s, z6.s, z4.s[1]\n"
+ "fmla z11.s, z7.s, z0.s[1]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #2, MUL VL]\n"
+ "fmla z15.s, z7.s, z1.s[1]\n"
+ "fmla z19.s, z7.s, z2.s[1]\n"
+ "fmla z23.s, z7.s, z3.s[1]\n"
+ "fmla z27.s, z7.s, z4.s[1]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z8.s, z6.s, z0.s[2]\n"
+ "fmla z12.s, z6.s, z1.s[2]\n"
+ "fmla z16.s, z6.s, z2.s[2]\n"
+ "fmla z20.s, z6.s, z3.s[2]\n"
+ "fmla z24.s, z6.s, z4.s[2]\n"
+ "fmla z9.s, z7.s, z0.s[2]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "fmla z13.s, z7.s, z1.s[2]\n"
+ "fmla z17.s, z7.s, z2.s[2]\n"
+ "fmla z21.s, z7.s, z3.s[2]\n"
+ "fmla z25.s, z7.s, z4.s[2]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #2, MUL VL]\n"
+ "fmla z10.s, z6.s, z0.s[2]\n"
+ "fmla z14.s, z6.s, z1.s[2]\n"
+ "fmla z18.s, z6.s, z2.s[2]\n"
+ "fmla z22.s, z6.s, z3.s[2]\n"
+ "fmla z26.s, z6.s, z4.s[2]\n"
+ "fmla z11.s, z7.s, z0.s[2]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #3, MUL VL]\n"
+ "addvl x11, x11, #4\n"
+ "fmla z15.s, z7.s, z1.s[2]\n"
+ "fmla z19.s, z7.s, z2.s[2]\n"
+ "fmla z23.s, z7.s, z3.s[2]\n"
+ "fmla z27.s, z7.s, z4.s[2]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
+ "fmla z8.s, z6.s, z0.s[3]\n"
+ "fmla z12.s, z6.s, z1.s[3]\n"
+ "fmla z16.s, z6.s, z2.s[3]\n"
+ "fmla z20.s, z6.s, z3.s[3]\n"
+ "fmla z24.s, z6.s, z4.s[3]\n"
+ "fmla z9.s, z7.s, z0.s[3]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ "fmla z13.s, z7.s, z1.s[3]\n"
+ "fmla z17.s, z7.s, z2.s[3]\n"
+ "fmla z21.s, z7.s, z3.s[3]\n"
+ "fmla z25.s, z7.s, z4.s[3]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "fmla z10.s, z6.s, z0.s[3]\n"
+ "fmla z14.s, z6.s, z1.s[3]\n"
+ "fmla z18.s, z6.s, z2.s[3]\n"
+ "fmla z22.s, z6.s, z3.s[3]\n"
+ "fmla z26.s, z6.s, z4.s[3]\n"
+ "fmla z11.s, z7.s, z0.s[3]\n"
+ "fmla z15.s, z7.s, z1.s[3]\n"
+ "fmla z19.s, z7.s, z2.s[3]\n"
+ "fmla z23.s, z7.s, z3.s[3]\n"
+ "fmla z27.s, z7.s, z4.s[3]\n"
+ "bgt 66b\n"
+ "67:" // Height 5: Multiply loop: Single iteration only
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "ld1rqw { z3.s }, p0/Z, [x22]\n"
+ "ld1rqw { z4.s }, p0/Z, [x21]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "fmla z8.s, z6.s, z0.s[0]\n"
+ "fmla z12.s, z6.s, z1.s[0]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z16.s, z6.s, z2.s[0]\n"
+ "fmla z20.s, z6.s, z3.s[0]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z24.s, z6.s, z4.s[0]\n"
+ "fmla z9.s, z7.s, z0.s[0]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z13.s, z7.s, z1.s[0]\n"
+ "fmla z17.s, z7.s, z2.s[0]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z21.s, z7.s, z3.s[0]\n"
+ "fmla z25.s, z7.s, z4.s[0]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.s, z6.s, z0.s[0]\n"
+ "fmla z14.s, z6.s, z1.s[0]\n"
+ "fmla z18.s, z6.s, z2.s[0]\n"
+ "fmla z22.s, z6.s, z3.s[0]\n"
+ "fmla z26.s, z6.s, z4.s[0]\n"
+ "fmla z11.s, z7.s, z0.s[0]\n"
+ "fmla z15.s, z7.s, z1.s[0]\n"
+ "fmla z19.s, z7.s, z2.s[0]\n"
+ "fmla z23.s, z7.s, z3.s[0]\n"
+ "fmla z27.s, z7.s, z4.s[0]\n"
+ "ble 68f\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z8.s, z6.s, z0.s[1]\n"
+ "fmla z12.s, z6.s, z1.s[1]\n"
+ "fmla z16.s, z6.s, z2.s[1]\n"
+ "fmla z20.s, z6.s, z3.s[1]\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
+ "fmla z24.s, z6.s, z4.s[1]\n"
+ "fmla z9.s, z7.s, z0.s[1]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z13.s, z7.s, z1.s[1]\n"
+ "fmla z17.s, z7.s, z2.s[1]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z21.s, z7.s, z3.s[1]\n"
+ "fmla z25.s, z7.s, z4.s[1]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.s, z6.s, z0.s[1]\n"
+ "fmla z14.s, z6.s, z1.s[1]\n"
+ "fmla z18.s, z6.s, z2.s[1]\n"
+ "fmla z22.s, z6.s, z3.s[1]\n"
+ "fmla z26.s, z6.s, z4.s[1]\n"
+ "fmla z11.s, z7.s, z0.s[1]\n"
+ "fmla z15.s, z7.s, z1.s[1]\n"
+ "fmla z19.s, z7.s, z2.s[1]\n"
+ "fmla z23.s, z7.s, z3.s[1]\n"
+ "fmla z27.s, z7.s, z4.s[1]\n"
+ "ble 68f\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z8.s, z6.s, z0.s[2]\n"
+ "fmla z12.s, z6.s, z1.s[2]\n"
+ "fmla z16.s, z6.s, z2.s[2]\n"
+ "fmla z20.s, z6.s, z3.s[2]\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
+ "fmla z24.s, z6.s, z4.s[2]\n"
+ "fmla z9.s, z7.s, z0.s[2]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z13.s, z7.s, z1.s[2]\n"
+ "fmla z17.s, z7.s, z2.s[2]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z21.s, z7.s, z3.s[2]\n"
+ "fmla z25.s, z7.s, z4.s[2]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.s, z6.s, z0.s[2]\n"
+ "fmla z14.s, z6.s, z1.s[2]\n"
+ "fmla z18.s, z6.s, z2.s[2]\n"
+ "fmla z22.s, z6.s, z3.s[2]\n"
+ "fmla z26.s, z6.s, z4.s[2]\n"
+ "fmla z11.s, z7.s, z0.s[2]\n"
+ "fmla z15.s, z7.s, z1.s[2]\n"
+ "fmla z19.s, z7.s, z2.s[2]\n"
+ "fmla z23.s, z7.s, z3.s[2]\n"
+ "fmla z27.s, z7.s, z4.s[2]\n"
+ "ble 68f\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z8.s, z6.s, z0.s[3]\n"
+ "fmla z12.s, z6.s, z1.s[3]\n"
+ "fmla z16.s, z6.s, z2.s[3]\n"
+ "fmla z20.s, z6.s, z3.s[3]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z24.s, z6.s, z4.s[3]\n"
+ "fmla z9.s, z7.s, z0.s[3]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z13.s, z7.s, z1.s[3]\n"
+ "fmla z17.s, z7.s, z2.s[3]\n"
+ "fmla z21.s, z7.s, z3.s[3]\n"
+ "fmla z25.s, z7.s, z4.s[3]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.s, z6.s, z0.s[3]\n"
+ "fmla z14.s, z6.s, z1.s[3]\n"
+ "fmla z18.s, z6.s, z2.s[3]\n"
+ "fmla z22.s, z6.s, z3.s[3]\n"
+ "fmla z26.s, z6.s, z4.s[3]\n"
+ "fmla z11.s, z7.s, z0.s[3]\n"
+ "fmla z15.s, z7.s, z1.s[3]\n"
+ "fmla z19.s, z7.s, z2.s[3]\n"
+ "fmla z23.s, z7.s, z3.s[3]\n"
+ "fmla z27.s, z7.s, z4.s[3]\n"
+ "68:" // Height 5: Multiply loop: multiply skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 63b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "tbz %x[flags], #1, 69f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z8.s, p5/M, z8.s, z1.s\n"
+ "fmin z9.s, p5/M, z9.s, z1.s\n"
+ "fmin z10.s, p5/M, z10.s, z1.s\n"
+ "fmin z11.s, p5/M, z11.s, z1.s\n"
+ "fmin z12.s, p5/M, z12.s, z1.s\n"
+ "fmin z13.s, p5/M, z13.s, z1.s\n"
+ "fmin z14.s, p5/M, z14.s, z1.s\n"
+ "fmin z15.s, p5/M, z15.s, z1.s\n"
+ "fmin z16.s, p5/M, z16.s, z1.s\n"
+ "fmin z17.s, p5/M, z17.s, z1.s\n"
+ "fmin z18.s, p5/M, z18.s, z1.s\n"
+ "fmin z19.s, p5/M, z19.s, z1.s\n"
+ "fmin z20.s, p5/M, z20.s, z1.s\n"
+ "fmin z21.s, p5/M, z21.s, z1.s\n"
+ "fmin z22.s, p5/M, z22.s, z1.s\n"
+ "fmin z23.s, p5/M, z23.s, z1.s\n"
+ "fmin z24.s, p5/M, z24.s, z1.s\n"
+ "fmin z25.s, p5/M, z25.s, z1.s\n"
+ "fmin z26.s, p5/M, z26.s, z1.s\n"
+ "fmin z27.s, p5/M, z27.s, z1.s\n"
+ "fmax z8.s, p5/M, z8.s, z0.s\n"
+ "fmax z9.s, p5/M, z9.s, z0.s\n"
+ "fmax z10.s, p5/M, z10.s, z0.s\n"
+ "fmax z11.s, p5/M, z11.s, z0.s\n"
+ "fmax z12.s, p5/M, z12.s, z0.s\n"
+ "fmax z13.s, p5/M, z13.s, z0.s\n"
+ "fmax z14.s, p5/M, z14.s, z0.s\n"
+ "fmax z15.s, p5/M, z15.s, z0.s\n"
+ "fmax z16.s, p5/M, z16.s, z0.s\n"
+ "fmax z17.s, p5/M, z17.s, z0.s\n"
+ "fmax z18.s, p5/M, z18.s, z0.s\n"
+ "fmax z19.s, p5/M, z19.s, z0.s\n"
+ "fmax z20.s, p5/M, z20.s, z0.s\n"
+ "fmax z21.s, p5/M, z21.s, z0.s\n"
+ "fmax z22.s, p5/M, z22.s, z0.s\n"
+ "fmax z23.s, p5/M, z23.s, z0.s\n"
+ "fmax z24.s, p5/M, z24.s, z0.s\n"
+ "fmax z25.s, p5/M, z25.s, z0.s\n"
+ "fmax z26.s, p5/M, z26.s, z0.s\n"
+ "fmax z27.s, p5/M, z27.s, z0.s\n"
+ "69:" // Height 5: No activation
+ "st1w { z8.s }, p4, [x12]\n"
+ "st1w { z9.s }, p3, [x12, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x12, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z12.s }, p4, [x24]\n"
+ "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x23]\n"
+ "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z20.s }, p4, [x22]\n"
+ "st1w { z21.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z22.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z23.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x21]\n"
+ "st1w { z25.s }, p3, [x21, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x21, #3, MUL VL]\n"
+ "70:" // Height 5: Writeback done
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 58b\n"
+ "b 86f\n"
+ "71:" // Height 6
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x20, #0x18\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x14, %x[bias]\n"
+ "mov x12, %x[output_ptr]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
+ "72:" // Height 6: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #2\n"
+ "cntw x20, ALL, MUL #3\n"
+ "add x9, x10, x19, LSL #2\n"
+ "add x28, x9, x19, LSL #2\n"
+ "add x19, x28, x19, LSL #2\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 73f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 73f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 73f\n"
+ "mov x10, x11\n"
+ "73:" // Height 6: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 74f\n"
+ "ld1w { z8.s }, p5/Z, [x14]\n"
+ "ld1w { z9.s }, p5/Z, [x14, #1, MUL VL]\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
+ "ld1w { z10.s }, p5/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p5/Z, [x14, #3, MUL VL]\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
+ "mov z16.d, z8.d\n"
+ "mov z17.d, z9.d\n"
+ "addvl x14, x14, #4\n"
+ "mov z18.d, z10.d\n"
+ "mov z19.d, z11.d\n"
+ "mov z20.d, z8.d\n"
+ "mov z21.d, z9.d\n"
+ "mov z22.d, z10.d\n"
+ "mov z23.d, z11.d\n"
+ "mov z24.d, z8.d\n"
+ "mov z25.d, z9.d\n"
+ "mov z26.d, z10.d\n"
+ "mov z27.d, z11.d\n"
+ "mov z28.d, z8.d\n"
+ "mov z29.d, z9.d\n"
+ "mov z30.d, z10.d\n"
+ "mov z31.d, z11.d\n"
+ "b 76f\n"
+ "74:" // Height 6: no bias
+ "tbz %x[flags], #0, 75f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "ld1w { z8.s }, p4/Z, [x12]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z9.s }, p3/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "ld1w { z11.s }, p1/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x23]\n"
+ "ld1w { z17.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p4/Z, [x22]\n"
+ "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z24.s }, p4/Z, [x21]\n"
+ "ld1w { z25.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z26.s }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z27.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "ld1w { z28.s }, p4/Z, [x20]\n"
+ "ld1w { z29.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z30.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z31.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "b 76f\n"
+ "75:" // Height 6: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "mov z19.b, #0x0\n"
+ "mov z20.b, #0x0\n"
+ "mov z21.b, #0x0\n"
+ "mov z22.b, #0x0\n"
+ "mov z23.b, #0x0\n"
+ "mov z24.b, #0x0\n"
+ "mov z25.b, #0x0\n"
+ "mov z26.b, #0x0\n"
+ "mov z27.b, #0x0\n"
+ "mov z28.b, #0x0\n"
+ "mov z29.b, #0x0\n"
+ "mov z30.b, #0x0\n"
+ "mov z31.b, #0x0\n"
+ "76:" // Height 6: setup done
+ "mov x27, #0x0\n"
+ "77:" // Height 6: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w26, [x19, x27, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 78f\n"
+ "ldr x20, [%x[input_ptr], x27, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x25, [x20, #0x0]\n"
+ "ldr x24, [x20, #0x8]\n"
+ "ldr x23, [x20, #0x10]\n"
+ "ldr x22, [x20, #0x18]\n"
+ "ldr x21, [x20, #0x20]\n"
+ "ldr x20, [x20, #0x28]\n"
+ "cbnz x27, 79f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x25, x25, x19, LSL #2\n"
+ "add x24, x24, x19, LSL #2\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "add x20, x20, x19, LSL #2\n"
+ "b 79f\n"
+ "78:" // Height 6: setup direct input
+ "mov x25, %x[input_ptr]\n"
+ "add x24, x25, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "79:" // Height 6: input setup done
+ "cmp x26, #0x4\n"
+ "ble 81f\n"
+ "80:" // Height 6: Multiply loop: Main loop head
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "sub x26, x26, #0x4\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "ld1rqw { z3.s }, p0/Z, [x22]\n"
+ "cmp x26, #0x4\n"
+ "add x25, x25, #0x10\n"
+ "ld1rqw { z4.s }, p0/Z, [x21]\n"
+ "ld1rqw { z5.s }, p0/Z, [x20]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z8.s, z6.s, z0.s[0]\n"
+ "fmla z12.s, z6.s, z1.s[0]\n"
+ "fmla z16.s, z6.s, z2.s[0]\n"
+ "fmla z20.s, z6.s, z3.s[0]\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ "fmla z24.s, z6.s, z4.s[0]\n"
+ "fmla z28.s, z6.s, z5.s[0]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "add x20, x20, #0x10\n"
+ "fmla z9.s, z7.s, z0.s[0]\n"
+ "fmla z13.s, z7.s, z1.s[0]\n"
+ "fmla z17.s, z7.s, z2.s[0]\n"
+ "fmla z21.s, z7.s, z3.s[0]\n"
+ "fmla z25.s, z7.s, z4.s[0]\n"
+ "fmla z29.s, z7.s, z5.s[0]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "fmla z10.s, z6.s, z0.s[0]\n"
+ "fmla z14.s, z6.s, z1.s[0]\n"
+ "fmla z18.s, z6.s, z2.s[0]\n"
+ "fmla z22.s, z6.s, z3.s[0]\n"
+ "fmla z26.s, z6.s, z4.s[0]\n"
+ "fmla z30.s, z6.s, z5.s[0]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #1, MUL VL]\n"
+ "fmla z11.s, z7.s, z0.s[0]\n"
+ "fmla z15.s, z7.s, z1.s[0]\n"
+ "fmla z19.s, z7.s, z2.s[0]\n"
+ "fmla z23.s, z7.s, z3.s[0]\n"
+ "fmla z27.s, z7.s, z4.s[0]\n"
+ "fmla z31.s, z7.s, z5.s[0]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "fmla z8.s, z6.s, z0.s[1]\n"
+ "fmla z12.s, z6.s, z1.s[1]\n"
+ "fmla z16.s, z6.s, z2.s[1]\n"
+ "fmla z20.s, z6.s, z3.s[1]\n"
+ "fmla z24.s, z6.s, z4.s[1]\n"
+ "fmla z28.s, z6.s, z5.s[1]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #1, MUL VL]\n"
+ "fmla z9.s, z7.s, z0.s[1]\n"
+ "fmla z13.s, z7.s, z1.s[1]\n"
+ "fmla z17.s, z7.s, z2.s[1]\n"
+ "fmla z21.s, z7.s, z3.s[1]\n"
+ "fmla z25.s, z7.s, z4.s[1]\n"
+ "fmla z29.s, z7.s, z5.s[1]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #1, MUL VL]\n"
+ "fmla z10.s, z6.s, z0.s[1]\n"
+ "fmla z14.s, z6.s, z1.s[1]\n"
+ "fmla z18.s, z6.s, z2.s[1]\n"
+ "fmla z22.s, z6.s, z3.s[1]\n"
+ "fmla z26.s, z6.s, z4.s[1]\n"
+ "fmla z30.s, z6.s, z5.s[1]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #2, MUL VL]\n"
+ "fmla z11.s, z7.s, z0.s[1]\n"
+ "fmla z15.s, z7.s, z1.s[1]\n"
+ "fmla z19.s, z7.s, z2.s[1]\n"
+ "fmla z23.s, z7.s, z3.s[1]\n"
+ "fmla z27.s, z7.s, z4.s[1]\n"
+ "fmla z31.s, z7.s, z5.s[1]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z8.s, z6.s, z0.s[2]\n"
+ "fmla z12.s, z6.s, z1.s[2]\n"
+ "fmla z16.s, z6.s, z2.s[2]\n"
+ "fmla z20.s, z6.s, z3.s[2]\n"
+ "fmla z24.s, z6.s, z4.s[2]\n"
+ "fmla z28.s, z6.s, z5.s[2]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "fmla z9.s, z7.s, z0.s[2]\n"
+ "fmla z13.s, z7.s, z1.s[2]\n"
+ "fmla z17.s, z7.s, z2.s[2]\n"
+ "fmla z21.s, z7.s, z3.s[2]\n"
+ "fmla z25.s, z7.s, z4.s[2]\n"
+ "fmla z29.s, z7.s, z5.s[2]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #2, MUL VL]\n"
+ "fmla z10.s, z6.s, z0.s[2]\n"
+ "fmla z14.s, z6.s, z1.s[2]\n"
+ "fmla z18.s, z6.s, z2.s[2]\n"
+ "fmla z22.s, z6.s, z3.s[2]\n"
+ "fmla z26.s, z6.s, z4.s[2]\n"
+ "fmla z30.s, z6.s, z5.s[2]\n"
+ "ld1w { z6.s }, p5/Z, [x11, #3, MUL VL]\n"
+ "addvl x11, x11, #4\n"
+ "fmla z11.s, z7.s, z0.s[2]\n"
+ "fmla z15.s, z7.s, z1.s[2]\n"
+ "fmla z19.s, z7.s, z2.s[2]\n"
+ "fmla z23.s, z7.s, z3.s[2]\n"
+ "fmla z27.s, z7.s, z4.s[2]\n"
+ "fmla z31.s, z7.s, z5.s[2]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
+ "fmla z8.s, z6.s, z0.s[3]\n"
+ "fmla z12.s, z6.s, z1.s[3]\n"
+ "fmla z16.s, z6.s, z2.s[3]\n"
+ "fmla z20.s, z6.s, z3.s[3]\n"
+ "fmla z24.s, z6.s, z4.s[3]\n"
+ "fmla z28.s, z6.s, z5.s[3]\n"
+ "ld1w { z6.s }, p5/Z, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ "fmla z9.s, z7.s, z0.s[3]\n"
+ "fmla z13.s, z7.s, z1.s[3]\n"
+ "fmla z17.s, z7.s, z2.s[3]\n"
+ "fmla z21.s, z7.s, z3.s[3]\n"
+ "fmla z25.s, z7.s, z4.s[3]\n"
+ "fmla z29.s, z7.s, z5.s[3]\n"
+ "ld1w { z7.s }, p5/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "fmla z10.s, z6.s, z0.s[3]\n"
+ "fmla z14.s, z6.s, z1.s[3]\n"
+ "fmla z18.s, z6.s, z2.s[3]\n"
+ "fmla z22.s, z6.s, z3.s[3]\n"
+ "fmla z26.s, z6.s, z4.s[3]\n"
+ "fmla z30.s, z6.s, z5.s[3]\n"
+ "fmla z11.s, z7.s, z0.s[3]\n"
+ "fmla z15.s, z7.s, z1.s[3]\n"
+ "fmla z19.s, z7.s, z2.s[3]\n"
+ "fmla z23.s, z7.s, z3.s[3]\n"
+ "fmla z27.s, z7.s, z4.s[3]\n"
+ "fmla z31.s, z7.s, z5.s[3]\n"
+ "bgt 80b\n"
+ "81:" // Height 6: Multiply loop: Single iteration only
+ "whilelt p0.s, XZR, x26\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "subs x26, x26, #0x1\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
+ "ld1rqw { z3.s }, p0/Z, [x22]\n"
+ "ld1rqw { z4.s }, p0/Z, [x21]\n"
+ "ld1rqw { z5.s }, p0/Z, [x20]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z8.s, z6.s, z0.s[0]\n"
+ "fmla z12.s, z6.s, z1.s[0]\n"
+ "fmla z16.s, z6.s, z2.s[0]\n"
+ "fmla z20.s, z6.s, z3.s[0]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z24.s, z6.s, z4.s[0]\n"
+ "fmla z28.s, z6.s, z5.s[0]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z9.s, z7.s, z0.s[0]\n"
+ "fmla z13.s, z7.s, z1.s[0]\n"
+ "fmla z17.s, z7.s, z2.s[0]\n"
+ "fmla z21.s, z7.s, z3.s[0]\n"
+ "fmla z25.s, z7.s, z4.s[0]\n"
+ "fmla z29.s, z7.s, z5.s[0]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.s, z6.s, z0.s[0]\n"
+ "fmla z14.s, z6.s, z1.s[0]\n"
+ "fmla z18.s, z6.s, z2.s[0]\n"
+ "fmla z22.s, z6.s, z3.s[0]\n"
+ "fmla z26.s, z6.s, z4.s[0]\n"
+ "fmla z30.s, z6.s, z5.s[0]\n"
+ "fmla z11.s, z7.s, z0.s[0]\n"
+ "fmla z15.s, z7.s, z1.s[0]\n"
+ "fmla z19.s, z7.s, z2.s[0]\n"
+ "fmla z23.s, z7.s, z3.s[0]\n"
+ "fmla z27.s, z7.s, z4.s[0]\n"
+ "fmla z31.s, z7.s, z5.s[0]\n"
+ "ble 82f\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z8.s, z6.s, z0.s[1]\n"
+ "fmla z12.s, z6.s, z1.s[1]\n"
+ "fmla z16.s, z6.s, z2.s[1]\n"
+ "fmla z20.s, z6.s, z3.s[1]\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
+ "fmla z24.s, z6.s, z4.s[1]\n"
+ "fmla z28.s, z6.s, z5.s[1]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z9.s, z7.s, z0.s[1]\n"
+ "fmla z13.s, z7.s, z1.s[1]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z17.s, z7.s, z2.s[1]\n"
+ "fmla z21.s, z7.s, z3.s[1]\n"
+ "fmla z25.s, z7.s, z4.s[1]\n"
+ "fmla z29.s, z7.s, z5.s[1]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.s, z6.s, z0.s[1]\n"
+ "fmla z14.s, z6.s, z1.s[1]\n"
+ "fmla z18.s, z6.s, z2.s[1]\n"
+ "fmla z22.s, z6.s, z3.s[1]\n"
+ "fmla z26.s, z6.s, z4.s[1]\n"
+ "fmla z30.s, z6.s, z5.s[1]\n"
+ "fmla z11.s, z7.s, z0.s[1]\n"
+ "fmla z15.s, z7.s, z1.s[1]\n"
+ "fmla z19.s, z7.s, z2.s[1]\n"
+ "fmla z23.s, z7.s, z3.s[1]\n"
+ "fmla z27.s, z7.s, z4.s[1]\n"
+ "fmla z31.s, z7.s, z5.s[1]\n"
+ "ble 82f\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z8.s, z6.s, z0.s[2]\n"
+ "fmla z12.s, z6.s, z1.s[2]\n"
+ "fmla z16.s, z6.s, z2.s[2]\n"
+ "fmla z20.s, z6.s, z3.s[2]\n"
+ "subs x26, x26, #0x1\n"
+ "addvl x11, x11, #1\n"
+ "fmla z24.s, z6.s, z4.s[2]\n"
+ "fmla z28.s, z6.s, z5.s[2]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z9.s, z7.s, z0.s[2]\n"
+ "fmla z13.s, z7.s, z1.s[2]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z17.s, z7.s, z2.s[2]\n"
+ "fmla z21.s, z7.s, z3.s[2]\n"
+ "fmla z25.s, z7.s, z4.s[2]\n"
+ "fmla z29.s, z7.s, z5.s[2]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.s, z6.s, z0.s[2]\n"
+ "fmla z14.s, z6.s, z1.s[2]\n"
+ "fmla z18.s, z6.s, z2.s[2]\n"
+ "fmla z22.s, z6.s, z3.s[2]\n"
+ "fmla z26.s, z6.s, z4.s[2]\n"
+ "fmla z30.s, z6.s, z5.s[2]\n"
+ "fmla z11.s, z7.s, z0.s[2]\n"
+ "fmla z15.s, z7.s, z1.s[2]\n"
+ "fmla z19.s, z7.s, z2.s[2]\n"
+ "fmla z23.s, z7.s, z3.s[2]\n"
+ "fmla z27.s, z7.s, z4.s[2]\n"
+ "fmla z31.s, z7.s, z5.s[2]\n"
+ "ble 82f\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "fmla z8.s, z6.s, z0.s[3]\n"
+ "fmla z12.s, z6.s, z1.s[3]\n"
+ "fmla z16.s, z6.s, z2.s[3]\n"
+ "fmla z20.s, z6.s, z3.s[3]\n"
+ "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z24.s, z6.s, z4.s[3]\n"
+ "fmla z28.s, z6.s, z5.s[3]\n"
+ "ld1w { z6.s }, p5/Z, [x9]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z9.s, z7.s, z0.s[3]\n"
+ "fmla z13.s, z7.s, z1.s[3]\n"
+ "fmla z17.s, z7.s, z2.s[3]\n"
+ "fmla z21.s, z7.s, z3.s[3]\n"
+ "fmla z25.s, z7.s, z4.s[3]\n"
+ "fmla z29.s, z7.s, z5.s[3]\n"
+ "ld1w { z7.s }, p5/Z, [x28]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z10.s, z6.s, z0.s[3]\n"
+ "fmla z14.s, z6.s, z1.s[3]\n"
+ "fmla z18.s, z6.s, z2.s[3]\n"
+ "fmla z22.s, z6.s, z3.s[3]\n"
+ "fmla z26.s, z6.s, z4.s[3]\n"
+ "fmla z30.s, z6.s, z5.s[3]\n"
+ "fmla z11.s, z7.s, z0.s[3]\n"
+ "fmla z15.s, z7.s, z1.s[3]\n"
+ "fmla z19.s, z7.s, z2.s[3]\n"
+ "fmla z23.s, z7.s, z3.s[3]\n"
+ "fmla z27.s, z7.s, z4.s[3]\n"
+ "fmla z31.s, z7.s, z5.s[3]\n"
+ "82:" // Height 6: Multiply loop: multiply skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
+ "cmp x27, x19\n"
+ "bne 77b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x12, x19, LSL #2\n"
+ "add x23, x24, x19, LSL #2\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "tbz %x[flags], #1, 83f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p5/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x19]\n"
+ "fmin z8.s, p5/M, z8.s, z1.s\n"
+ "fmin z9.s, p5/M, z9.s, z1.s\n"
+ "fmin z10.s, p5/M, z10.s, z1.s\n"
+ "fmin z11.s, p5/M, z11.s, z1.s\n"
+ "fmin z12.s, p5/M, z12.s, z1.s\n"
+ "fmin z13.s, p5/M, z13.s, z1.s\n"
+ "fmin z14.s, p5/M, z14.s, z1.s\n"
+ "fmin z15.s, p5/M, z15.s, z1.s\n"
+ "fmin z16.s, p5/M, z16.s, z1.s\n"
+ "fmin z17.s, p5/M, z17.s, z1.s\n"
+ "fmin z18.s, p5/M, z18.s, z1.s\n"
+ "fmin z19.s, p5/M, z19.s, z1.s\n"
+ "fmin z20.s, p5/M, z20.s, z1.s\n"
+ "fmin z21.s, p5/M, z21.s, z1.s\n"
+ "fmin z22.s, p5/M, z22.s, z1.s\n"
+ "fmin z23.s, p5/M, z23.s, z1.s\n"
+ "fmin z24.s, p5/M, z24.s, z1.s\n"
+ "fmin z25.s, p5/M, z25.s, z1.s\n"
+ "fmin z26.s, p5/M, z26.s, z1.s\n"
+ "fmin z27.s, p5/M, z27.s, z1.s\n"
+ "fmin z28.s, p5/M, z28.s, z1.s\n"
+ "fmin z29.s, p5/M, z29.s, z1.s\n"
+ "fmin z30.s, p5/M, z30.s, z1.s\n"
+ "fmin z31.s, p5/M, z31.s, z1.s\n"
+ "fmax z8.s, p5/M, z8.s, z0.s\n"
+ "fmax z9.s, p5/M, z9.s, z0.s\n"
+ "fmax z10.s, p5/M, z10.s, z0.s\n"
+ "fmax z11.s, p5/M, z11.s, z0.s\n"
+ "fmax z12.s, p5/M, z12.s, z0.s\n"
+ "fmax z13.s, p5/M, z13.s, z0.s\n"
+ "fmax z14.s, p5/M, z14.s, z0.s\n"
+ "fmax z15.s, p5/M, z15.s, z0.s\n"
+ "fmax z16.s, p5/M, z16.s, z0.s\n"
+ "fmax z17.s, p5/M, z17.s, z0.s\n"
+ "fmax z18.s, p5/M, z18.s, z0.s\n"
+ "fmax z19.s, p5/M, z19.s, z0.s\n"
+ "fmax z20.s, p5/M, z20.s, z0.s\n"
+ "fmax z21.s, p5/M, z21.s, z0.s\n"
+ "fmax z22.s, p5/M, z22.s, z0.s\n"
+ "fmax z23.s, p5/M, z23.s, z0.s\n"
+ "fmax z24.s, p5/M, z24.s, z0.s\n"
+ "fmax z25.s, p5/M, z25.s, z0.s\n"
+ "fmax z26.s, p5/M, z26.s, z0.s\n"
+ "fmax z27.s, p5/M, z27.s, z0.s\n"
+ "fmax z28.s, p5/M, z28.s, z0.s\n"
+ "fmax z29.s, p5/M, z29.s, z0.s\n"
+ "fmax z30.s, p5/M, z30.s, z0.s\n"
+ "fmax z31.s, p5/M, z31.s, z0.s\n"
+ "83:" // Height 6: No activation
+ "st1w { z8.s }, p4, [x12]\n"
+ "st1w { z9.s }, p3, [x12, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x12, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "st1w { z12.s }, p4, [x24]\n"
+ "st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x23]\n"
+ "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z20.s }, p4, [x22]\n"
+ "st1w { z21.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z22.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z23.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x21]\n"
+ "st1w { z25.s }, p3, [x21, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z28.s }, p4, [x20]\n"
+ "st1w { z29.s }, p3, [x20, #1, MUL VL]\n"
+ "st1w { z30.s }, p2, [x20, #2, MUL VL]\n"
+ "st1w { z31.s }, p1, [x20, #3, MUL VL]\n"
+ "84:" // Height 6: Writeback done
+ "decw x13, ALL, MUL #4\n"
+ "cmp x13, XZR\n"
+ "bgt 72b\n"
+ "subs %x[M], %x[M], #0x6\n"
+ "beq 86f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 85f\n"
+ "add x20, x20, #0x6\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "b 1b\n"
+ "85:" // Update direct input
+ "mov x19, #0x18\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
+ "b 1b\n"
+ "86:" // Exit
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
+ : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace arm_gemm
+#endif // ARM_COMPUTE_ENABLE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL.hpp
new file mode 100644
index 0000000000..3ee3e31206
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL.hpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#pragma once
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include "../std_transforms_sve.hpp"
+#include "../bfloat.hpp"
+#include "../kernel_weight_format.hpp"
+#include "../performance_parameters.hpp"
+
+#define ARGLIST \
+ unsigned int, const unsigned int *, \
+ IndirectInputArg<float>, \
+ size_t, size_t, \
+ const bfloat16 *, \
+ size_t, \
+ IndirectOutputArg<float>, \
+ const float *, Activation, bool
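+
+// ARGLIST mirrors the kernel entry point in generic.cpp:
+// (num_strings, string_lengths, A, M, N, B_ptr, B_stride, output, bias, act, accumulate).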
+
+namespace arm_gemm
+{
+// Actual kernel implementations
+void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL( ARGLIST );
+
+class cls_sve_ffhybrid_fp32bf16fp32_mmla_4x6VL
+{
+public:
+ typedef float lhs_operand_type;
+ typedef bfloat16 rhs_operand_type;
+ typedef float result_type;
+
+ typedef void (*kern_type)( ARGLIST );
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 4;
+ }
+ static unsigned int stripe_width()
+ {
+ return get_vector_length<float>() * 1;
+ }
+
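+ // Fixed-format weight layout for this kernel; per the naming scheme in kernel_weight_format.hpp the tag likely
+ // reads as: 2xVL-wide stripes of 64-bit blocks holding bf16 data (matching the six 2xVL B pointers in generic.cpp).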
+ static KernelWeightFormat kernel_weight_format()
+ {
+ return KernelWeightFormat::VL2VL_BL64_BF16;
+ }
+
+ static unsigned int out_width()
+ {
+ return get_vector_length<float>() * 6;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 4;
+ }
+
+ static constexpr bool supports_accumulate()
+ {
+ return true;
+ }
+
+ StdTransformsSVE<rhs_operand_type, result_type, 4, 12, 4> transforms = {};
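+ // Throughput estimate consumed by arm_gemm's kernel-selection heuristics; a single default figure (32.35)
+ // covers every CPU model here, with 1.0 as the fallback for non-fp32 instantiations.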
+ template<typename T>
+ static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
+ {
+ if (std::is_same<T, float>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 32.35 };
+ }
+ }
+
+ return { 1.0 };
+ }
+
+ // Default to the generic kernel
+ kern_type kernel=sve_ffhybrid_fp32bf16fp32_mmla_4x6VL;
+ cls_sve_ffhybrid_fp32bf16fp32_mmla_4x6VL(const CPUInfo *)
+ {
+ }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+#endif // ARM_COMPUTE_ENABLE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL/generic.cpp
new file mode 100644
index 0000000000..8e3676a007
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL/generic.cpp
@@ -0,0 +1,1464 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include "arm_gemm.hpp"
+#include "../../utils.hpp"
+#include "../../bfloat.hpp"
+
+#include <cassert>
+#include <limits>
+
+namespace arm_gemm {
+
+void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
+ unsigned int num_strings, const unsigned int *string_lengths, IndirectInputArg<float> A_arg,
+ size_t M, size_t N, const bfloat16 *B_ptr, size_t B_stride, IndirectOutputArg<float> output_arg,
+ const float *bias, Activation act, bool accumulate
+)
+{
+ struct KernelArgs {
+ float maxval = static_cast<float>(std::numeric_limits<float>::infinity());
+ float minval = - static_cast<float>(std::numeric_limits<float>::infinity());
+ unsigned int num_strings = {};
+ const unsigned int *string_lengths = {};
+ size_t N = {};
+ const bfloat16 *B_ptr = {};
+ const bfloat16 *cur_B_ptr = {};
+ size_t B_stride = {};
+ size_t output_offset = {};
+ size_t input_initial_col = {};
+ size_t input_offset = {};
+ } ka;
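+ // All state the asm needs is gathered in ka and addressed via %[args_ptr] with offsetof(KernelArgs, ...) operands.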
+
+ unsigned long flags=0;
+ void *output_ptr;
+ void *input_ptr;
+
+ if (output_arg.is_indirect) {
+ output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_offset=output_arg.indirect.offset;
+ flags |= 0x4;
+ } else {
+ output_ptr=(void *)(output_arg.direct.base);
+ ka.output_offset=output_arg.direct.stride;
+ }
+
+ if (A_arg.is_indirect) {
+ input_ptr=(void *)(A_arg.indirect.ptr);
+ ka.input_offset=A_arg.indirect.start_row;
+ ka.input_initial_col=A_arg.indirect.start_col;
+ flags |= 0x8;
+ } else {
+ assert(num_strings==1);
+ input_ptr=(void *)(A_arg.direct.base);
+ ka.input_offset=A_arg.direct.stride;
+ }
+ if (accumulate) {
+ flags |= 0x1;
+ }
+ ka.num_strings = num_strings;
+ ka.string_lengths = string_lengths;
+ ka.N = N;
+ ka.B_ptr = B_ptr;
+ ka.B_stride = B_stride;
+ switch(act.type) {
+ default:
+ case Activation::Type::None:
+ break;
+ case Activation::Type::BoundedReLU:
+ ka.maxval = static_cast<float>(act.param1);
+ /* fall through */
+ case Activation::Type::ReLU:
+ ka.minval = 0;
+ flags |= 0x2;
+ break;
+ }
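+ // flags bit map, as set above: bit 0 = accumulate into C, bit 1 = apply min/max clamp,
+ // bit 2 = indirect output, bit 3 = indirect (string) input; the asm tests bits 0, 1 and 3.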
+ __asm__ __volatile__(
+ "ptrue p7.b\n"
+ "1:" // Row loop
+ "cmp %x[M], #0x4\n"
+ "bge 43f\n"
+ "cmp %x[M], #0x2\n"
+ "bgt 29f\n"
+ "beq 15f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "2:" // Height 1: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "cntw x20, ALL, MUL #5\n"
+ "add x27, x28, x19, LSL #1\n"
+ "add x26, x27, x19, LSL #1\n"
+ "add x19, x26, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 3f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x26, x11\n"
+ "bgt 3f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x27, x11\n"
+ "bgt 3f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 3f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 3f\n"
+ "mov x10, x11\n"
+ "3:" // Height 1: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p6.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p5.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 4f\n"
+ "ld1w { z8.s }, p7/Z, [x14]\n"
+ "ld1w { z9.s }, p7/Z, [x14, #1, MUL VL]\n"
+ "zip2 z14.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
+ "ld1w { z10.s }, p7/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p7/Z, [x14, #3, MUL VL]\n"
+ "zip2 z15.d, z9.d, z9.d\n"
+ "zip1 z9.d, z9.d, z9.d\n"
+ "ld1w { z12.s }, p7/Z, [x14, #4, MUL VL]\n"
+ "ld1w { z13.s }, p7/Z, [x14, #5, MUL VL]\n"
+ "zip2 z16.d, z10.d, z10.d\n"
+ "zip1 z10.d, z10.d, z10.d\n"
+ "zip2 z17.d, z11.d, z11.d\n"
+ "zip1 z11.d, z11.d, z11.d\n"
+ "addvl x14, x14, #6\n"
+ "zip2 z18.d, z12.d, z12.d\n"
+ "zip1 z12.d, z12.d, z12.d\n"
+ "zip2 z19.d, z13.d, z13.d\n"
+ "zip1 z13.d, z13.d, z13.d\n"
+ "b 6f\n"
+ "4:" // Height 1: no bias
+ "tbz %x[flags], #0, 5f\n"
+ "ld1w { z9.s }, p6/Z, [x12]\n"
+ "ld1w { z10.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "zip1 z8.d, z9.d, z14.d\n"
+ "zip2 z14.d, z9.d, z14.d\n"
+ "ld1w { z11.s }, p4/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x12, #3, MUL VL]\n"
+ "zip1 z9.d, z10.d, z15.d\n"
+ "zip2 z15.d, z10.d, z15.d\n"
+ "ld1w { z13.s }, p2/Z, [x12, #4, MUL VL]\n"
+ "ld1w { z20.s }, p1/Z, [x12, #5, MUL VL]\n"
+ "zip1 z10.d, z11.d, z16.d\n"
+ "zip2 z16.d, z11.d, z16.d\n"
+ "zip1 z11.d, z12.d, z17.d\n"
+ "zip2 z17.d, z12.d, z17.d\n"
+ "zip1 z12.d, z13.d, z18.d\n"
+ "zip2 z18.d, z13.d, z18.d\n"
+ "zip1 z13.d, z20.d, z19.d\n"
+ "zip2 z19.d, z20.d, z19.d\n"
+ "b 6f\n"
+ "5:" // Height 1: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "mov z19.b, #0x0\n"
+ "6:" // Height 1: setup done
+ "mov x25, #0x0\n"
+ "7:" // Height 1: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w24, [x19, x25, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 8f\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "cbnz x25, 9f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19, LSL #2\n"
+ "b 9f\n"
+ "8:" // Height 1: setup direct input
+ "mov x23, %x[input_ptr]\n"
+ "9:" // Height 1: input setup done
+ "cmp x24, #0x4\n"
+ "ble 11f\n"
+ "10:" // Height 1: Multiply loop: Main loop head
+ "whilelt p0.s, XZR, x24\n"
+ "ld1rqw { z0.s }, p0/Z, [x23]\n"
+ ".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
+ "uzp1 z0.h, z0.h, z0.h\n"
+ "ld1h { z4.h }, p7/Z, [x11]\n"
+ "ld1h { z5.h }, p7/Z, [x11, #1, MUL VL]\n"
+ ".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
+ ".inst 0x6465e40e // bfmmla z14.s, z0.h, z5.h\n"
+ "ld1h { z6.h }, p7/Z, [x10]\n"
+ "ld1h { z7.h }, p7/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
+ ".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
+ "ld1h { z4.h }, p7/Z, [x9]\n"
+ "ld1h { z5.h }, p7/Z, [x9, #1, MUL VL]\n"
+ ".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
+ ".inst 0x6465e410 // bfmmla z16.s, z0.h, z5.h\n"
+ "ld1h { z6.h }, p7/Z, [x28]\n"
+ "ld1h { z7.h }, p7/Z, [x28, #1, MUL VL]\n"
+ ".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
+ ".inst 0x6467e411 // bfmmla z17.s, z0.h, z7.h\n"
+ "ld1h { z4.h }, p7/Z, [x27]\n"
+ "ld1h { z5.h }, p7/Z, [x27, #1, MUL VL]\n"
+ "sub x24, x24, #0x4\n"
+ "cmp x24, #0x4\n"
+ "ld1h { z6.h }, p7/Z, [x26]\n"
+ "ld1h { z7.h }, p7/Z, [x26, #1, MUL VL]\n"
+ ".inst 0x6464e40c // bfmmla z12.s, z0.h, z4.h\n"
+ ".inst 0x6465e412 // bfmmla z18.s, z0.h, z5.h\n"
+ ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
+ ".inst 0x6467e413 // bfmmla z19.s, z0.h, z7.h\n"
+ "add x23, x23, #0x10\n"
+ "addvl x11, x11, #2\n"
+ "addvl x10, x10, #2\n"
+ "addvl x9, x9, #2\n"
+ "addvl x28, x28, #2\n"
+ "addvl x27, x27, #2\n"
+ "addvl x26, x26, #2\n"
+ "bgt 10b\n"
+ "11:" // Height 1: Multiply loop: Single iteration only
+ "whilelt p0.s, XZR, x24\n"
+ "ld1rqw { z0.s }, p0/Z, [x23]\n"
+ ".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
+ "uzp1 z0.h, z0.h, z0.h\n"
+ "ld1h { z4.h }, p7/Z, [x11]\n"
+ "ld1h { z5.h }, p7/Z, [x11, #1, MUL VL]\n"
+ ".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
+ ".inst 0x6465e40e // bfmmla z14.s, z0.h, z5.h\n"
+ "ld1h { z6.h }, p7/Z, [x10]\n"
+ "ld1h { z7.h }, p7/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
+ ".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
+ "ld1h { z4.h }, p7/Z, [x9]\n"
+ "ld1h { z5.h }, p7/Z, [x9, #1, MUL VL]\n"
+ ".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
+ ".inst 0x6465e410 // bfmmla z16.s, z0.h, z5.h\n"
+ "ld1h { z6.h }, p7/Z, [x28]\n"
+ "ld1h { z7.h }, p7/Z, [x28, #1, MUL VL]\n"
+ ".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
+ ".inst 0x6467e411 // bfmmla z17.s, z0.h, z7.h\n"
+ "ld1h { z4.h }, p7/Z, [x27]\n"
+ "ld1h { z5.h }, p7/Z, [x27, #1, MUL VL]\n"
+ ".inst 0x6464e40c // bfmmla z12.s, z0.h, z4.h\n"
+ ".inst 0x6465e412 // bfmmla z18.s, z0.h, z5.h\n"
+ "ld1h { z6.h }, p7/Z, [x26]\n"
+ "ld1h { z7.h }, p7/Z, [x26, #1, MUL VL]\n"
+ ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
+ ".inst 0x6467e413 // bfmmla z19.s, z0.h, z7.h\n"
+ "addvl x11, x11, #2\n"
+ "addvl x10, x10, #2\n"
+ "addvl x9, x9, #2\n"
+ "addvl x28, x28, #2\n"
+ "addvl x27, x27, #2\n"
+ "addvl x26, x26, #2\n"
+ "12:" // Height 1: Multiply loop: multiply skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
+ "bne 7b\n"
+ "uzp1 z8.d, z8.d, z14.d\n"
+ "uzp1 z9.d, z9.d, z15.d\n"
+ "uzp1 z10.d, z10.d, z16.d\n"
+ "uzp1 z11.d, z11.d, z17.d\n"
+ "uzp1 z12.d, z12.d, z18.d\n"
+ "uzp1 z13.d, z13.d, z19.d\n"
+ "tbz %x[flags], #1, 13f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p7/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p7/Z, [x19]\n"
+ "fmin z8.s, p7/M, z8.s, z1.s\n"
+ "fmin z9.s, p7/M, z9.s, z1.s\n"
+ "fmin z10.s, p7/M, z10.s, z1.s\n"
+ "fmin z11.s, p7/M, z11.s, z1.s\n"
+ "fmin z12.s, p7/M, z12.s, z1.s\n"
+ "fmin z13.s, p7/M, z13.s, z1.s\n"
+ "fmax z8.s, p7/M, z8.s, z0.s\n"
+ "fmax z9.s, p7/M, z9.s, z0.s\n"
+ "fmax z10.s, p7/M, z10.s, z0.s\n"
+ "fmax z11.s, p7/M, z11.s, z0.s\n"
+ "fmax z12.s, p7/M, z12.s, z0.s\n"
+ "fmax z13.s, p7/M, z13.s, z0.s\n"
+ "13:" // Height 1: No activation
+ "st1w { z8.s }, p6, [x12]\n"
+ "st1w { z9.s }, p5, [x12, #1, MUL VL]\n"
+ "st1w { z10.s }, p4, [x12, #2, MUL VL]\n"
+ "st1w { z11.s }, p3, [x12, #3, MUL VL]\n"
+ "st1w { z12.s }, p2, [x12, #4, MUL VL]\n"
+ "st1w { z13.s }, p1, [x12, #5, MUL VL]\n"
+ "addvl x12, x12, #6\n"
+ "14:" // Height 1: Writeback done
+ "decw x13, ALL, MUL #6\n"
+ "cmp x13, XZR\n"
+ "bgt 2b\n"
+ "b 58f\n"
+ "15:" // Height 2
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "16:" // Height 2: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "cntw x20, ALL, MUL #5\n"
+ "add x27, x28, x19, LSL #1\n"
+ "add x26, x27, x19, LSL #1\n"
+ "add x19, x26, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 17f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x26, x11\n"
+ "bgt 17f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x27, x11\n"
+ "bgt 17f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 17f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 17f\n"
+ "mov x10, x11\n"
+ "17:" // Height 2: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p6.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p5.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 18f\n"
+ "ld1w { z8.s }, p7/Z, [x14]\n"
+ "ld1w { z9.s }, p7/Z, [x14, #1, MUL VL]\n"
+ "zip2 z14.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
+ "ld1w { z10.s }, p7/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p7/Z, [x14, #3, MUL VL]\n"
+ "zip2 z15.d, z9.d, z9.d\n"
+ "zip1 z9.d, z9.d, z9.d\n"
+ "ld1w { z12.s }, p7/Z, [x14, #4, MUL VL]\n"
+ "ld1w { z13.s }, p7/Z, [x14, #5, MUL VL]\n"
+ "zip2 z16.d, z10.d, z10.d\n"
+ "zip1 z10.d, z10.d, z10.d\n"
+ "zip2 z17.d, z11.d, z11.d\n"
+ "zip1 z11.d, z11.d, z11.d\n"
+ "addvl x14, x14, #6\n"
+ "zip2 z18.d, z12.d, z12.d\n"
+ "zip1 z12.d, z12.d, z12.d\n"
+ "zip2 z19.d, z13.d, z13.d\n"
+ "zip1 z13.d, z13.d, z13.d\n"
+ "b 20f\n"
+ "18:" // Height 2: no bias
+ "tbz %x[flags], #0, 19f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x12, x19, LSL #2\n"
+ "ld1w { z9.s }, p6/Z, [x12]\n"
+ "ld1w { z10.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z13.s }, p2/Z, [x12, #4, MUL VL]\n"
+ "ld1w { z20.s }, p1/Z, [x12, #5, MUL VL]\n"
+ "ld1w { z14.s }, p6/Z, [x22]\n"
+ "zip1 z8.d, z9.d, z14.d\n"
+ "zip2 z14.d, z9.d, z14.d\n"
+ "ld1w { z15.s }, p5/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x22, #2, MUL VL]\n"
+ "zip1 z9.d, z10.d, z15.d\n"
+ "zip2 z15.d, z10.d, z15.d\n"
+ "ld1w { z17.s }, p3/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x22, #4, MUL VL]\n"
+ "zip1 z10.d, z11.d, z16.d\n"
+ "zip2 z16.d, z11.d, z16.d\n"
+ "ld1w { z19.s }, p1/Z, [x22, #5, MUL VL]\n"
+ "zip1 z11.d, z12.d, z17.d\n"
+ "zip2 z17.d, z12.d, z17.d\n"
+ "zip1 z12.d, z13.d, z18.d\n"
+ "zip2 z18.d, z13.d, z18.d\n"
+ "zip1 z13.d, z20.d, z19.d\n"
+ "zip2 z19.d, z20.d, z19.d\n"
+ "b 20f\n"
+ "19:" // Height 2: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "mov z19.b, #0x0\n"
+ "20:" // Height 2: setup done
+ "mov x25, #0x0\n"
+ "21:" // Height 2: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w24, [x19, x25, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 22f\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "cbnz x25, 23f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "b 23f\n"
+ "22:" // Height 2: setup direct input
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "23:" // Height 2: input setup done
+ "cmp x24, #0x4\n"
+ "ble 25f\n"
+ "24:" // Height 2: Multiply loop: Main loop head
+ "whilelt p0.s, XZR, x24\n"
+ "ld1rqw { z0.s }, p0/Z, [x23]\n"
+ "ld1rqw { z1.s }, p0/Z, [x22]\n"
+ ".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
+ ".inst 0x658abc21 // bfcvt z1.h, p7/M, z1.s\n"
+ "uzp1 z0.h, z0.h, z0.h\n"
+ "ld1h { z4.h }, p7/Z, [x11]\n"
+ "ld1h { z5.h }, p7/Z, [x11, #1, MUL VL]\n"
+ "uzp1 z1.h, z1.h, z1.h\n"
+ "trn1 z0.d, z0.d, z1.d\n"
+ "ld1h { z6.h }, p7/Z, [x10]\n"
+ "ld1h { z7.h }, p7/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
+ ".inst 0x6465e40e // bfmmla z14.s, z0.h, z5.h\n"
+ "ld1h { z4.h }, p7/Z, [x9]\n"
+ "ld1h { z5.h }, p7/Z, [x9, #1, MUL VL]\n"
+ ".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
+ ".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
+ "ld1h { z6.h }, p7/Z, [x28]\n"
+ "ld1h { z7.h }, p7/Z, [x28, #1, MUL VL]\n"
+ ".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
+ ".inst 0x6465e410 // bfmmla z16.s, z0.h, z5.h\n"
+ "ld1h { z4.h }, p7/Z, [x27]\n"
+ "ld1h { z5.h }, p7/Z, [x27, #1, MUL VL]\n"
+ ".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
+ ".inst 0x6467e411 // bfmmla z17.s, z0.h, z7.h\n"
+ "ld1h { z6.h }, p7/Z, [x26]\n"
+ "ld1h { z7.h }, p7/Z, [x26, #1, MUL VL]\n"
+ "sub x24, x24, #0x4\n"
+ "cmp x24, #0x4\n"
+ ".inst 0x6464e40c // bfmmla z12.s, z0.h, z4.h\n"
+ ".inst 0x6465e412 // bfmmla z18.s, z0.h, z5.h\n"
+ ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
+ ".inst 0x6467e413 // bfmmla z19.s, z0.h, z7.h\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "addvl x11, x11, #2\n"
+ "addvl x10, x10, #2\n"
+ "addvl x9, x9, #2\n"
+ "addvl x28, x28, #2\n"
+ "addvl x27, x27, #2\n"
+ "addvl x26, x26, #2\n"
+ "bgt 24b\n"
+ "25:" // Height 2: Multiply loop: Single iteration only
+ "whilelt p0.s, XZR, x24\n"
+ "ld1rqw { z0.s }, p0/Z, [x23]\n"
+ "ld1rqw { z1.s }, p0/Z, [x22]\n"
+ ".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
+ ".inst 0x658abc21 // bfcvt z1.h, p7/M, z1.s\n"
+ "uzp1 z0.h, z0.h, z0.h\n"
+ "ld1h { z4.h }, p7/Z, [x11]\n"
+ "ld1h { z5.h }, p7/Z, [x11, #1, MUL VL]\n"
+ "uzp1 z1.h, z1.h, z1.h\n"
+ "trn1 z0.d, z0.d, z1.d\n"
+ "ld1h { z6.h }, p7/Z, [x10]\n"
+ "ld1h { z7.h }, p7/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
+ ".inst 0x6465e40e // bfmmla z14.s, z0.h, z5.h\n"
+ "ld1h { z4.h }, p7/Z, [x9]\n"
+ "ld1h { z5.h }, p7/Z, [x9, #1, MUL VL]\n"
+ ".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
+ ".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
+ "ld1h { z6.h }, p7/Z, [x28]\n"
+ "ld1h { z7.h }, p7/Z, [x28, #1, MUL VL]\n"
+ ".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
+ ".inst 0x6465e410 // bfmmla z16.s, z0.h, z5.h\n"
+ "ld1h { z4.h }, p7/Z, [x27]\n"
+ "ld1h { z5.h }, p7/Z, [x27, #1, MUL VL]\n"
+ ".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
+ ".inst 0x6467e411 // bfmmla z17.s, z0.h, z7.h\n"
+ "ld1h { z6.h }, p7/Z, [x26]\n"
+ "ld1h { z7.h }, p7/Z, [x26, #1, MUL VL]\n"
+ ".inst 0x6464e40c // bfmmla z12.s, z0.h, z4.h\n"
+ ".inst 0x6465e412 // bfmmla z18.s, z0.h, z5.h\n"
+ "addvl x11, x11, #2\n"
+ "addvl x10, x10, #2\n"
+ ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
+ ".inst 0x6467e413 // bfmmla z19.s, z0.h, z7.h\n"
+ "addvl x9, x9, #2\n"
+ "addvl x28, x28, #2\n"
+ "addvl x27, x27, #2\n"
+ "addvl x26, x26, #2\n"
+ "26:" // Height 2: Multiply loop: multiply skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
+ "bne 21b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "uzp1 z4.d, z8.d, z14.d\n"
+ "uzp2 z8.d, z8.d, z14.d\n"
+ "add x22, x12, x19, LSL #2\n"
+ "uzp1 z14.d, z9.d, z15.d\n"
+ "uzp2 z9.d, z9.d, z15.d\n"
+ "uzp1 z15.d, z10.d, z16.d\n"
+ "uzp2 z10.d, z10.d, z16.d\n"
+ "uzp1 z16.d, z11.d, z17.d\n"
+ "uzp2 z11.d, z11.d, z17.d\n"
+ "uzp1 z17.d, z12.d, z18.d\n"
+ "uzp2 z12.d, z12.d, z18.d\n"
+ "uzp1 z18.d, z13.d, z19.d\n"
+ "uzp2 z13.d, z13.d, z19.d\n"
+ "tbz %x[flags], #1, 27f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p7/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p7/Z, [x19]\n"
+ "fmin z4.s, p7/M, z4.s, z1.s\n"
+ "fmin z14.s, p7/M, z14.s, z1.s\n"
+ "fmin z15.s, p7/M, z15.s, z1.s\n"
+ "fmin z16.s, p7/M, z16.s, z1.s\n"
+ "fmin z17.s, p7/M, z17.s, z1.s\n"
+ "fmin z18.s, p7/M, z18.s, z1.s\n"
+ "fmin z8.s, p7/M, z8.s, z1.s\n"
+ "fmin z9.s, p7/M, z9.s, z1.s\n"
+ "fmin z10.s, p7/M, z10.s, z1.s\n"
+ "fmin z11.s, p7/M, z11.s, z1.s\n"
+ "fmin z12.s, p7/M, z12.s, z1.s\n"
+ "fmin z13.s, p7/M, z13.s, z1.s\n"
+ "fmax z4.s, p7/M, z4.s, z0.s\n"
+ "fmax z14.s, p7/M, z14.s, z0.s\n"
+ "fmax z15.s, p7/M, z15.s, z0.s\n"
+ "fmax z16.s, p7/M, z16.s, z0.s\n"
+ "fmax z17.s, p7/M, z17.s, z0.s\n"
+ "fmax z18.s, p7/M, z18.s, z0.s\n"
+ "fmax z8.s, p7/M, z8.s, z0.s\n"
+ "fmax z9.s, p7/M, z9.s, z0.s\n"
+ "fmax z10.s, p7/M, z10.s, z0.s\n"
+ "fmax z11.s, p7/M, z11.s, z0.s\n"
+ "fmax z12.s, p7/M, z12.s, z0.s\n"
+ "fmax z13.s, p7/M, z13.s, z0.s\n"
+ "27:" // Height 2: No activation
+ "st1w { z4.s }, p6, [x12]\n"
+ "st1w { z14.s }, p5, [x12, #1, MUL VL]\n"
+ "st1w { z15.s }, p4, [x12, #2, MUL VL]\n"
+ "st1w { z16.s }, p3, [x12, #3, MUL VL]\n"
+ "st1w { z17.s }, p2, [x12, #4, MUL VL]\n"
+ "st1w { z18.s }, p1, [x12, #5, MUL VL]\n"
+ "addvl x12, x12, #6\n"
+ "st1w { z8.s }, p6, [x22]\n"
+ "st1w { z9.s }, p5, [x22, #1, MUL VL]\n"
+ "st1w { z10.s }, p4, [x22, #2, MUL VL]\n"
+ "st1w { z11.s }, p3, [x22, #3, MUL VL]\n"
+ "st1w { z12.s }, p2, [x22, #4, MUL VL]\n"
+ "st1w { z13.s }, p1, [x22, #5, MUL VL]\n"
+ "28:" // Height 2: Writeback done
+ "decw x13, ALL, MUL #6\n"
+ "cmp x13, XZR\n"
+ "bgt 16b\n"
+ "b 58f\n"
+ "29:" // Height 3
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "mov x14, %x[bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x12, %x[output_ptr]\n"
+ "30:" // Height 3: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "cntw x20, ALL, MUL #5\n"
+ "add x27, x28, x19, LSL #1\n"
+ "add x26, x27, x19, LSL #1\n"
+ "add x19, x26, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 31f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x26, x11\n"
+ "bgt 31f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x27, x11\n"
+ "bgt 31f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 31f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 31f\n"
+ "mov x10, x11\n"
+ "31:" // Height 3: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p6.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p5.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 32f\n"
+ "ld1w { z8.s }, p7/Z, [x14]\n"
+ "ld1w { z9.s }, p7/Z, [x14, #1, MUL VL]\n"
+ "zip2 z14.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
+ "ld1w { z10.s }, p7/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p7/Z, [x14, #3, MUL VL]\n"
+ "zip2 z15.d, z9.d, z9.d\n"
+ "zip1 z9.d, z9.d, z9.d\n"
+ "ld1w { z12.s }, p7/Z, [x14, #4, MUL VL]\n"
+ "ld1w { z13.s }, p7/Z, [x14, #5, MUL VL]\n"
+ "zip2 z16.d, z10.d, z10.d\n"
+ "zip1 z10.d, z10.d, z10.d\n"
+ "zip2 z17.d, z11.d, z11.d\n"
+ "zip1 z11.d, z11.d, z11.d\n"
+ "addvl x14, x14, #6\n"
+ "zip2 z18.d, z12.d, z12.d\n"
+ "zip1 z12.d, z12.d, z12.d\n"
+ "zip2 z19.d, z13.d, z13.d\n"
+ "zip1 z13.d, z13.d, z13.d\n"
+ "mov z20.d, z8.d\n"
+ "mov z26.d, z14.d\n"
+ "mov z21.d, z9.d\n"
+ "mov z27.d, z15.d\n"
+ "mov z22.d, z10.d\n"
+ "mov z28.d, z16.d\n"
+ "mov z23.d, z11.d\n"
+ "mov z29.d, z17.d\n"
+ "mov z24.d, z12.d\n"
+ "mov z30.d, z18.d\n"
+ "mov z25.d, z13.d\n"
+ "mov z31.d, z19.d\n"
+ "b 34f\n"
+ "32:" // Height 3: no bias
+ "tbz %x[flags], #0, 33f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x12, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z9.s }, p6/Z, [x12]\n"
+ "ld1w { z10.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z13.s }, p2/Z, [x12, #4, MUL VL]\n"
+ "ld1w { z20.s }, p1/Z, [x12, #5, MUL VL]\n"
+ "ld1w { z14.s }, p6/Z, [x22]\n"
+ "zip1 z8.d, z9.d, z14.d\n"
+ "zip2 z14.d, z9.d, z14.d\n"
+ "ld1w { z15.s }, p5/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x22, #2, MUL VL]\n"
+ "zip1 z9.d, z10.d, z15.d\n"
+ "zip2 z15.d, z10.d, z15.d\n"
+ "ld1w { z17.s }, p3/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x22, #4, MUL VL]\n"
+ "zip1 z10.d, z11.d, z16.d\n"
+ "zip2 z16.d, z11.d, z16.d\n"
+ "ld1w { z19.s }, p1/Z, [x22, #5, MUL VL]\n"
+ "ld1w { z21.s }, p6/Z, [x21]\n"
+ "zip1 z11.d, z12.d, z17.d\n"
+ "zip2 z17.d, z12.d, z17.d\n"
+ "ld1w { z22.s }, p5/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z23.s }, p4/Z, [x21, #2, MUL VL]\n"
+ "zip1 z12.d, z13.d, z18.d\n"
+ "zip2 z18.d, z13.d, z18.d\n"
+ "ld1w { z24.s }, p3/Z, [x21, #3, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x21, #4, MUL VL]\n"
+ "zip1 z13.d, z20.d, z19.d\n"
+ "zip2 z19.d, z20.d, z19.d\n"
+ "ld1w { z4.s }, p1/Z, [x21, #5, MUL VL]\n"
+ "zip1 z20.d, z21.d, z26.d\n"
+ "zip2 z26.d, z21.d, z26.d\n"
+ "zip1 z21.d, z22.d, z27.d\n"
+ "zip2 z27.d, z22.d, z27.d\n"
+ "zip1 z22.d, z23.d, z28.d\n"
+ "zip2 z28.d, z23.d, z28.d\n"
+ "zip1 z23.d, z24.d, z29.d\n"
+ "zip2 z29.d, z24.d, z29.d\n"
+ "zip1 z24.d, z25.d, z30.d\n"
+ "zip2 z30.d, z25.d, z30.d\n"
+ "zip1 z25.d, z4.d, z31.d\n"
+ "zip2 z31.d, z4.d, z31.d\n"
+ "b 34f\n"
+ "33:" // Height 3: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "mov z19.b, #0x0\n"
+ "mov z20.b, #0x0\n"
+ "mov z21.b, #0x0\n"
+ "mov z22.b, #0x0\n"
+ "mov z23.b, #0x0\n"
+ "mov z24.b, #0x0\n"
+ "mov z25.b, #0x0\n"
+ "mov z26.b, #0x0\n"
+ "mov z27.b, #0x0\n"
+ "mov z28.b, #0x0\n"
+ "mov z29.b, #0x0\n"
+ "mov z30.b, #0x0\n"
+ "mov z31.b, #0x0\n"
+ "34:" // Height 3: setup done
+ "mov x25, #0x0\n"
+ "35:" // Height 3: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w24, [x19, x25, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 36f\n"
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "cbnz x25, 37f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "b 37f\n"
+ "36:" // Height 3: setup direct input
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "37:" // Height 3: input setup done
+ "cmp x24, #0x4\n"
+ "ble 39f\n"
+ "38:" // Height 3: Multiply loop: Main loop head
+ "whilelt p0.s, XZR, x24\n"
+ "ld1rqw { z0.s }, p0/Z, [x23]\n"
+ "ld1rqw { z1.s }, p0/Z, [x22]\n"
+ ".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
+ "ld1rqw { z2.s }, p0/Z, [x21]\n"
+ ".inst 0x658abc21 // bfcvt z1.h, p7/M, z1.s\n"
+ "uzp1 z0.h, z0.h, z0.h\n"
+ "ld1h { z4.h }, p7/Z, [x11]\n"
+ "uzp1 z1.h, z1.h, z1.h\n"
+ ".inst 0x658abc42 // bfcvt z2.h, p7/M, z2.s\n"
+ "ld1h { z5.h }, p7/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x10]\n"
+ "trn1 z0.d, z0.d, z1.d\n"
+ "uzp1 z2.h, z2.h, z2.h\n"
+ "ld1h { z7.h }, p7/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
+ ".inst 0x6464e454 // bfmmla z20.s, z2.h, z4.h\n"
+ ".inst 0x6465e40e // bfmmla z14.s, z0.h, z5.h\n"
+ "ld1h { z4.h }, p7/Z, [x9]\n"
+ "sub x24, x24, #0x4\n"
+ ".inst 0x6465e45a // bfmmla z26.s, z2.h, z5.h\n"
+ ".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
+ "ld1h { z5.h }, p7/Z, [x9, #1, MUL VL]\n"
+ "cmp x24, #0x4\n"
+ ".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
+ ".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
+ "ld1h { z6.h }, p7/Z, [x28]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x6467e45b // bfmmla z27.s, z2.h, z7.h\n"
+ "ld1h { z7.h }, p7/Z, [x28, #1, MUL VL]\n"
+ ".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x6464e456 // bfmmla z22.s, z2.h, z4.h\n"
+ ".inst 0x6465e410 // bfmmla z16.s, z0.h, z5.h\n"
+ "ld1h { z4.h }, p7/Z, [x27]\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x6465e45c // bfmmla z28.s, z2.h, z5.h\n"
+ ".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
+ "ld1h { z5.h }, p7/Z, [x27, #1, MUL VL]\n"
+ "addvl x11, x11, #2\n"
+ ".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
+ ".inst 0x6467e411 // bfmmla z17.s, z0.h, z7.h\n"
+ "ld1h { z6.h }, p7/Z, [x26]\n"
+ "addvl x10, x10, #2\n"
+ ".inst 0x6467e45d // bfmmla z29.s, z2.h, z7.h\n"
+ "ld1h { z7.h }, p7/Z, [x26, #1, MUL VL]\n"
+ ".inst 0x6464e40c // bfmmla z12.s, z0.h, z4.h\n"
+ "addvl x9, x9, #2\n"
+ ".inst 0x6464e458 // bfmmla z24.s, z2.h, z4.h\n"
+ ".inst 0x6465e412 // bfmmla z18.s, z0.h, z5.h\n"
+ "addvl x28, x28, #2\n"
+ "addvl x27, x27, #2\n"
+ ".inst 0x6465e45e // bfmmla z30.s, z2.h, z5.h\n"
+ ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
+ "addvl x26, x26, #2\n"
+ ".inst 0x6466e459 // bfmmla z25.s, z2.h, z6.h\n"
+ ".inst 0x6467e413 // bfmmla z19.s, z0.h, z7.h\n"
+ ".inst 0x6467e45f // bfmmla z31.s, z2.h, z7.h\n"
+ "bgt 38b\n"
+ "39:" // Height 3: Multiply loop: Single iteration only
+ "whilelt p0.s, XZR, x24\n"
+ "ld1rqw { z0.s }, p0/Z, [x23]\n"
+ "ld1rqw { z1.s }, p0/Z, [x22]\n"
+ ".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
+ "ld1rqw { z2.s }, p0/Z, [x21]\n"
+ ".inst 0x658abc21 // bfcvt z1.h, p7/M, z1.s\n"
+ "uzp1 z0.h, z0.h, z0.h\n"
+ "ld1h { z4.h }, p7/Z, [x11]\n"
+ "uzp1 z1.h, z1.h, z1.h\n"
+ ".inst 0x658abc42 // bfcvt z2.h, p7/M, z2.s\n"
+ "ld1h { z5.h }, p7/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x10]\n"
+ "trn1 z0.d, z0.d, z1.d\n"
+ "uzp1 z2.h, z2.h, z2.h\n"
+ "ld1h { z7.h }, p7/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
+ ".inst 0x6464e454 // bfmmla z20.s, z2.h, z4.h\n"
+ ".inst 0x6465e40e // bfmmla z14.s, z0.h, z5.h\n"
+ "ld1h { z4.h }, p7/Z, [x9]\n"
+ "addvl x11, x11, #2\n"
+ ".inst 0x6465e45a // bfmmla z26.s, z2.h, z5.h\n"
+ ".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
+ "ld1h { z5.h }, p7/Z, [x9, #1, MUL VL]\n"
+ "addvl x10, x10, #2\n"
+ ".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
+ ".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
+ "ld1h { z6.h }, p7/Z, [x28]\n"
+ "addvl x9, x9, #2\n"
+ ".inst 0x6467e45b // bfmmla z27.s, z2.h, z7.h\n"
+ "ld1h { z7.h }, p7/Z, [x28, #1, MUL VL]\n"
+ ".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
+ "addvl x28, x28, #2\n"
+ ".inst 0x6464e456 // bfmmla z22.s, z2.h, z4.h\n"
+ ".inst 0x6465e410 // bfmmla z16.s, z0.h, z5.h\n"
+ "ld1h { z4.h }, p7/Z, [x27]\n"
+ ".inst 0x6465e45c // bfmmla z28.s, z2.h, z5.h\n"
+ ".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
+ "ld1h { z5.h }, p7/Z, [x27, #1, MUL VL]\n"
+ "addvl x27, x27, #2\n"
+ ".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
+ ".inst 0x6467e411 // bfmmla z17.s, z0.h, z7.h\n"
+ "ld1h { z6.h }, p7/Z, [x26]\n"
+ ".inst 0x6467e45d // bfmmla z29.s, z2.h, z7.h\n"
+ "ld1h { z7.h }, p7/Z, [x26, #1, MUL VL]\n"
+ ".inst 0x6464e40c // bfmmla z12.s, z0.h, z4.h\n"
+ "addvl x26, x26, #2\n"
+ ".inst 0x6464e458 // bfmmla z24.s, z2.h, z4.h\n"
+ ".inst 0x6465e412 // bfmmla z18.s, z0.h, z5.h\n"
+ ".inst 0x6465e45e // bfmmla z30.s, z2.h, z5.h\n"
+ ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
+ ".inst 0x6466e459 // bfmmla z25.s, z2.h, z6.h\n"
+ ".inst 0x6467e413 // bfmmla z19.s, z0.h, z7.h\n"
+ ".inst 0x6467e45f // bfmmla z31.s, z2.h, z7.h\n"
+ "40:" // Height 3: Multiply loop: multiply skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
+ "bne 35b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x12, x19, LSL #2\n"
+ "uzp1 z4.d, z8.d, z14.d\n"
+ "uzp2 z8.d, z8.d, z14.d\n"
+ "uzp1 z14.d, z9.d, z15.d\n"
+ "uzp2 z9.d, z9.d, z15.d\n"
+ "add x21, x22, x19, LSL #2\n"
+ "uzp1 z15.d, z10.d, z16.d\n"
+ "uzp2 z10.d, z10.d, z16.d\n"
+ "uzp1 z16.d, z11.d, z17.d\n"
+ "uzp2 z11.d, z11.d, z17.d\n"
+ "uzp1 z17.d, z12.d, z18.d\n"
+ "uzp2 z12.d, z12.d, z18.d\n"
+ "uzp1 z18.d, z13.d, z19.d\n"
+ "uzp2 z13.d, z13.d, z19.d\n"
+ "uzp1 z20.d, z20.d, z26.d\n"
+ "uzp1 z21.d, z21.d, z27.d\n"
+ "uzp1 z22.d, z22.d, z28.d\n"
+ "uzp1 z23.d, z23.d, z29.d\n"
+ "uzp1 z24.d, z24.d, z30.d\n"
+ "uzp1 z25.d, z25.d, z31.d\n"
+ "tbz %x[flags], #1, 41f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p7/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p7/Z, [x19]\n"
+ "fmin z4.s, p7/M, z4.s, z1.s\n"
+ "fmin z14.s, p7/M, z14.s, z1.s\n"
+ "fmin z15.s, p7/M, z15.s, z1.s\n"
+ "fmin z16.s, p7/M, z16.s, z1.s\n"
+ "fmin z17.s, p7/M, z17.s, z1.s\n"
+ "fmin z18.s, p7/M, z18.s, z1.s\n"
+ "fmin z8.s, p7/M, z8.s, z1.s\n"
+ "fmin z9.s, p7/M, z9.s, z1.s\n"
+ "fmin z10.s, p7/M, z10.s, z1.s\n"
+ "fmin z11.s, p7/M, z11.s, z1.s\n"
+ "fmin z12.s, p7/M, z12.s, z1.s\n"
+ "fmin z13.s, p7/M, z13.s, z1.s\n"
+ "fmin z20.s, p7/M, z20.s, z1.s\n"
+ "fmin z21.s, p7/M, z21.s, z1.s\n"
+ "fmin z22.s, p7/M, z22.s, z1.s\n"
+ "fmin z23.s, p7/M, z23.s, z1.s\n"
+ "fmin z24.s, p7/M, z24.s, z1.s\n"
+ "fmin z25.s, p7/M, z25.s, z1.s\n"
+ "fmax z4.s, p7/M, z4.s, z0.s\n"
+ "fmax z14.s, p7/M, z14.s, z0.s\n"
+ "fmax z15.s, p7/M, z15.s, z0.s\n"
+ "fmax z16.s, p7/M, z16.s, z0.s\n"
+ "fmax z17.s, p7/M, z17.s, z0.s\n"
+ "fmax z18.s, p7/M, z18.s, z0.s\n"
+ "fmax z8.s, p7/M, z8.s, z0.s\n"
+ "fmax z9.s, p7/M, z9.s, z0.s\n"
+ "fmax z10.s, p7/M, z10.s, z0.s\n"
+ "fmax z11.s, p7/M, z11.s, z0.s\n"
+ "fmax z12.s, p7/M, z12.s, z0.s\n"
+ "fmax z13.s, p7/M, z13.s, z0.s\n"
+ "fmax z20.s, p7/M, z20.s, z0.s\n"
+ "fmax z21.s, p7/M, z21.s, z0.s\n"
+ "fmax z22.s, p7/M, z22.s, z0.s\n"
+ "fmax z23.s, p7/M, z23.s, z0.s\n"
+ "fmax z24.s, p7/M, z24.s, z0.s\n"
+ "fmax z25.s, p7/M, z25.s, z0.s\n"
+ "41:" // Height 3: No activation
+ "st1w { z4.s }, p6, [x12]\n"
+ "st1w { z14.s }, p5, [x12, #1, MUL VL]\n"
+ "st1w { z15.s }, p4, [x12, #2, MUL VL]\n"
+ "st1w { z16.s }, p3, [x12, #3, MUL VL]\n"
+ "st1w { z17.s }, p2, [x12, #4, MUL VL]\n"
+ "st1w { z18.s }, p1, [x12, #5, MUL VL]\n"
+ "addvl x12, x12, #6\n"
+ "st1w { z8.s }, p6, [x22]\n"
+ "st1w { z9.s }, p5, [x22, #1, MUL VL]\n"
+ "st1w { z10.s }, p4, [x22, #2, MUL VL]\n"
+ "st1w { z11.s }, p3, [x22, #3, MUL VL]\n"
+ "st1w { z12.s }, p2, [x22, #4, MUL VL]\n"
+ "st1w { z13.s }, p1, [x22, #5, MUL VL]\n"
+ "st1w { z20.s }, p6, [x21]\n"
+ "st1w { z21.s }, p5, [x21, #1, MUL VL]\n"
+ "st1w { z22.s }, p4, [x21, #2, MUL VL]\n"
+ "st1w { z23.s }, p3, [x21, #3, MUL VL]\n"
+ "st1w { z24.s }, p2, [x21, #4, MUL VL]\n"
+ "st1w { z25.s }, p1, [x21, #5, MUL VL]\n"
+ "42:" // Height 3: Writeback done
+ "decw x13, ALL, MUL #6\n"
+ "cmp x13, XZR\n"
+ "bgt 30b\n"
+ "b 58f\n"
+ "43:" // Height 4
+ "ldr x19, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x20, #0x10\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x14, %x[bias]\n"
+ "mov x12, %x[output_ptr]\n"
+ "madd %x[output_ptr], x19, x20, %x[output_ptr]\n"
+ "44:" // Height 4: Column loop
+ "ldr x11, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "add x10, x11, x19, LSL #1\n"
+ "add x9, x10, x19, LSL #1\n"
+ "add x28, x9, x19, LSL #1\n"
+ "cntw x20, ALL, MUL #5\n"
+ "add x27, x28, x19, LSL #1\n"
+ "add x26, x27, x19, LSL #1\n"
+ "add x19, x26, x19, LSL #1\n"
+ "cmp x13, x20\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 45f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x26, x11\n"
+ "bgt 45f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x27, x11\n"
+ "bgt 45f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x28, x11\n"
+ "bgt 45f\n"
+ "decw x20\n"
+ "cmp x13, x20\n"
+ "mov x9, x11\n"
+ "bgt 45f\n"
+ "mov x10, x11\n"
+ "45:" // Height 4: B setup done
+ "mov x19, #0x0\n"
+ "whilelt p6.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p5.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p4.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p3.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p2.s, x19, x13\n"
+ "incw x19\n"
+ "whilelt p1.s, x19, x13\n"
+ "cbz x14, 46f\n"
+ "ld1w { z8.s }, p7/Z, [x14]\n"
+ "ld1w { z9.s }, p7/Z, [x14, #1, MUL VL]\n"
+ "zip2 z14.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
+ "ld1w { z10.s }, p7/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z11.s }, p7/Z, [x14, #3, MUL VL]\n"
+ "zip2 z15.d, z9.d, z9.d\n"
+ "zip1 z9.d, z9.d, z9.d\n"
+ "ld1w { z12.s }, p7/Z, [x14, #4, MUL VL]\n"
+ "ld1w { z13.s }, p7/Z, [x14, #5, MUL VL]\n"
+ "zip2 z16.d, z10.d, z10.d\n"
+ "zip1 z10.d, z10.d, z10.d\n"
+ "zip2 z17.d, z11.d, z11.d\n"
+ "zip1 z11.d, z11.d, z11.d\n"
+ "addvl x14, x14, #6\n"
+ "zip2 z18.d, z12.d, z12.d\n"
+ "zip1 z12.d, z12.d, z12.d\n"
+ "zip2 z19.d, z13.d, z13.d\n"
+ "zip1 z13.d, z13.d, z13.d\n"
+ "mov z20.d, z8.d\n"
+ "mov z26.d, z14.d\n"
+ "mov z21.d, z9.d\n"
+ "mov z27.d, z15.d\n"
+ "mov z22.d, z10.d\n"
+ "mov z28.d, z16.d\n"
+ "mov z23.d, z11.d\n"
+ "mov z29.d, z17.d\n"
+ "mov z24.d, z12.d\n"
+ "mov z30.d, z18.d\n"
+ "mov z25.d, z13.d\n"
+ "mov z31.d, z19.d\n"
+ "b 48f\n"
+ "46:" // Height 4: no bias
+ "tbz %x[flags], #0, 47f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x12, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "ld1w { z9.s }, p6/Z, [x12]\n"
+ "add x20, x21, x19, LSL #2\n"
+ "ld1w { z10.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "ld1w { z11.s }, p4/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z12.s }, p3/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z13.s }, p2/Z, [x12, #4, MUL VL]\n"
+ "ld1w { z20.s }, p1/Z, [x12, #5, MUL VL]\n"
+ "ld1w { z14.s }, p6/Z, [x22]\n"
+ "zip1 z8.d, z9.d, z14.d\n"
+ "zip2 z14.d, z9.d, z14.d\n"
+ "ld1w { z15.s }, p5/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z16.s }, p4/Z, [x22, #2, MUL VL]\n"
+ "zip1 z9.d, z10.d, z15.d\n"
+ "zip2 z15.d, z10.d, z15.d\n"
+ "ld1w { z17.s }, p3/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x22, #4, MUL VL]\n"
+ "zip1 z10.d, z11.d, z16.d\n"
+ "zip2 z16.d, z11.d, z16.d\n"
+ "ld1w { z19.s }, p1/Z, [x22, #5, MUL VL]\n"
+ "ld1w { z21.s }, p6/Z, [x21]\n"
+ "zip1 z11.d, z12.d, z17.d\n"
+ "zip2 z17.d, z12.d, z17.d\n"
+ "ld1w { z22.s }, p5/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z23.s }, p4/Z, [x21, #2, MUL VL]\n"
+ "zip1 z12.d, z13.d, z18.d\n"
+ "zip2 z18.d, z13.d, z18.d\n"
+ "ld1w { z24.s }, p3/Z, [x21, #3, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x21, #4, MUL VL]\n"
+ "zip1 z13.d, z20.d, z19.d\n"
+ "zip2 z19.d, z20.d, z19.d\n"
+ "ld1w { z4.s }, p1/Z, [x21, #5, MUL VL]\n"
+ "ld1w { z26.s }, p6/Z, [x20]\n"
+ "zip1 z20.d, z21.d, z26.d\n"
+ "zip2 z26.d, z21.d, z26.d\n"
+ "ld1w { z27.s }, p5/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z28.s }, p4/Z, [x20, #2, MUL VL]\n"
+ "zip1 z21.d, z22.d, z27.d\n"
+ "zip2 z27.d, z22.d, z27.d\n"
+ "ld1w { z29.s }, p3/Z, [x20, #3, MUL VL]\n"
+ "ld1w { z30.s }, p2/Z, [x20, #4, MUL VL]\n"
+ "zip1 z22.d, z23.d, z28.d\n"
+ "zip2 z28.d, z23.d, z28.d\n"
+ "ld1w { z31.s }, p1/Z, [x20, #5, MUL VL]\n"
+ "zip1 z23.d, z24.d, z29.d\n"
+ "zip2 z29.d, z24.d, z29.d\n"
+ "zip1 z24.d, z25.d, z30.d\n"
+ "zip2 z30.d, z25.d, z30.d\n"
+ "zip1 z25.d, z4.d, z31.d\n"
+ "zip2 z31.d, z4.d, z31.d\n"
+ "b 48f\n"
+ "47:" // Height 4: no accumulate
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "mov z19.b, #0x0\n"
+ "mov z20.b, #0x0\n"
+ "mov z21.b, #0x0\n"
+ "mov z22.b, #0x0\n"
+ "mov z23.b, #0x0\n"
+ "mov z24.b, #0x0\n"
+ "mov z25.b, #0x0\n"
+ "mov z26.b, #0x0\n"
+ "mov z27.b, #0x0\n"
+ "mov z28.b, #0x0\n"
+ "mov z29.b, #0x0\n"
+ "mov z30.b, #0x0\n"
+ "mov z31.b, #0x0\n"
+ "48:" // Height 4: setup done
+ "mov x25, #0x0\n"
+ "49:" // Height 4: String loop
+ "ldr x19, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr w24, [x19, x25, LSL #0x2]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 50f\n"
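+      // Flag bit 3 selects indirect input: the four row pointers x23..x20 are
+      // loaded from an array at input_ptr (path below); otherwise label 50
+      // derives them from input_ptr plus the row stride.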
+ "ldr x20, [%x[input_ptr], x25, LSL #0x3]\n"
+ "add x20, x20, x19, LSL #3\n"
+ "ldr x23, [x20, #0x0]\n"
+ "ldr x22, [x20, #0x8]\n"
+ "ldr x21, [x20, #0x10]\n"
+ "ldr x20, [x20, #0x18]\n"
+ "cbnz x25, 51f\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x23, x23, x19, LSL #2\n"
+ "add x22, x22, x19, LSL #2\n"
+ "add x21, x21, x19, LSL #2\n"
+ "add x20, x20, x19, LSL #2\n"
+ "b 51f\n"
+ "50:" // Height 4: setup direct input
+ "mov x23, %x[input_ptr]\n"
+ "add x22, x23, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "51:" // Height 4: input setup done
+ "cmp x24, #0x4\n"
+ "ble 53f\n"
+ "52:" // Height 4: Multiply loop: Main loop head
+ "whilelt p0.s, XZR, x24\n"
+ "ld1rqw { z0.s }, p0/Z, [x23]\n"
+ "ld1rqw { z1.s }, p0/Z, [x22]\n"
+ ".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
+ "ld1rqw { z2.s }, p0/Z, [x21]\n"
+ "ld1rqw { z3.s }, p0/Z, [x20]\n"
+ ".inst 0x658abc21 // bfcvt z1.h, p7/M, z1.s\n"
+ ".inst 0x658abc42 // bfcvt z2.h, p7/M, z2.s\n"
+ ".inst 0x658abc63 // bfcvt z3.h, p7/M, z3.s\n"
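+      // The fp32 A rows are narrowed in-register to bfloat16 (bfcvt), then
+      // uzp1/trn1 below pack two rows per Z register into the 2x4 operand
+      // layout that bfmmla expects.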
+ "uzp1 z0.h, z0.h, z0.h\n"
+ "ld1h { z4.h }, p7/Z, [x11]\n"
+ "ld1h { z5.h }, p7/Z, [x11, #1, MUL VL]\n"
+ "uzp1 z1.h, z1.h, z1.h\n"
+ "uzp1 z2.h, z2.h, z2.h\n"
+ "ld1h { z6.h }, p7/Z, [x10]\n"
+ "ld1h { z7.h }, p7/Z, [x10, #1, MUL VL]\n"
+ "uzp1 z3.h, z3.h, z3.h\n"
+ "trn1 z0.d, z0.d, z1.d\n"
+ ".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
+ "sub x24, x24, #0x4\n"
+ "trn1 z2.d, z2.d, z3.d\n"
+ ".inst 0x6464e454 // bfmmla z20.s, z2.h, z4.h\n"
+ ".inst 0x6465e40e // bfmmla z14.s, z0.h, z5.h\n"
+ "ld1h { z4.h }, p7/Z, [x9]\n"
+ ".inst 0x6465e45a // bfmmla z26.s, z2.h, z5.h\n"
+ ".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
+ "ld1h { z5.h }, p7/Z, [x9, #1, MUL VL]\n"
+ "cmp x24, #0x4\n"
+ ".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
+ ".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
+ "ld1h { z6.h }, p7/Z, [x28]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x6467e45b // bfmmla z27.s, z2.h, z7.h\n"
+ "ld1h { z7.h }, p7/Z, [x28, #1, MUL VL]\n"
+ ".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x6464e456 // bfmmla z22.s, z2.h, z4.h\n"
+ ".inst 0x6465e410 // bfmmla z16.s, z0.h, z5.h\n"
+ "ld1h { z4.h }, p7/Z, [x27]\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x6465e45c // bfmmla z28.s, z2.h, z5.h\n"
+ ".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
+ "ld1h { z5.h }, p7/Z, [x27, #1, MUL VL]\n"
+ "add x20, x20, #0x10\n"
+ ".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
+ ".inst 0x6467e411 // bfmmla z17.s, z0.h, z7.h\n"
+ "ld1h { z6.h }, p7/Z, [x26]\n"
+ "addvl x11, x11, #2\n"
+ ".inst 0x6467e45d // bfmmla z29.s, z2.h, z7.h\n"
+ "ld1h { z7.h }, p7/Z, [x26, #1, MUL VL]\n"
+ ".inst 0x6464e40c // bfmmla z12.s, z0.h, z4.h\n"
+ "addvl x10, x10, #2\n"
+ ".inst 0x6464e458 // bfmmla z24.s, z2.h, z4.h\n"
+ ".inst 0x6465e412 // bfmmla z18.s, z0.h, z5.h\n"
+ "addvl x9, x9, #2\n"
+ "addvl x28, x28, #2\n"
+ ".inst 0x6465e45e // bfmmla z30.s, z2.h, z5.h\n"
+ ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
+ "addvl x27, x27, #2\n"
+ "addvl x26, x26, #2\n"
+ ".inst 0x6466e459 // bfmmla z25.s, z2.h, z6.h\n"
+ ".inst 0x6467e413 // bfmmla z19.s, z0.h, z7.h\n"
+ ".inst 0x6467e45f // bfmmla z31.s, z2.h, z7.h\n"
+ "bgt 52b\n"
+ "53:" // Height 4: Multiply loop: Single iteration only
+ "whilelt p0.s, XZR, x24\n"
+ "ld1rqw { z0.s }, p0/Z, [x23]\n"
+ "ld1rqw { z1.s }, p0/Z, [x22]\n"
+ ".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
+ "ld1rqw { z2.s }, p0/Z, [x21]\n"
+ "ld1rqw { z3.s }, p0/Z, [x20]\n"
+ ".inst 0x658abc21 // bfcvt z1.h, p7/M, z1.s\n"
+ ".inst 0x658abc42 // bfcvt z2.h, p7/M, z2.s\n"
+ ".inst 0x658abc63 // bfcvt z3.h, p7/M, z3.s\n"
+ "uzp1 z0.h, z0.h, z0.h\n"
+ "ld1h { z4.h }, p7/Z, [x11]\n"
+ "ld1h { z5.h }, p7/Z, [x11, #1, MUL VL]\n"
+ "uzp1 z1.h, z1.h, z1.h\n"
+ "uzp1 z2.h, z2.h, z2.h\n"
+ "ld1h { z6.h }, p7/Z, [x10]\n"
+ "ld1h { z7.h }, p7/Z, [x10, #1, MUL VL]\n"
+ "uzp1 z3.h, z3.h, z3.h\n"
+ "trn1 z0.d, z0.d, z1.d\n"
+ ".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
+ "addvl x11, x11, #2\n"
+ "trn1 z2.d, z2.d, z3.d\n"
+ ".inst 0x6464e454 // bfmmla z20.s, z2.h, z4.h\n"
+ ".inst 0x6465e40e // bfmmla z14.s, z0.h, z5.h\n"
+ "ld1h { z4.h }, p7/Z, [x9]\n"
+ ".inst 0x6465e45a // bfmmla z26.s, z2.h, z5.h\n"
+ ".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
+ "ld1h { z5.h }, p7/Z, [x9, #1, MUL VL]\n"
+ "addvl x10, x10, #2\n"
+ ".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
+ ".inst 0x6467e40f // bfmmla z15.s, z0.h, z7.h\n"
+ "ld1h { z6.h }, p7/Z, [x28]\n"
+ "addvl x9, x9, #2\n"
+ ".inst 0x6467e45b // bfmmla z27.s, z2.h, z7.h\n"
+ "ld1h { z7.h }, p7/Z, [x28, #1, MUL VL]\n"
+ ".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
+ "addvl x28, x28, #2\n"
+ ".inst 0x6464e456 // bfmmla z22.s, z2.h, z4.h\n"
+ ".inst 0x6465e410 // bfmmla z16.s, z0.h, z5.h\n"
+ "ld1h { z4.h }, p7/Z, [x27]\n"
+ ".inst 0x6465e45c // bfmmla z28.s, z2.h, z5.h\n"
+ ".inst 0x6466e40b // bfmmla z11.s, z0.h, z6.h\n"
+ "ld1h { z5.h }, p7/Z, [x27, #1, MUL VL]\n"
+ "addvl x27, x27, #2\n"
+ ".inst 0x6466e457 // bfmmla z23.s, z2.h, z6.h\n"
+ ".inst 0x6467e411 // bfmmla z17.s, z0.h, z7.h\n"
+ "ld1h { z6.h }, p7/Z, [x26]\n"
+ ".inst 0x6467e45d // bfmmla z29.s, z2.h, z7.h\n"
+ "ld1h { z7.h }, p7/Z, [x26, #1, MUL VL]\n"
+ ".inst 0x6464e40c // bfmmla z12.s, z0.h, z4.h\n"
+ "addvl x26, x26, #2\n"
+ ".inst 0x6464e458 // bfmmla z24.s, z2.h, z4.h\n"
+ ".inst 0x6465e412 // bfmmla z18.s, z0.h, z5.h\n"
+ ".inst 0x6465e45e // bfmmla z30.s, z2.h, z5.h\n"
+ ".inst 0x6466e40d // bfmmla z13.s, z0.h, z6.h\n"
+ ".inst 0x6466e459 // bfmmla z25.s, z2.h, z6.h\n"
+ ".inst 0x6467e413 // bfmmla z19.s, z0.h, z7.h\n"
+ ".inst 0x6467e45f // bfmmla z31.s, z2.h, z7.h\n"
+ "54:" // Height 4: Multiply loop: multiply skip
+ "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
+ "cmp x25, x19\n"
+ "bne 49b\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x22, x12, x19, LSL #2\n"
+ "add x21, x22, x19, LSL #2\n"
+ "uzp1 z4.d, z8.d, z14.d\n"
+ "uzp2 z8.d, z8.d, z14.d\n"
+ "uzp1 z14.d, z9.d, z15.d\n"
+ "add x20, x21, x19, LSL #2\n"
+ "uzp2 z9.d, z9.d, z15.d\n"
+ "uzp1 z15.d, z10.d, z16.d\n"
+ "uzp2 z10.d, z10.d, z16.d\n"
+ "uzp1 z16.d, z11.d, z17.d\n"
+ "uzp2 z11.d, z11.d, z17.d\n"
+ "uzp1 z17.d, z12.d, z18.d\n"
+ "uzp2 z12.d, z12.d, z18.d\n"
+ "uzp1 z18.d, z13.d, z19.d\n"
+ "uzp2 z13.d, z13.d, z19.d\n"
+ "uzp1 z19.d, z20.d, z26.d\n"
+ "uzp2 z20.d, z20.d, z26.d\n"
+ "uzp1 z26.d, z21.d, z27.d\n"
+ "uzp2 z21.d, z21.d, z27.d\n"
+ "uzp1 z27.d, z22.d, z28.d\n"
+ "uzp2 z22.d, z22.d, z28.d\n"
+ "uzp1 z28.d, z23.d, z29.d\n"
+ "uzp2 z23.d, z23.d, z29.d\n"
+ "uzp1 z29.d, z24.d, z30.d\n"
+ "uzp2 z24.d, z24.d, z30.d\n"
+ "uzp1 z30.d, z25.d, z31.d\n"
+ "uzp2 z25.d, z25.d, z31.d\n"
+ "tbz %x[flags], #1, 55f\n"
+ "add x19, %x[args_ptr], %[offset_max]\n"
+ "ld1rw { z1.s }, p7/Z, [x19]\n"
+ "add x19, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p7/Z, [x19]\n"
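+      // Flag bit 1 enables the clamp activation: maxval/minval are broadcast
+      // from KernelArgs and applied to every accumulator as an fmin/fmax pair.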
+ "fmin z4.s, p7/M, z4.s, z1.s\n"
+ "fmin z14.s, p7/M, z14.s, z1.s\n"
+ "fmin z15.s, p7/M, z15.s, z1.s\n"
+ "fmin z16.s, p7/M, z16.s, z1.s\n"
+ "fmin z17.s, p7/M, z17.s, z1.s\n"
+ "fmin z18.s, p7/M, z18.s, z1.s\n"
+ "fmin z8.s, p7/M, z8.s, z1.s\n"
+ "fmin z9.s, p7/M, z9.s, z1.s\n"
+ "fmin z10.s, p7/M, z10.s, z1.s\n"
+ "fmin z11.s, p7/M, z11.s, z1.s\n"
+ "fmin z12.s, p7/M, z12.s, z1.s\n"
+ "fmin z13.s, p7/M, z13.s, z1.s\n"
+ "fmin z19.s, p7/M, z19.s, z1.s\n"
+ "fmin z26.s, p7/M, z26.s, z1.s\n"
+ "fmin z27.s, p7/M, z27.s, z1.s\n"
+ "fmin z28.s, p7/M, z28.s, z1.s\n"
+ "fmin z29.s, p7/M, z29.s, z1.s\n"
+ "fmin z30.s, p7/M, z30.s, z1.s\n"
+ "fmin z20.s, p7/M, z20.s, z1.s\n"
+ "fmin z21.s, p7/M, z21.s, z1.s\n"
+ "fmin z22.s, p7/M, z22.s, z1.s\n"
+ "fmin z23.s, p7/M, z23.s, z1.s\n"
+ "fmin z24.s, p7/M, z24.s, z1.s\n"
+ "fmin z25.s, p7/M, z25.s, z1.s\n"
+ "fmax z4.s, p7/M, z4.s, z0.s\n"
+ "fmax z14.s, p7/M, z14.s, z0.s\n"
+ "fmax z15.s, p7/M, z15.s, z0.s\n"
+ "fmax z16.s, p7/M, z16.s, z0.s\n"
+ "fmax z17.s, p7/M, z17.s, z0.s\n"
+ "fmax z18.s, p7/M, z18.s, z0.s\n"
+ "fmax z8.s, p7/M, z8.s, z0.s\n"
+ "fmax z9.s, p7/M, z9.s, z0.s\n"
+ "fmax z10.s, p7/M, z10.s, z0.s\n"
+ "fmax z11.s, p7/M, z11.s, z0.s\n"
+ "fmax z12.s, p7/M, z12.s, z0.s\n"
+ "fmax z13.s, p7/M, z13.s, z0.s\n"
+ "fmax z19.s, p7/M, z19.s, z0.s\n"
+ "fmax z26.s, p7/M, z26.s, z0.s\n"
+ "fmax z27.s, p7/M, z27.s, z0.s\n"
+ "fmax z28.s, p7/M, z28.s, z0.s\n"
+ "fmax z29.s, p7/M, z29.s, z0.s\n"
+ "fmax z30.s, p7/M, z30.s, z0.s\n"
+ "fmax z20.s, p7/M, z20.s, z0.s\n"
+ "fmax z21.s, p7/M, z21.s, z0.s\n"
+ "fmax z22.s, p7/M, z22.s, z0.s\n"
+ "fmax z23.s, p7/M, z23.s, z0.s\n"
+ "fmax z24.s, p7/M, z24.s, z0.s\n"
+ "fmax z25.s, p7/M, z25.s, z0.s\n"
+ "55:" // Height 4: No activation
+ "st1w { z4.s }, p6, [x12]\n"
+ "st1w { z14.s }, p5, [x12, #1, MUL VL]\n"
+ "st1w { z15.s }, p4, [x12, #2, MUL VL]\n"
+ "st1w { z16.s }, p3, [x12, #3, MUL VL]\n"
+ "st1w { z17.s }, p2, [x12, #4, MUL VL]\n"
+ "st1w { z18.s }, p1, [x12, #5, MUL VL]\n"
+ "addvl x12, x12, #6\n"
+ "st1w { z8.s }, p6, [x22]\n"
+ "st1w { z9.s }, p5, [x22, #1, MUL VL]\n"
+ "st1w { z10.s }, p4, [x22, #2, MUL VL]\n"
+ "st1w { z11.s }, p3, [x22, #3, MUL VL]\n"
+ "st1w { z12.s }, p2, [x22, #4, MUL VL]\n"
+ "st1w { z13.s }, p1, [x22, #5, MUL VL]\n"
+ "st1w { z19.s }, p6, [x21]\n"
+ "st1w { z26.s }, p5, [x21, #1, MUL VL]\n"
+ "st1w { z27.s }, p4, [x21, #2, MUL VL]\n"
+ "st1w { z28.s }, p3, [x21, #3, MUL VL]\n"
+ "st1w { z29.s }, p2, [x21, #4, MUL VL]\n"
+ "st1w { z30.s }, p1, [x21, #5, MUL VL]\n"
+ "st1w { z20.s }, p6, [x20]\n"
+ "st1w { z21.s }, p5, [x20, #1, MUL VL]\n"
+ "st1w { z22.s }, p4, [x20, #2, MUL VL]\n"
+ "st1w { z23.s }, p3, [x20, #3, MUL VL]\n"
+ "st1w { z24.s }, p2, [x20, #4, MUL VL]\n"
+ "st1w { z25.s }, p1, [x20, #5, MUL VL]\n"
+ "56:" // Height 4: Writeback done
+ "decw x13, ALL, MUL #6\n"
+ "cmp x13, XZR\n"
+ "bgt 44b\n"
+ "subs %x[M], %x[M], #0x4\n"
+ "beq 58f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 57f\n"
+ "add x20, x20, #0x4\n"
+ "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "b 1b\n"
+ "57:" // Update direct input
+ "mov x19, #0x10\n"
+ "madd %x[input_ptr], x19, x20, %x[input_ptr]\n"
+ "b 1b\n"
+ "58:" // Exit
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
+ : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "x9", "x10", "x11", "x12", "x13", "x14", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace arm_gemm
+#endif // ARM_COMPUTE_ENABLE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL.hpp
new file mode 100644
index 0000000000..5792a7152d
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL.hpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#pragma once
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include "../std_transforms_sve.hpp"
+#include "../bfloat.hpp"
+#include "../kernel_weight_format.hpp"
+#include "../performance_parameters.hpp"
+
+#define ARGLIST \
+ const bfloat16 *, const bfloat16 *, size_t, \
+ float *, int, size_t, int
+
+namespace arm_gemm
+{
+// Actual kernel implementations
+void sve_ffinterleaved_bf16fp32_mmla_8x3VL( ARGLIST );
+
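+// "ff" = fixed-format: the kernel consumes B directly in a pre-arranged
+// interleaved layout (described by kernel_weight_format() below) rather than
+// repacking it on every call.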
+class cls_sve_ffinterleaved_bf16fp32_mmla_8x3VL
+{
+public:
+ typedef bfloat16 operand_type;
+ typedef float result_type;
+
+ typedef void (*kern_type)( ARGLIST );
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 8;
+ }
+
+ static unsigned int out_width()
+ {
+ return get_vector_length<float>() * 3;
+ }
+ static unsigned int stripe_width()
+ {
+ return get_vector_length<float>();
+ }
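+    // Example: with 256-bit SVE vectors get_vector_length<float>() == 8, so a
+    // tile is 8 rows by 24 (3 x 8) columns, written out in 8-column stripes.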
+
+ static KernelWeightFormat kernel_weight_format()
+ {
+ return KernelWeightFormat::VL2VL_BL64;
+ }
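+    // The format name appears to encode the layout: a stripe two SVE vectors
+    // wide ("VL2VL") blocked 64 bits deep along K ("BL64", i.e. four bf16
+    // values, matching k_unroll()); see kernel_weight_format.hpp.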
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 4;
+ }
+
+
+ StdTransformsSVE<operand_type, result_type, 8, 6, 4, 2> transforms = {};
+ StdTransformsSVE<operand_type, result_type, 8, 6, 4, 2, true> transforms_quantized = {};
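+    // By arm_gemm convention the triples below are estimated kernel MACs per
+    // cycle plus prepare/merge bytes per cycle, consumed by the implementation
+    // selection heuristic; the single-value fallback is a bare MACs estimate.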
+ template<typename T>
+ static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
+ {
+
+ if (std::is_same<T, bfloat16>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 39.90, 8.55, 4.42 };
+ }
+ }
+
+
+ if (std::is_same<T, float>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 39.66, 5.18, 4.37 };
+ }
+ }
+
+ return { 1.0 };
+ }
+
+ // Default to the generic kernel
+ kern_type kernel=sve_ffinterleaved_bf16fp32_mmla_8x3VL;
+ cls_sve_ffinterleaved_bf16fp32_mmla_8x3VL(const CPUInfo *)
+ {
+ }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+#endif // ARM_COMPUTE_ENABLE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL/generic.cpp
new file mode 100644
index 0000000000..1f1e08d3dd
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL/generic.cpp
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include <cstddef>
+#include "../../bfloat.hpp"
+
+namespace arm_gemm {
+
+void sve_ffinterleaved_bf16fp32_mmla_8x3VL(
+ const bfloat16 *Apanel,
+ const bfloat16 *Bpanel,
+ size_t B_stride,
+ float *Cpanel,
+ int ablocks,
+ size_t N,
+ int K) {
+
+ struct KernelArgs {
+ size_t K = {};
+ const bfloat16 *Bpanel = {};
+ size_t N = {};
+ size_t B_stride = {};
+ const bfloat16 *cur_B_ptr = {};
+ } ka;
+
+ ka.K = (K/4) - 1;
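+  // K is given in elements; each bfmmla consumes four bf16 values (k_unroll),
+  // and one block is reserved here for the tail that always runs at the
+  // "main loop skip" label.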
+ ka.Bpanel = Bpanel;
+ ka.N = N;
+ ka.B_stride = B_stride;
+
+ __asm__ __volatile__(
+ "ptrue p0.b\n"
+ "1:" // Height loop
+ "ldr x25, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x24, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x25, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x23, %x[Apanel]\n"
+ "2:" // Width loop
+ "ldr x25, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cntw x22, ALL, MUL #2\n"
+ "add x21, x25, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
+ "add x19, x20, x19, LSL #1\n"
+ "cmp x24, x22\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov %x[Apanel], x23\n"
+ "bgt 3f\n"
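+      // x25/x21/x20 address three successive VL-wide column blocks of B; when
+      // N covers fewer blocks, the unused pointers alias x25 so their
+      // (discarded) loads stay within the weight buffer.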
+ "decw x22\n"
+ "cmp x24, x22\n"
+ "mov x20, x25\n"
+ "bgt 3f\n"
+ "mov x21, x25\n"
+ "3:" // B setup done
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
+ "cmp x19, #0x2\n"
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "ld1h { z4.h }, p0/Z, [x25]\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "ld1rqh { z1.h }, p0/Z, [%x[Apanel], #16]\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "ld1h { z5.h }, p0/Z, [x25, #1, MUL VL]\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "ld1rqh { z2.h }, p0/Z, [%x[Apanel], #32]\n"
+ "mov z19.b, #0x0\n"
+ "mov z20.b, #0x0\n"
+ "addvl x25, x25, #2\n"
+ "mov z21.b, #0x0\n"
+ "mov z22.b, #0x0\n"
+ "add %x[Apanel], %x[Apanel], #0x30\n"
+ "mov z23.b, #0x0\n"
+ "mov z24.b, #0x0\n"
+ "mov z25.b, #0x0\n"
+ "mov z26.b, #0x0\n"
+ "mov z27.b, #0x0\n"
+ "mov z28.b, #0x0\n"
+ "mov z29.b, #0x0\n"
+ "mov z30.b, #0x0\n"
+ "mov z31.b, #0x0\n"
+ "blt 5f\n"
+ "4:" // main loop head
+ "ld1rqh { z3.h }, p0/Z, [%x[Apanel]]\n"
+ ".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
+ ".inst 0x6465e40b // bfmmla z11.s, z0.h, z5.h\n"
+ ".inst 0x6464e42e // bfmmla z14.s, z1.h, z4.h\n"
+ ".inst 0x6465e431 // bfmmla z17.s, z1.h, z5.h\n"
+ "ld1h { z6.h }, p0/Z, [x21]\n"
+ ".inst 0x6464e454 // bfmmla z20.s, z2.h, z4.h\n"
+ ".inst 0x6465e457 // bfmmla z23.s, z2.h, z5.h\n"
+ "ld1h { z7.h }, p0/Z, [x21, #1, MUL VL]\n"
+ ".inst 0x6464e47a // bfmmla z26.s, z3.h, z4.h\n"
+ ".inst 0x6465e47d // bfmmla z29.s, z3.h, z5.h\n"
+ "ld1h { z4.h }, p0/Z, [x20]\n"
+ "ld1h { z5.h }, p0/Z, [x20, #1, MUL VL]\n"
+ ".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
+ ".inst 0x6467e40c // bfmmla z12.s, z0.h, z7.h\n"
+ ".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
+ ".inst 0x6467e432 // bfmmla z18.s, z1.h, z7.h\n"
+ "sub x19, x19, #0x2\n"
+ ".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
+ ".inst 0x6467e458 // bfmmla z24.s, z2.h, z7.h\n"
+ "cmp x19, #0x2\n"
+ ".inst 0x6466e47b // bfmmla z27.s, z3.h, z6.h\n"
+ ".inst 0x6467e47e // bfmmla z30.s, z3.h, z7.h\n"
+ "ld1h { z6.h }, p0/Z, [x25]\n"
+ ".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
+ ".inst 0x6465e40d // bfmmla z13.s, z0.h, z5.h\n"
+ "ld1rqh { z0.h }, p0/Z, [%x[Apanel], #16]\n"
+ ".inst 0x6464e430 // bfmmla z16.s, z1.h, z4.h\n"
+ ".inst 0x6465e433 // bfmmla z19.s, z1.h, z5.h\n"
+ "ld1rqh { z1.h }, p0/Z, [%x[Apanel], #32]\n"
+ ".inst 0x6464e456 // bfmmla z22.s, z2.h, z4.h\n"
+ ".inst 0x6465e459 // bfmmla z25.s, z2.h, z5.h\n"
+ "ld1h { z7.h }, p0/Z, [x25, #1, MUL VL]\n"
+ ".inst 0x6464e47c // bfmmla z28.s, z3.h, z4.h\n"
+ ".inst 0x6465e47f // bfmmla z31.s, z3.h, z5.h\n"
+ "ld1rqh { z2.h }, p0/Z, [%x[Apanel], #48]\n"
+ "ld1rqh { z3.h }, p0/Z, [%x[Apanel], #64]\n"
+ ".inst 0x6466e408 // bfmmla z8.s, z0.h, z6.h\n"
+ ".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
+ ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
+ ".inst 0x6467e431 // bfmmla z17.s, z1.h, z7.h\n"
+ "ld1h { z4.h }, p0/Z, [x21, #2, MUL VL]\n"
+ ".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
+ ".inst 0x6467e457 // bfmmla z23.s, z2.h, z7.h\n"
+ "ld1h { z5.h }, p0/Z, [x21, #3, MUL VL]\n"
+ ".inst 0x6466e47a // bfmmla z26.s, z3.h, z6.h\n"
+ ".inst 0x6467e47d // bfmmla z29.s, z3.h, z7.h\n"
+ "ld1h { z6.h }, p0/Z, [x20, #2, MUL VL]\n"
+ "ld1h { z7.h }, p0/Z, [x20, #3, MUL VL]\n"
+ ".inst 0x6464e409 // bfmmla z9.s, z0.h, z4.h\n"
+ ".inst 0x6465e40c // bfmmla z12.s, z0.h, z5.h\n"
+ ".inst 0x6464e42f // bfmmla z15.s, z1.h, z4.h\n"
+ ".inst 0x6465e432 // bfmmla z18.s, z1.h, z5.h\n"
+ "addvl x21, x21, #4\n"
+ ".inst 0x6464e455 // bfmmla z21.s, z2.h, z4.h\n"
+ ".inst 0x6465e458 // bfmmla z24.s, z2.h, z5.h\n"
+ "addvl x20, x20, #4\n"
+ ".inst 0x6464e47b // bfmmla z27.s, z3.h, z4.h\n"
+ ".inst 0x6465e47e // bfmmla z30.s, z3.h, z5.h\n"
+ "ld1h { z4.h }, p0/Z, [x25, #2, MUL VL]\n"
+ ".inst 0x6466e40a // bfmmla z10.s, z0.h, z6.h\n"
+ ".inst 0x6467e40d // bfmmla z13.s, z0.h, z7.h\n"
+ "ld1rqh { z0.h }, p0/Z, [%x[Apanel], #80]\n"
+ ".inst 0x6466e430 // bfmmla z16.s, z1.h, z6.h\n"
+ ".inst 0x6467e433 // bfmmla z19.s, z1.h, z7.h\n"
+ "ld1rqh { z1.h }, p0/Z, [%x[Apanel], #96]\n"
+ ".inst 0x6466e456 // bfmmla z22.s, z2.h, z6.h\n"
+ ".inst 0x6467e459 // bfmmla z25.s, z2.h, z7.h\n"
+ "ld1h { z5.h }, p0/Z, [x25, #3, MUL VL]\n"
+ ".inst 0x6466e47c // bfmmla z28.s, z3.h, z6.h\n"
+ ".inst 0x6467e47f // bfmmla z31.s, z3.h, z7.h\n"
+ "ld1rqh { z2.h }, p0/Z, [%x[Apanel], #112]\n"
+ "add %x[Apanel], %x[Apanel], #0x80\n"
+ "addvl x25, x25, #4\n"
+ "bge 4b\n"
+ "5:" // main loop skip
+ "ld1rqh { z3.h }, p0/Z, [%x[Apanel]]\n"
+ ".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
+ ".inst 0x6465e40b // bfmmla z11.s, z0.h, z5.h\n"
+ ".inst 0x6464e42e // bfmmla z14.s, z1.h, z4.h\n"
+ ".inst 0x6465e431 // bfmmla z17.s, z1.h, z5.h\n"
+ "ld1h { z6.h }, p0/Z, [x21]\n"
+ ".inst 0x6464e454 // bfmmla z20.s, z2.h, z4.h\n"
+ ".inst 0x6465e457 // bfmmla z23.s, z2.h, z5.h\n"
+ "ld1h { z7.h }, p0/Z, [x21, #1, MUL VL]\n"
+ ".inst 0x6464e47a // bfmmla z26.s, z3.h, z4.h\n"
+ ".inst 0x6465e47d // bfmmla z29.s, z3.h, z5.h\n"
+ "ld1h { z4.h }, p0/Z, [x20]\n"
+ "ld1h { z5.h }, p0/Z, [x20, #1, MUL VL]\n"
+ ".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
+ ".inst 0x6467e40c // bfmmla z12.s, z0.h, z7.h\n"
+ ".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
+ ".inst 0x6467e432 // bfmmla z18.s, z1.h, z7.h\n"
+ "add %x[Apanel], %x[Apanel], #0x10\n"
+ ".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
+ ".inst 0x6467e458 // bfmmla z24.s, z2.h, z7.h\n"
+ "addvl x21, x21, #2\n"
+ ".inst 0x6466e47b // bfmmla z27.s, z3.h, z6.h\n"
+ ".inst 0x6467e47e // bfmmla z30.s, z3.h, z7.h\n"
+ "addvl x20, x20, #2\n"
+ ".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
+ ".inst 0x6465e40d // bfmmla z13.s, z0.h, z5.h\n"
+ ".inst 0x6464e430 // bfmmla z16.s, z1.h, z4.h\n"
+ ".inst 0x6465e433 // bfmmla z19.s, z1.h, z5.h\n"
+ ".inst 0x6464e456 // bfmmla z22.s, z2.h, z4.h\n"
+ ".inst 0x6465e459 // bfmmla z25.s, z2.h, z5.h\n"
+ ".inst 0x6464e47c // bfmmla z28.s, z3.h, z4.h\n"
+ ".inst 0x6465e47f // bfmmla z31.s, z3.h, z5.h\n"
+ "cbz x19, 6f\n"
+ "ld1h { z6.h }, p0/Z, [x25]\n"
+ "ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
+ ".inst 0x6466e408 // bfmmla z8.s, z0.h, z6.h\n"
+ "ld1rqh { z1.h }, p0/Z, [%x[Apanel], #16]\n"
+ "ld1h { z7.h }, p0/Z, [x25, #1, MUL VL]\n"
+ ".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
+ "ld1rqh { z2.h }, p0/Z, [%x[Apanel], #32]\n"
+ "ld1rqh { z3.h }, p0/Z, [%x[Apanel], #48]\n"
+ ".inst 0x6466e42e // bfmmla z14.s, z1.h, z6.h\n"
+ ".inst 0x6467e431 // bfmmla z17.s, z1.h, z7.h\n"
+ ".inst 0x6466e454 // bfmmla z20.s, z2.h, z6.h\n"
+ "ld1h { z4.h }, p0/Z, [x21]\n"
+ ".inst 0x6467e457 // bfmmla z23.s, z2.h, z7.h\n"
+ ".inst 0x6466e47a // bfmmla z26.s, z3.h, z6.h\n"
+ "ld1h { z5.h }, p0/Z, [x21, #1, MUL VL]\n"
+ ".inst 0x6467e47d // bfmmla z29.s, z3.h, z7.h\n"
+ "ld1h { z6.h }, p0/Z, [x20]\n"
+ "ld1h { z7.h }, p0/Z, [x20, #1, MUL VL]\n"
+ ".inst 0x6464e409 // bfmmla z9.s, z0.h, z4.h\n"
+ ".inst 0x6465e40c // bfmmla z12.s, z0.h, z5.h\n"
+ "add %x[Apanel], %x[Apanel], #0x40\n"
+ ".inst 0x6464e42f // bfmmla z15.s, z1.h, z4.h\n"
+ ".inst 0x6465e432 // bfmmla z18.s, z1.h, z5.h\n"
+ ".inst 0x6464e455 // bfmmla z21.s, z2.h, z4.h\n"
+ ".inst 0x6465e458 // bfmmla z24.s, z2.h, z5.h\n"
+ ".inst 0x6464e47b // bfmmla z27.s, z3.h, z4.h\n"
+ ".inst 0x6465e47e // bfmmla z30.s, z3.h, z5.h\n"
+ ".inst 0x6466e40a // bfmmla z10.s, z0.h, z6.h\n"
+ ".inst 0x6467e40d // bfmmla z13.s, z0.h, z7.h\n"
+ ".inst 0x6466e430 // bfmmla z16.s, z1.h, z6.h\n"
+ ".inst 0x6467e433 // bfmmla z19.s, z1.h, z7.h\n"
+ ".inst 0x6466e456 // bfmmla z22.s, z2.h, z6.h\n"
+ ".inst 0x6467e459 // bfmmla z25.s, z2.h, z7.h\n"
+ ".inst 0x6466e47c // bfmmla z28.s, z3.h, z6.h\n"
+ ".inst 0x6467e47f // bfmmla z31.s, z3.h, z7.h\n"
+ "6:" // multiply loop done
+ "decw x24, ALL, MUL #3\n"
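+      // bfmmla leaves each 2x2 output tile with its row pairs interleaved
+      // across two accumulators; uzp1/uzp2 on .d elements restore row-major
+      // order before the stores below.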
+ "uzp1 z4.d, z8.d, z11.d\n"
+ "uzp2 z8.d, z8.d, z11.d\n"
+ "uzp1 z11.d, z9.d, z12.d\n"
+ "uzp2 z9.d, z9.d, z12.d\n"
+ "st1w { z4.s }, p0, [%x[Cpanel]]\n"
+ "uzp1 z12.d, z10.d, z13.d\n"
+ "uzp2 z10.d, z10.d, z13.d\n"
+ "st1w { z11.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
+ "st1w { z12.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
+ "uzp1 z13.d, z14.d, z17.d\n"
+ "uzp2 z14.d, z14.d, z17.d\n"
+ "st1w { z8.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
+ "uzp1 z17.d, z15.d, z18.d\n"
+ "cmp x24, XZR\n"
+ "st1w { z9.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
+ "uzp2 z15.d, z15.d, z18.d\n"
+ "uzp1 z18.d, z16.d, z19.d\n"
+ "st1w { z10.s }, p0, [%x[Cpanel], #5, MUL VL]\n"
+ "uzp2 z16.d, z16.d, z19.d\n"
+ "uzp1 z19.d, z20.d, z23.d\n"
+ "st1w { z13.s }, p0, [%x[Cpanel], #6, MUL VL]\n"
+ "uzp2 z20.d, z20.d, z23.d\n"
+ "uzp1 z23.d, z21.d, z24.d\n"
+ "st1w { z17.s }, p0, [%x[Cpanel], #7, MUL VL]\n"
+ "addvl %x[Cpanel], %x[Cpanel], #16\n"
+ "uzp2 z21.d, z21.d, z24.d\n"
+ "st1w { z18.s }, p0, [%x[Cpanel], #-8, MUL VL]\n"
+ "uzp1 z24.d, z22.d, z25.d\n"
+ "uzp2 z22.d, z22.d, z25.d\n"
+ "st1w { z14.s }, p0, [%x[Cpanel], #-7, MUL VL]\n"
+ "uzp1 z25.d, z26.d, z29.d\n"
+ "uzp2 z26.d, z26.d, z29.d\n"
+ "st1w { z15.s }, p0, [%x[Cpanel], #-6, MUL VL]\n"
+ "uzp1 z29.d, z27.d, z30.d\n"
+ "uzp2 z27.d, z27.d, z30.d\n"
+ "st1w { z16.s }, p0, [%x[Cpanel], #-5, MUL VL]\n"
+ "uzp1 z30.d, z28.d, z31.d\n"
+ "uzp2 z28.d, z28.d, z31.d\n"
+ "st1w { z19.s }, p0, [%x[Cpanel], #-4, MUL VL]\n"
+ "st1w { z23.s }, p0, [%x[Cpanel], #-3, MUL VL]\n"
+ "st1w { z24.s }, p0, [%x[Cpanel], #-2, MUL VL]\n"
+ "st1w { z20.s }, p0, [%x[Cpanel], #-1, MUL VL]\n"
+ "st1w { z21.s }, p0, [%x[Cpanel]]\n"
+ "st1w { z22.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
+ "st1w { z25.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
+ "st1w { z29.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
+ "st1w { z30.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
+ "st1w { z26.s }, p0, [%x[Cpanel], #5, MUL VL]\n"
+ "st1w { z27.s }, p0, [%x[Cpanel], #6, MUL VL]\n"
+ "st1w { z28.s }, p0, [%x[Cpanel], #7, MUL VL]\n"
+ "addvl %x[Cpanel], %x[Cpanel], #8\n"
+ "bgt 2b\n"
+ "subs %x[ablocks], %x[ablocks], #0x1\n"
+ "bne 1b\n"
+ : [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
+ : [args_ptr] "r" (&ka), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr))
+ : "cc", "memory", "p0", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace arm_gemm
+#endif // ARM_COMPUTE_ENABLE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL.hpp
new file mode 100644
index 0000000000..6d36bf8bbf
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL.hpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#pragma once
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include "../std_transforms_sve.hpp"
+#include "../kernel_weight_format.hpp"
+#include "../performance_parameters.hpp"
+
+#define ARGLIST \
+ const __fp16 *, const __fp16 *, size_t, \
+ __fp16 *, int, size_t, int
+
+namespace arm_gemm
+{
+// Actual kernel implementations
+void sve_ffinterleaved_fp16_mla_8x3VL( ARGLIST );
+void sve_ffinterleaved_fp16_mla_8x3VL_a64fx( ARGLIST );
+
+class cls_sve_ffinterleaved_fp16_mla_8x3VL
+{
+public:
+ typedef __fp16 operand_type;
+ typedef __fp16 result_type;
+
+ typedef void (*kern_type)( ARGLIST );
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 8;
+ }
+
+ static unsigned int out_width()
+ {
+ return get_vector_length<__fp16>() * 3;
+ }
+ static unsigned int stripe_width()
+ {
+ return get_vector_length<__fp16>();
+ }
+
+ static KernelWeightFormat kernel_weight_format()
+ {
+ return KernelWeightFormat::VL1VL_BL16;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 1;
+ }
+
+
+ StdTransformsSVE<operand_type, result_type, 8, 3, 1, 1> transforms = {};
+ StdTransformsSVE<operand_type, result_type, 8, 3, 1, 1, true> transforms_quantized = {};
+ template<typename T>
+ static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
+ {
+
+ if (std::is_same<T, __fp16>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 25.53, 7.89, 3.82 };
+ }
+ }
+
+ return { 1.0 };
+ }
+
+ // Default to the generic kernel
+ kern_type kernel=sve_ffinterleaved_fp16_mla_8x3VL;
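+    // On A64FX the constructor below swaps in the a64fx.cpp variant, which
+    // uses predicated fmla with ld1rh broadcast loads instead of the
+    // indexed-element fmla form used by the generic kernel.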
+ cls_sve_ffinterleaved_fp16_mla_8x3VL(const CPUInfo *ci)
+ {
+ switch(ci->get_cpu_model()) {
+ default:
+ break;
+ case CPUModel::A64FX:
+ kernel=sve_ffinterleaved_fp16_mla_8x3VL_a64fx;
+ break;
+ }
+ }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+#endif // ARM_COMPUTE_ENABLE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/a64fx.cpp
new file mode 100644
index 0000000000..cd4da2c124
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/a64fx.cpp
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include <cstddef>
+
+namespace arm_gemm {
+
+void sve_ffinterleaved_fp16_mla_8x3VL_a64fx(
+ const __fp16 *Apanel,
+ const __fp16 *Bpanel,
+ size_t B_stride,
+ __fp16 *Cpanel,
+ int ablocks,
+ size_t N,
+ int K) {
+
+ struct KernelArgs {
+ size_t K = {};
+ const __fp16 *Bpanel = {};
+ size_t N = {};
+ size_t B_stride = {};
+ const __fp16 *cur_B_ptr = {};
+ } ka;
+
+ ka.K = (K/1) - 1;
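+  // Plain fmla consumes one fp16 element per step (k_unroll() == 1), so K is
+  // used as-is with one iteration reserved for the loop tail.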
+ ka.Bpanel = Bpanel;
+ ka.N = N;
+ ka.B_stride = B_stride;
+
+ __asm__ __volatile__(
+ "ptrue p0.b\n"
+ "1:" // Height loop
+ "ldr x25, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x24, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x25, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x23, %x[Apanel]\n"
+ "2:" // Width loop
+ "ldr x25, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cnth x22, ALL, MUL #2\n"
+ "add x21, x25, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
+ "add x19, x20, x19, LSL #1\n"
+ "cmp x24, x22\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov %x[Apanel], x23\n"
+ "bgt 3f\n"
+ "dech x22\n"
+ "cmp x24, x22\n"
+ "mov x20, x25\n"
+ "bgt 3f\n"
+ "mov x21, x25\n"
+ "3:" // B setup done
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
+ "cmp x19, #0x2\n"
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "ld1h { z0.h }, p0/Z, [x25]\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "ld1h { z1.h }, p0/Z, [x21]\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "ld1h { z2.h }, p0/Z, [x20]\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "ld1rh { z3.h }, p0/Z, [%x[Apanel]]\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "ld1rh { z4.h }, p0/Z, [%x[Apanel], #2]\n"
+ "mov z19.b, #0x0\n"
+ "mov z20.b, #0x0\n"
+ "ld1rh { z5.h }, p0/Z, [%x[Apanel], #4]\n"
+ "mov z21.b, #0x0\n"
+ "mov z22.b, #0x0\n"
+ "ld1rh { z6.h }, p0/Z, [%x[Apanel], #6]\n"
+ "mov z23.b, #0x0\n"
+ "mov z24.b, #0x0\n"
+ "mov z25.b, #0x0\n"
+ "mov z26.b, #0x0\n"
+ "mov z27.b, #0x0\n"
+ "mov z28.b, #0x0\n"
+ "mov z29.b, #0x0\n"
+ "mov z30.b, #0x0\n"
+ "mov z31.b, #0x0\n"
+ "blt 5f\n"
+ "4:" // main loop head
+ "fmla z8.h, p0/M, z0.h, z3.h\n"
+ "fmla z9.h, p0/M, z1.h, z3.h\n"
+ "sub x19, x19, #0x2\n"
+ "fmla z10.h, p0/M, z2.h, z3.h\n"
+ "ld1rh { z3.h }, p0/Z, [%x[Apanel], #8]\n"
+ "fmla z11.h, p0/M, z0.h, z4.h\n"
+ "fmla z12.h, p0/M, z1.h, z4.h\n"
+ "fmla z13.h, p0/M, z2.h, z4.h\n"
+ "ld1rh { z4.h }, p0/Z, [%x[Apanel], #10]\n"
+ "fmla z14.h, p0/M, z0.h, z5.h\n"
+ "fmla z15.h, p0/M, z1.h, z5.h\n"
+ "cmp x19, #0x2\n"
+ "fmla z16.h, p0/M, z2.h, z5.h\n"
+ "ld1rh { z5.h }, p0/Z, [%x[Apanel], #12]\n"
+ "fmla z17.h, p0/M, z0.h, z6.h\n"
+ "fmla z18.h, p0/M, z1.h, z6.h\n"
+ "fmla z19.h, p0/M, z2.h, z6.h\n"
+ "ld1rh { z6.h }, p0/Z, [%x[Apanel], #14]\n"
+ "fmla z20.h, p0/M, z0.h, z3.h\n"
+ "fmla z21.h, p0/M, z1.h, z3.h\n"
+ "fmla z22.h, p0/M, z2.h, z3.h\n"
+ "ld1rh { z3.h }, p0/Z, [%x[Apanel], #16]\n"
+ "fmla z23.h, p0/M, z0.h, z4.h\n"
+ "fmla z24.h, p0/M, z1.h, z4.h\n"
+ "fmla z25.h, p0/M, z2.h, z4.h\n"
+ "ld1rh { z4.h }, p0/Z, [%x[Apanel], #18]\n"
+ "fmla z26.h, p0/M, z0.h, z5.h\n"
+ "fmla z27.h, p0/M, z1.h, z5.h\n"
+ "fmla z28.h, p0/M, z2.h, z5.h\n"
+ "ld1rh { z5.h }, p0/Z, [%x[Apanel], #20]\n"
+ "fmla z29.h, p0/M, z0.h, z6.h\n"
+ "ld1h { z0.h }, p0/Z, [x25, #1, MUL VL]\n"
+ "fmla z30.h, p0/M, z1.h, z6.h\n"
+ "fmla z31.h, p0/M, z2.h, z6.h\n"
+ "ld1h { z1.h }, p0/Z, [x21, #1, MUL VL]\n"
+ "ld1h { z2.h }, p0/Z, [x20, #1, MUL VL]\n"
+ "fmla z8.h, p0/M, z0.h, z3.h\n"
+ "ld1rh { z6.h }, p0/Z, [%x[Apanel], #22]\n"
+ "fmla z9.h, p0/M, z1.h, z3.h\n"
+ "fmla z10.h, p0/M, z2.h, z3.h\n"
+ "fmla z11.h, p0/M, z0.h, z4.h\n"
+ "ld1rh { z3.h }, p0/Z, [%x[Apanel], #24]\n"
+ "fmla z12.h, p0/M, z1.h, z4.h\n"
+ "fmla z13.h, p0/M, z2.h, z4.h\n"
+ "ld1rh { z4.h }, p0/Z, [%x[Apanel], #26]\n"
+ "fmla z14.h, p0/M, z0.h, z5.h\n"
+ "fmla z15.h, p0/M, z1.h, z5.h\n"
+ "addvl x25, x25, #2\n"
+ "fmla z16.h, p0/M, z2.h, z5.h\n"
+ "ld1rh { z5.h }, p0/Z, [%x[Apanel], #28]\n"
+ "fmla z17.h, p0/M, z0.h, z6.h\n"
+ "fmla z18.h, p0/M, z1.h, z6.h\n"
+ "fmla z19.h, p0/M, z2.h, z6.h\n"
+ "ld1rh { z6.h }, p0/Z, [%x[Apanel], #30]\n"
+ "addvl x21, x21, #2\n"
+ "addvl x20, x20, #2\n"
+ "add %x[Apanel], %x[Apanel], #0x20\n"
+ "fmla z20.h, p0/M, z0.h, z3.h\n"
+ "fmla z21.h, p0/M, z1.h, z3.h\n"
+ "fmla z22.h, p0/M, z2.h, z3.h\n"
+ "ld1rh { z3.h }, p0/Z, [%x[Apanel]]\n"
+ "fmla z23.h, p0/M, z0.h, z4.h\n"
+ "fmla z24.h, p0/M, z1.h, z4.h\n"
+ "fmla z25.h, p0/M, z2.h, z4.h\n"
+ "fmla z26.h, p0/M, z0.h, z5.h\n"
+ "ld1rh { z4.h }, p0/Z, [%x[Apanel], #2]\n"
+ "fmla z27.h, p0/M, z1.h, z5.h\n"
+ "fmla z28.h, p0/M, z2.h, z5.h\n"
+ "ld1rh { z5.h }, p0/Z, [%x[Apanel], #4]\n"
+ "fmla z29.h, p0/M, z0.h, z6.h\n"
+ "ld1h { z0.h }, p0/Z, [x25]\n"
+ "fmla z30.h, p0/M, z1.h, z6.h\n"
+ "fmla z31.h, p0/M, z2.h, z6.h\n"
+ "ld1h { z1.h }, p0/Z, [x21]\n"
+ "ld1h { z2.h }, p0/Z, [x20]\n"
+ "ld1rh { z6.h }, p0/Z, [%x[Apanel], #6]\n"
+ "bge 4b\n"
+ "5:" // main loop skip
+ "fmla z8.h, p0/M, z0.h, z3.h\n"
+ "fmla z9.h, p0/M, z1.h, z3.h\n"
+ "addvl x25, x25, #1\n"
+ "fmla z10.h, p0/M, z2.h, z3.h\n"
+ "ld1rh { z3.h }, p0/Z, [%x[Apanel], #8]\n"
+ "fmla z11.h, p0/M, z0.h, z4.h\n"
+ "fmla z12.h, p0/M, z1.h, z4.h\n"
+ "fmla z13.h, p0/M, z2.h, z4.h\n"
+ "ld1rh { z4.h }, p0/Z, [%x[Apanel], #10]\n"
+ "fmla z14.h, p0/M, z0.h, z5.h\n"
+ "fmla z15.h, p0/M, z1.h, z5.h\n"
+ "addvl x21, x21, #1\n"
+ "fmla z16.h, p0/M, z2.h, z5.h\n"
+ "ld1rh { z5.h }, p0/Z, [%x[Apanel], #12]\n"
+ "fmla z17.h, p0/M, z0.h, z6.h\n"
+ "fmla z18.h, p0/M, z1.h, z6.h\n"
+ "fmla z19.h, p0/M, z2.h, z6.h\n"
+ "ld1rh { z6.h }, p0/Z, [%x[Apanel], #14]\n"
+ "fmla z20.h, p0/M, z0.h, z3.h\n"
+ "fmla z21.h, p0/M, z1.h, z3.h\n"
+ "addvl x20, x20, #1\n"
+ "fmla z22.h, p0/M, z2.h, z3.h\n"
+ "fmla z23.h, p0/M, z0.h, z4.h\n"
+ "add %x[Apanel], %x[Apanel], #0x10\n"
+ "fmla z24.h, p0/M, z1.h, z4.h\n"
+ "fmla z25.h, p0/M, z2.h, z4.h\n"
+ "fmla z26.h, p0/M, z0.h, z5.h\n"
+ "fmla z27.h, p0/M, z1.h, z5.h\n"
+ "fmla z28.h, p0/M, z2.h, z5.h\n"
+ "fmla z29.h, p0/M, z0.h, z6.h\n"
+ "fmla z30.h, p0/M, z1.h, z6.h\n"
+ "fmla z31.h, p0/M, z2.h, z6.h\n"
+ "cbz x19, 6f\n"
+ "ld1h { z0.h }, p0/Z, [x25]\n"
+ "ld1h { z1.h }, p0/Z, [x21]\n"
+ "ld1h { z2.h }, p0/Z, [x20]\n"
+ "ld1rh { z3.h }, p0/Z, [%x[Apanel]]\n"
+ "fmla z8.h, p0/M, z0.h, z3.h\n"
+ "ld1rh { z4.h }, p0/Z, [%x[Apanel], #2]\n"
+ "ld1rh { z5.h }, p0/Z, [%x[Apanel], #4]\n"
+ "fmla z9.h, p0/M, z1.h, z3.h\n"
+ "ld1rh { z6.h }, p0/Z, [%x[Apanel], #6]\n"
+ "fmla z10.h, p0/M, z2.h, z3.h\n"
+ "fmla z11.h, p0/M, z0.h, z4.h\n"
+ "fmla z12.h, p0/M, z1.h, z4.h\n"
+ "fmla z13.h, p0/M, z2.h, z4.h\n"
+ "ld1rh { z3.h }, p0/Z, [%x[Apanel], #8]\n"
+ "fmla z14.h, p0/M, z0.h, z5.h\n"
+ "fmla z15.h, p0/M, z1.h, z5.h\n"
+ "ld1rh { z4.h }, p0/Z, [%x[Apanel], #10]\n"
+ "fmla z16.h, p0/M, z2.h, z5.h\n"
+ "fmla z17.h, p0/M, z0.h, z6.h\n"
+ "ld1rh { z5.h }, p0/Z, [%x[Apanel], #12]\n"
+ "fmla z18.h, p0/M, z1.h, z6.h\n"
+ "fmla z19.h, p0/M, z2.h, z6.h\n"
+ "ld1rh { z6.h }, p0/Z, [%x[Apanel], #14]\n"
+ "fmla z20.h, p0/M, z0.h, z3.h\n"
+ "fmla z21.h, p0/M, z1.h, z3.h\n"
+ "add %x[Apanel], %x[Apanel], #0x10\n"
+ "fmla z22.h, p0/M, z2.h, z3.h\n"
+ "fmla z23.h, p0/M, z0.h, z4.h\n"
+ "fmla z24.h, p0/M, z1.h, z4.h\n"
+ "fmla z25.h, p0/M, z2.h, z4.h\n"
+ "fmla z26.h, p0/M, z0.h, z5.h\n"
+ "fmla z27.h, p0/M, z1.h, z5.h\n"
+ "fmla z28.h, p0/M, z2.h, z5.h\n"
+ "fmla z29.h, p0/M, z0.h, z6.h\n"
+ "fmla z30.h, p0/M, z1.h, z6.h\n"
+ "fmla z31.h, p0/M, z2.h, z6.h\n"
+ "6:" // multiply loop done
+ "dech x24, ALL, MUL #3\n"
+ "st1h { z8.h }, p0, [%x[Cpanel]]\n"
+ "cmp x24, XZR\n"
+ "st1h { z9.h }, p0, [%x[Cpanel], #1, MUL VL]\n"
+ "st1h { z10.h }, p0, [%x[Cpanel], #2, MUL VL]\n"
+ "st1h { z11.h }, p0, [%x[Cpanel], #3, MUL VL]\n"
+ "st1h { z12.h }, p0, [%x[Cpanel], #4, MUL VL]\n"
+ "st1h { z13.h }, p0, [%x[Cpanel], #5, MUL VL]\n"
+ "st1h { z14.h }, p0, [%x[Cpanel], #6, MUL VL]\n"
+ "st1h { z15.h }, p0, [%x[Cpanel], #7, MUL VL]\n"
+ "addvl %x[Cpanel], %x[Cpanel], #16\n"
+ "st1h { z16.h }, p0, [%x[Cpanel], #-8, MUL VL]\n"
+ "st1h { z17.h }, p0, [%x[Cpanel], #-7, MUL VL]\n"
+ "st1h { z18.h }, p0, [%x[Cpanel], #-6, MUL VL]\n"
+ "st1h { z19.h }, p0, [%x[Cpanel], #-5, MUL VL]\n"
+ "st1h { z20.h }, p0, [%x[Cpanel], #-4, MUL VL]\n"
+ "st1h { z21.h }, p0, [%x[Cpanel], #-3, MUL VL]\n"
+ "st1h { z22.h }, p0, [%x[Cpanel], #-2, MUL VL]\n"
+ "st1h { z23.h }, p0, [%x[Cpanel], #-1, MUL VL]\n"
+ "st1h { z24.h }, p0, [%x[Cpanel]]\n"
+ "st1h { z25.h }, p0, [%x[Cpanel], #1, MUL VL]\n"
+ "st1h { z26.h }, p0, [%x[Cpanel], #2, MUL VL]\n"
+ "st1h { z27.h }, p0, [%x[Cpanel], #3, MUL VL]\n"
+ "st1h { z28.h }, p0, [%x[Cpanel], #4, MUL VL]\n"
+ "st1h { z29.h }, p0, [%x[Cpanel], #5, MUL VL]\n"
+ "st1h { z30.h }, p0, [%x[Cpanel], #6, MUL VL]\n"
+ "st1h { z31.h }, p0, [%x[Cpanel], #7, MUL VL]\n"
+ "addvl %x[Cpanel], %x[Cpanel], #8\n"
+ "bgt 2b\n"
+ "subs %x[ablocks], %x[ablocks], #0x1\n"
+ "bne 1b\n"
+ : [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
+ : [args_ptr] "r" (&ka), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr))
+ : "cc", "memory", "p0", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace arm_gemm
+#endif // ARM_COMPUTE_ENABLE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/generic.cpp
new file mode 100644
index 0000000000..431c2a88f5
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/generic.cpp
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include <cstddef>
+
+namespace arm_gemm {
+
+void sve_ffinterleaved_fp16_mla_8x3VL(
+ const __fp16 *Apanel,
+ const __fp16 *Bpanel,
+ size_t B_stride,
+ __fp16 *Cpanel,
+ int ablocks,
+ size_t N,
+ int K) {
+
+ struct KernelArgs {
+ size_t K = {};
+ const __fp16 *Bpanel = {};
+ size_t N = {};
+ size_t B_stride = {};
+ const __fp16 *cur_B_ptr = {};
+ } ka;
+
+ ka.K = (K/1) - 1;
+ ka.Bpanel = Bpanel;
+ ka.N = N;
+ ka.B_stride = B_stride;
+
+ __asm__ __volatile__(
+ "ptrue p0.b\n"
+ "1:" // Height loop
+ "ldr x25, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x24, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x25, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x23, %x[Apanel]\n"
+ "2:" // Width loop
+ "ldr x25, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cnth x22, ALL, MUL #2\n"
+ "add x21, x25, x19, LSL #1\n"
+ "add x20, x21, x19, LSL #1\n"
+ "add x19, x20, x19, LSL #1\n"
+ "cmp x24, x22\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov %x[Apanel], x23\n"
+ "bgt 3f\n"
+ "dech x22\n"
+ "cmp x24, x22\n"
+ "mov x20, x25\n"
+ "bgt 3f\n"
+ "mov x21, x25\n"
+ "3:" // B setup done
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
+ "cmp x19, #0x2\n"
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "ld1h { z2.h }, p0/Z, [x25]\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "ld1h { z3.h }, p0/Z, [x21]\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "ld1h { z4.h }, p0/Z, [x20]\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "mov z19.b, #0x0\n"
+ "mov z20.b, #0x0\n"
+ "mov z21.b, #0x0\n"
+ "mov z22.b, #0x0\n"
+ "mov z23.b, #0x0\n"
+ "mov z24.b, #0x0\n"
+ "mov z25.b, #0x0\n"
+ "mov z26.b, #0x0\n"
+ "mov z27.b, #0x0\n"
+ "mov z28.b, #0x0\n"
+ "mov z29.b, #0x0\n"
+ "mov z30.b, #0x0\n"
+ "mov z31.b, #0x0\n"
+ "blt 5f\n"
+ "4:" // main loop head
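+      // One Z register of A carries 8 fp16 rows; indexed fmla z.h[0..7] fans
+      // each row's element across the three B stripes to build the 8x3VL tile.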
+ "fmla z8.h, z2.h, z0.h[0]\n"
+ "fmla z11.h, z2.h, z0.h[1]\n"
+ "ld1rqh { z1.h }, p0/Z, [%x[Apanel], #16]\n"
+ "fmla z14.h, z2.h, z0.h[2]\n"
+ "fmla z17.h, z2.h, z0.h[3]\n"
+ "ld1h { z5.h }, p0/Z, [x25, #1, MUL VL]\n"
+ "fmla z20.h, z2.h, z0.h[4]\n"
+ "fmla z23.h, z2.h, z0.h[5]\n"
+ "ld1h { z6.h }, p0/Z, [x21, #1, MUL VL]\n"
+ "fmla z26.h, z2.h, z0.h[6]\n"
+ "fmla z29.h, z2.h, z0.h[7]\n"
+ "ld1h { z7.h }, p0/Z, [x20, #1, MUL VL]\n"
+ "fmla z9.h, z3.h, z0.h[0]\n"
+ "fmla z12.h, z3.h, z0.h[1]\n"
+ "addvl x25, x25, #2\n"
+ "fmla z15.h, z3.h, z0.h[2]\n"
+ "fmla z18.h, z3.h, z0.h[3]\n"
+ "addvl x21, x21, #2\n"
+ "fmla z21.h, z3.h, z0.h[4]\n"
+ "fmla z24.h, z3.h, z0.h[5]\n"
+ "addvl x20, x20, #2\n"
+ "fmla z27.h, z3.h, z0.h[6]\n"
+ "fmla z30.h, z3.h, z0.h[7]\n"
+ "sub x19, x19, #0x2\n"
+ "fmla z10.h, z4.h, z0.h[0]\n"
+ "fmla z13.h, z4.h, z0.h[1]\n"
+ "cmp x19, #0x2\n"
+ "fmla z16.h, z4.h, z0.h[2]\n"
+ "fmla z19.h, z4.h, z0.h[3]\n"
+ "add %x[Apanel], %x[Apanel], #0x20\n"
+ "fmla z22.h, z4.h, z0.h[4]\n"
+ "fmla z25.h, z4.h, z0.h[5]\n"
+ "ld1h { z2.h }, p0/Z, [x25]\n"
+ "fmla z28.h, z4.h, z0.h[6]\n"
+ "fmla z31.h, z4.h, z0.h[7]\n"
+ "ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
+ "fmla z8.h, z5.h, z1.h[0]\n"
+ "fmla z11.h, z5.h, z1.h[1]\n"
+ "ld1h { z3.h }, p0/Z, [x21]\n"
+ "fmla z14.h, z5.h, z1.h[2]\n"
+ "fmla z17.h, z5.h, z1.h[3]\n"
+ "ld1h { z4.h }, p0/Z, [x20]\n"
+ "fmla z20.h, z5.h, z1.h[4]\n"
+ "fmla z23.h, z5.h, z1.h[5]\n"
+ "fmla z26.h, z5.h, z1.h[6]\n"
+ "fmla z29.h, z5.h, z1.h[7]\n"
+ "fmla z9.h, z6.h, z1.h[0]\n"
+ "fmla z12.h, z6.h, z1.h[1]\n"
+ "fmla z15.h, z6.h, z1.h[2]\n"
+ "fmla z18.h, z6.h, z1.h[3]\n"
+ "fmla z21.h, z6.h, z1.h[4]\n"
+ "fmla z24.h, z6.h, z1.h[5]\n"
+ "fmla z27.h, z6.h, z1.h[6]\n"
+ "fmla z30.h, z6.h, z1.h[7]\n"
+ "fmla z10.h, z7.h, z1.h[0]\n"
+ "fmla z13.h, z7.h, z1.h[1]\n"
+ "fmla z16.h, z7.h, z1.h[2]\n"
+ "fmla z19.h, z7.h, z1.h[3]\n"
+ "fmla z22.h, z7.h, z1.h[4]\n"
+ "fmla z25.h, z7.h, z1.h[5]\n"
+ "fmla z28.h, z7.h, z1.h[6]\n"
+ "fmla z31.h, z7.h, z1.h[7]\n"
+ "bge 4b\n"
+ "5:" // main loop skip
+ "fmla z8.h, z2.h, z0.h[0]\n"
+ "fmla z11.h, z2.h, z0.h[1]\n"
+ "add %x[Apanel], %x[Apanel], #0x10\n"
+ "fmla z14.h, z2.h, z0.h[2]\n"
+ "fmla z17.h, z2.h, z0.h[3]\n"
+ "addvl x25, x25, #1\n"
+ "fmla z20.h, z2.h, z0.h[4]\n"
+ "fmla z23.h, z2.h, z0.h[5]\n"
+ "addvl x21, x21, #1\n"
+ "fmla z26.h, z2.h, z0.h[6]\n"
+ "fmla z29.h, z2.h, z0.h[7]\n"
+ "addvl x20, x20, #1\n"
+ "fmla z9.h, z3.h, z0.h[0]\n"
+ "fmla z12.h, z3.h, z0.h[1]\n"
+ "fmla z15.h, z3.h, z0.h[2]\n"
+ "fmla z18.h, z3.h, z0.h[3]\n"
+ "fmla z21.h, z3.h, z0.h[4]\n"
+ "fmla z24.h, z3.h, z0.h[5]\n"
+ "fmla z27.h, z3.h, z0.h[6]\n"
+ "fmla z30.h, z3.h, z0.h[7]\n"
+ "fmla z10.h, z4.h, z0.h[0]\n"
+ "fmla z13.h, z4.h, z0.h[1]\n"
+ "fmla z16.h, z4.h, z0.h[2]\n"
+ "fmla z19.h, z4.h, z0.h[3]\n"
+ "fmla z22.h, z4.h, z0.h[4]\n"
+ "fmla z25.h, z4.h, z0.h[5]\n"
+ "fmla z28.h, z4.h, z0.h[6]\n"
+ "fmla z31.h, z4.h, z0.h[7]\n"
+ "cbz x19, 6f\n"
+ "ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
+ "ld1h { z5.h }, p0/Z, [x25]\n"
+ "fmla z8.h, z5.h, z0.h[0]\n"
+ "ld1h { z6.h }, p0/Z, [x21]\n"
+ "ld1h { z7.h }, p0/Z, [x20]\n"
+ "fmla z11.h, z5.h, z0.h[1]\n"
+ "fmla z14.h, z5.h, z0.h[2]\n"
+ "fmla z17.h, z5.h, z0.h[3]\n"
+ "add %x[Apanel], %x[Apanel], #0x10\n"
+ "fmla z20.h, z5.h, z0.h[4]\n"
+ "fmla z23.h, z5.h, z0.h[5]\n"
+ "fmla z26.h, z5.h, z0.h[6]\n"
+ "fmla z29.h, z5.h, z0.h[7]\n"
+ "fmla z9.h, z6.h, z0.h[0]\n"
+ "fmla z12.h, z6.h, z0.h[1]\n"
+ "fmla z15.h, z6.h, z0.h[2]\n"
+ "fmla z18.h, z6.h, z0.h[3]\n"
+ "fmla z21.h, z6.h, z0.h[4]\n"
+ "fmla z24.h, z6.h, z0.h[5]\n"
+ "fmla z27.h, z6.h, z0.h[6]\n"
+ "fmla z30.h, z6.h, z0.h[7]\n"
+ "fmla z10.h, z7.h, z0.h[0]\n"
+ "fmla z13.h, z7.h, z0.h[1]\n"
+ "fmla z16.h, z7.h, z0.h[2]\n"
+ "fmla z19.h, z7.h, z0.h[3]\n"
+ "fmla z22.h, z7.h, z0.h[4]\n"
+ "fmla z25.h, z7.h, z0.h[5]\n"
+ "fmla z28.h, z7.h, z0.h[6]\n"
+ "fmla z31.h, z7.h, z0.h[7]\n"
+ "6:" // multiply loop done
+ "dech x24, ALL, MUL #3\n"
+ "st1h { z8.h }, p0, [%x[Cpanel]]\n"
+ "cmp x24, XZR\n"
+ "st1h { z9.h }, p0, [%x[Cpanel], #1, MUL VL]\n"
+ "st1h { z10.h }, p0, [%x[Cpanel], #2, MUL VL]\n"
+ "st1h { z11.h }, p0, [%x[Cpanel], #3, MUL VL]\n"
+ "st1h { z12.h }, p0, [%x[Cpanel], #4, MUL VL]\n"
+ "st1h { z13.h }, p0, [%x[Cpanel], #5, MUL VL]\n"
+ "st1h { z14.h }, p0, [%x[Cpanel], #6, MUL VL]\n"
+ "st1h { z15.h }, p0, [%x[Cpanel], #7, MUL VL]\n"
+ "addvl %x[Cpanel], %x[Cpanel], #16\n"
+ "st1h { z16.h }, p0, [%x[Cpanel], #-8, MUL VL]\n"
+ "st1h { z17.h }, p0, [%x[Cpanel], #-7, MUL VL]\n"
+ "st1h { z18.h }, p0, [%x[Cpanel], #-6, MUL VL]\n"
+ "st1h { z19.h }, p0, [%x[Cpanel], #-5, MUL VL]\n"
+ "st1h { z20.h }, p0, [%x[Cpanel], #-4, MUL VL]\n"
+ "st1h { z21.h }, p0, [%x[Cpanel], #-3, MUL VL]\n"
+ "st1h { z22.h }, p0, [%x[Cpanel], #-2, MUL VL]\n"
+ "st1h { z23.h }, p0, [%x[Cpanel], #-1, MUL VL]\n"
+ "st1h { z24.h }, p0, [%x[Cpanel]]\n"
+ "st1h { z25.h }, p0, [%x[Cpanel], #1, MUL VL]\n"
+ "st1h { z26.h }, p0, [%x[Cpanel], #2, MUL VL]\n"
+ "st1h { z27.h }, p0, [%x[Cpanel], #3, MUL VL]\n"
+ "st1h { z28.h }, p0, [%x[Cpanel], #4, MUL VL]\n"
+ "st1h { z29.h }, p0, [%x[Cpanel], #5, MUL VL]\n"
+ "st1h { z30.h }, p0, [%x[Cpanel], #6, MUL VL]\n"
+ "st1h { z31.h }, p0, [%x[Cpanel], #7, MUL VL]\n"
+ "addvl %x[Cpanel], %x[Cpanel], #8\n"
+ "bgt 2b\n"
+ "subs %x[ablocks], %x[ablocks], #0x1\n"
+ "bne 1b\n"
+ : [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
+ : [args_ptr] "r" (&ka), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr))
+ : "cc", "memory", "p0", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace arm_gemm
+#endif // ARM_COMPUTE_ENABLE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL.hpp
new file mode 100644
index 0000000000..aa3507ee73
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL.hpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#pragma once
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include "../std_transforms_sve.hpp"
+#include "../kernel_weight_format.hpp"
+#include "../performance_parameters.hpp"
+
+#define ARGLIST \
+ const float *, const float *, size_t, \
+ float *, int, size_t, int
+
+namespace arm_gemm
+{
+// Actual kernel implementations
+void sve_ffinterleaved_fp32_mla_8x3VL( ARGLIST );
+void sve_ffinterleaved_fp32_mla_8x3VL_a64fx( ARGLIST );
+
+class cls_sve_ffinterleaved_fp32_mla_8x3VL
+{
+public:
+ typedef float operand_type;
+ typedef float result_type;
+
+ typedef void (*kern_type)( ARGLIST );
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 8;
+ }
+
+ static unsigned int out_width()
+ {
+ return get_vector_length<float>() * 3;
+ }
+ static unsigned int stripe_width()
+ {
+ return get_vector_length<float>();
+ }
+
+ static KernelWeightFormat kernel_weight_format()
+ {
+ return KernelWeightFormat::VL1VL_BL32;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 1;
+ }
+
+
+ StdTransformsSVE<operand_type, result_type, 8, 3, 1, 1> transforms = {};
+ StdTransformsSVE<operand_type, result_type, 8, 3, 1, 1, true> transforms_quantized = {};
+ template<typename T>
+ static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
+ {
+
+ if (std::is_same<T, float>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 13.51, 9.27, 3.98 };
+ }
+ }
+
+ return { 1.0 };
+ }
+
+ // Default to the generic kernel
+ kern_type kernel=sve_ffinterleaved_fp32_mla_8x3VL;
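+    // As with the fp16 kernel, A64FX gets the a64fx.cpp variant, which
+    // prefers predicated fmla with ld1rw broadcasts over indexed-element fmla.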
+ cls_sve_ffinterleaved_fp32_mla_8x3VL(const CPUInfo *ci)
+ {
+ switch(ci->get_cpu_model()) {
+ default:
+ break;
+ case CPUModel::A64FX:
+ kernel=sve_ffinterleaved_fp32_mla_8x3VL_a64fx;
+ break;
+ }
+ }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+#endif // ARM_COMPUTE_ENABLE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL/a64fx.cpp
new file mode 100644
index 0000000000..aecf7f94c9
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL/a64fx.cpp
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include <cstddef>
+
+namespace arm_gemm {
+
+void sve_ffinterleaved_fp32_mla_8x3VL_a64fx(
+ const float *Apanel,
+ const float *Bpanel,
+ size_t B_stride,
+ float *Cpanel,
+ int ablocks,
+ size_t N,
+ int K) {
+
+ struct KernelArgs {
+ size_t K = {};
+ const float *Bpanel = {};
+ size_t N = {};
+ size_t B_stride = {};
+ const float *cur_B_ptr = {};
+ } ka;
+
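+    // One K iteration is peeled into the tail after the main loop, so the
+    // counter starts at (K / k_unroll) - 1; k_unroll is 1 for this kernel.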
+ ka.K = (K/1) - 1;
+ ka.Bpanel = Bpanel;
+ ka.N = N;
+ ka.B_stride = B_stride;
+
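+    // B is addressed as three strips of one vector each, spaced B_stride
+    // floats apart; strip pointers beyond the remaining width alias strip 0
+    // so the kernel never reads past the end of the B panel.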
+ __asm__ __volatile__(
+ "ptrue p0.b\n"
+ "1:" // Height loop
+ "ldr x25, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x24, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x25, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x23, %x[Apanel]\n"
+ "2:" // Width loop
+ "ldr x25, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cntw x22, ALL, MUL #2\n"
+ "add x21, x25, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "add x19, x20, x19, LSL #2\n"
+ "cmp x24, x22\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov %x[Apanel], x23\n"
+ "bgt 3f\n"
+ "decw x22\n"
+ "cmp x24, x22\n"
+ "mov x20, x25\n"
+ "bgt 3f\n"
+ "mov x21, x25\n"
+ "3:" // B setup done
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
+ "cmp x19, #0x2\n"
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "ld1w { z0.s }, p0/Z, [x25]\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "ld1w { z1.s }, p0/Z, [x21]\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "ld1w { z2.s }, p0/Z, [x20]\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "ld1rw { z4.s }, p0/Z, [%x[Apanel], #4]\n"
+ "mov z19.b, #0x0\n"
+ "mov z20.b, #0x0\n"
+ "ld1rw { z5.s }, p0/Z, [%x[Apanel], #8]\n"
+ "mov z21.b, #0x0\n"
+ "mov z22.b, #0x0\n"
+ "ld1rw { z6.s }, p0/Z, [%x[Apanel], #12]\n"
+ "mov z23.b, #0x0\n"
+ "mov z24.b, #0x0\n"
+ "mov z25.b, #0x0\n"
+ "mov z26.b, #0x0\n"
+ "mov z27.b, #0x0\n"
+ "mov z28.b, #0x0\n"
+ "mov z29.b, #0x0\n"
+ "mov z30.b, #0x0\n"
+ "mov z31.b, #0x0\n"
+ "blt 5f\n"
+ "4:" // main loop head
+ "fmla z8.s, p0/M, z0.s, z3.s\n"
+ "fmla z9.s, p0/M, z1.s, z3.s\n"
+ "sub x19, x19, #0x2\n"
+ "fmla z10.s, p0/M, z2.s, z3.s\n"
+ "ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
+ "fmla z11.s, p0/M, z0.s, z4.s\n"
+ "fmla z12.s, p0/M, z1.s, z4.s\n"
+ "fmla z13.s, p0/M, z2.s, z4.s\n"
+ "ld1rw { z4.s }, p0/Z, [%x[Apanel], #20]\n"
+ "fmla z14.s, p0/M, z0.s, z5.s\n"
+ "fmla z15.s, p0/M, z1.s, z5.s\n"
+ "cmp x19, #0x2\n"
+ "fmla z16.s, p0/M, z2.s, z5.s\n"
+ "ld1rw { z5.s }, p0/Z, [%x[Apanel], #24]\n"
+ "fmla z17.s, p0/M, z0.s, z6.s\n"
+ "fmla z18.s, p0/M, z1.s, z6.s\n"
+ "fmla z19.s, p0/M, z2.s, z6.s\n"
+ "ld1rw { z6.s }, p0/Z, [%x[Apanel], #28]\n"
+ "fmla z20.s, p0/M, z0.s, z3.s\n"
+ "fmla z21.s, p0/M, z1.s, z3.s\n"
+ "fmla z22.s, p0/M, z2.s, z3.s\n"
+ "ld1rw { z3.s }, p0/Z, [%x[Apanel], #32]\n"
+ "fmla z23.s, p0/M, z0.s, z4.s\n"
+ "fmla z24.s, p0/M, z1.s, z4.s\n"
+ "fmla z25.s, p0/M, z2.s, z4.s\n"
+ "ld1rw { z4.s }, p0/Z, [%x[Apanel], #36]\n"
+ "fmla z26.s, p0/M, z0.s, z5.s\n"
+ "fmla z27.s, p0/M, z1.s, z5.s\n"
+ "fmla z28.s, p0/M, z2.s, z5.s\n"
+ "ld1rw { z5.s }, p0/Z, [%x[Apanel], #40]\n"
+ "fmla z29.s, p0/M, z0.s, z6.s\n"
+ "ld1w { z0.s }, p0/Z, [x25, #1, MUL VL]\n"
+ "fmla z30.s, p0/M, z1.s, z6.s\n"
+ "fmla z31.s, p0/M, z2.s, z6.s\n"
+ "ld1w { z1.s }, p0/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z2.s }, p0/Z, [x20, #1, MUL VL]\n"
+ "fmla z8.s, p0/M, z0.s, z3.s\n"
+ "ld1rw { z6.s }, p0/Z, [%x[Apanel], #44]\n"
+ "fmla z9.s, p0/M, z1.s, z3.s\n"
+ "fmla z10.s, p0/M, z2.s, z3.s\n"
+ "fmla z11.s, p0/M, z0.s, z4.s\n"
+ "ld1rw { z3.s }, p0/Z, [%x[Apanel], #48]\n"
+ "fmla z12.s, p0/M, z1.s, z4.s\n"
+ "fmla z13.s, p0/M, z2.s, z4.s\n"
+ "ld1rw { z4.s }, p0/Z, [%x[Apanel], #52]\n"
+ "fmla z14.s, p0/M, z0.s, z5.s\n"
+ "fmla z15.s, p0/M, z1.s, z5.s\n"
+ "addvl x25, x25, #2\n"
+ "fmla z16.s, p0/M, z2.s, z5.s\n"
+ "ld1rw { z5.s }, p0/Z, [%x[Apanel], #56]\n"
+ "fmla z17.s, p0/M, z0.s, z6.s\n"
+ "fmla z18.s, p0/M, z1.s, z6.s\n"
+ "fmla z19.s, p0/M, z2.s, z6.s\n"
+ "ld1rw { z6.s }, p0/Z, [%x[Apanel], #60]\n"
+ "addvl x21, x21, #2\n"
+ "addvl x20, x20, #2\n"
+ "add %x[Apanel], %x[Apanel], #0x40\n"
+ "fmla z20.s, p0/M, z0.s, z3.s\n"
+ "fmla z21.s, p0/M, z1.s, z3.s\n"
+ "fmla z22.s, p0/M, z2.s, z3.s\n"
+ "ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
+ "fmla z23.s, p0/M, z0.s, z4.s\n"
+ "fmla z24.s, p0/M, z1.s, z4.s\n"
+ "fmla z25.s, p0/M, z2.s, z4.s\n"
+ "fmla z26.s, p0/M, z0.s, z5.s\n"
+ "ld1rw { z4.s }, p0/Z, [%x[Apanel], #4]\n"
+ "fmla z27.s, p0/M, z1.s, z5.s\n"
+ "fmla z28.s, p0/M, z2.s, z5.s\n"
+ "ld1rw { z5.s }, p0/Z, [%x[Apanel], #8]\n"
+ "fmla z29.s, p0/M, z0.s, z6.s\n"
+ "ld1w { z0.s }, p0/Z, [x25]\n"
+ "fmla z30.s, p0/M, z1.s, z6.s\n"
+ "fmla z31.s, p0/M, z2.s, z6.s\n"
+ "ld1w { z1.s }, p0/Z, [x21]\n"
+ "ld1w { z2.s }, p0/Z, [x20]\n"
+ "ld1rw { z6.s }, p0/Z, [%x[Apanel], #12]\n"
+ "bge 4b\n"
+ "5:" // main loop skip
+ "fmla z8.s, p0/M, z0.s, z3.s\n"
+ "fmla z9.s, p0/M, z1.s, z3.s\n"
+ "addvl x25, x25, #1\n"
+ "fmla z10.s, p0/M, z2.s, z3.s\n"
+ "ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
+ "fmla z11.s, p0/M, z0.s, z4.s\n"
+ "fmla z12.s, p0/M, z1.s, z4.s\n"
+ "fmla z13.s, p0/M, z2.s, z4.s\n"
+ "ld1rw { z4.s }, p0/Z, [%x[Apanel], #20]\n"
+ "fmla z14.s, p0/M, z0.s, z5.s\n"
+ "fmla z15.s, p0/M, z1.s, z5.s\n"
+ "addvl x21, x21, #1\n"
+ "fmla z16.s, p0/M, z2.s, z5.s\n"
+ "ld1rw { z5.s }, p0/Z, [%x[Apanel], #24]\n"
+ "fmla z17.s, p0/M, z0.s, z6.s\n"
+ "fmla z18.s, p0/M, z1.s, z6.s\n"
+ "fmla z19.s, p0/M, z2.s, z6.s\n"
+ "ld1rw { z6.s }, p0/Z, [%x[Apanel], #28]\n"
+ "fmla z20.s, p0/M, z0.s, z3.s\n"
+ "fmla z21.s, p0/M, z1.s, z3.s\n"
+ "addvl x20, x20, #1\n"
+ "fmla z22.s, p0/M, z2.s, z3.s\n"
+ "fmla z23.s, p0/M, z0.s, z4.s\n"
+ "add %x[Apanel], %x[Apanel], #0x20\n"
+ "fmla z24.s, p0/M, z1.s, z4.s\n"
+ "fmla z25.s, p0/M, z2.s, z4.s\n"
+ "fmla z26.s, p0/M, z0.s, z5.s\n"
+ "fmla z27.s, p0/M, z1.s, z5.s\n"
+ "fmla z28.s, p0/M, z2.s, z5.s\n"
+ "fmla z29.s, p0/M, z0.s, z6.s\n"
+ "fmla z30.s, p0/M, z1.s, z6.s\n"
+ "fmla z31.s, p0/M, z2.s, z6.s\n"
+ "cbz x19, 6f\n"
+ "ld1w { z0.s }, p0/Z, [x25]\n"
+ "ld1w { z1.s }, p0/Z, [x21]\n"
+ "ld1w { z2.s }, p0/Z, [x20]\n"
+ "ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
+ "fmla z8.s, p0/M, z0.s, z3.s\n"
+ "ld1rw { z4.s }, p0/Z, [%x[Apanel], #4]\n"
+ "ld1rw { z5.s }, p0/Z, [%x[Apanel], #8]\n"
+ "fmla z9.s, p0/M, z1.s, z3.s\n"
+ "ld1rw { z6.s }, p0/Z, [%x[Apanel], #12]\n"
+ "fmla z10.s, p0/M, z2.s, z3.s\n"
+ "fmla z11.s, p0/M, z0.s, z4.s\n"
+ "fmla z12.s, p0/M, z1.s, z4.s\n"
+ "fmla z13.s, p0/M, z2.s, z4.s\n"
+ "ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
+ "fmla z14.s, p0/M, z0.s, z5.s\n"
+ "fmla z15.s, p0/M, z1.s, z5.s\n"
+ "ld1rw { z4.s }, p0/Z, [%x[Apanel], #20]\n"
+ "fmla z16.s, p0/M, z2.s, z5.s\n"
+ "fmla z17.s, p0/M, z0.s, z6.s\n"
+ "ld1rw { z5.s }, p0/Z, [%x[Apanel], #24]\n"
+ "fmla z18.s, p0/M, z1.s, z6.s\n"
+ "fmla z19.s, p0/M, z2.s, z6.s\n"
+ "ld1rw { z6.s }, p0/Z, [%x[Apanel], #28]\n"
+ "fmla z20.s, p0/M, z0.s, z3.s\n"
+ "fmla z21.s, p0/M, z1.s, z3.s\n"
+ "add %x[Apanel], %x[Apanel], #0x20\n"
+ "fmla z22.s, p0/M, z2.s, z3.s\n"
+ "fmla z23.s, p0/M, z0.s, z4.s\n"
+ "fmla z24.s, p0/M, z1.s, z4.s\n"
+ "fmla z25.s, p0/M, z2.s, z4.s\n"
+ "fmla z26.s, p0/M, z0.s, z5.s\n"
+ "fmla z27.s, p0/M, z1.s, z5.s\n"
+ "fmla z28.s, p0/M, z2.s, z5.s\n"
+ "fmla z29.s, p0/M, z0.s, z6.s\n"
+ "fmla z30.s, p0/M, z1.s, z6.s\n"
+ "fmla z31.s, p0/M, z2.s, z6.s\n"
+ "6:" // multiply loop done
+ "decw x24, ALL, MUL #3\n"
+ "st1w { z8.s }, p0, [%x[Cpanel]]\n"
+ "cmp x24, XZR\n"
+ "st1w { z9.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
+ "st1w { z10.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
+ "st1w { z12.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
+ "st1w { z13.s }, p0, [%x[Cpanel], #5, MUL VL]\n"
+ "st1w { z14.s }, p0, [%x[Cpanel], #6, MUL VL]\n"
+ "st1w { z15.s }, p0, [%x[Cpanel], #7, MUL VL]\n"
+ "addvl %x[Cpanel], %x[Cpanel], #16\n"
+ "st1w { z16.s }, p0, [%x[Cpanel], #-8, MUL VL]\n"
+ "st1w { z17.s }, p0, [%x[Cpanel], #-7, MUL VL]\n"
+ "st1w { z18.s }, p0, [%x[Cpanel], #-6, MUL VL]\n"
+ "st1w { z19.s }, p0, [%x[Cpanel], #-5, MUL VL]\n"
+ "st1w { z20.s }, p0, [%x[Cpanel], #-4, MUL VL]\n"
+ "st1w { z21.s }, p0, [%x[Cpanel], #-3, MUL VL]\n"
+ "st1w { z22.s }, p0, [%x[Cpanel], #-2, MUL VL]\n"
+ "st1w { z23.s }, p0, [%x[Cpanel], #-1, MUL VL]\n"
+ "st1w { z24.s }, p0, [%x[Cpanel]]\n"
+ "st1w { z25.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
+ "st1w { z26.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
+ "st1w { z27.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
+ "st1w { z28.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
+ "st1w { z29.s }, p0, [%x[Cpanel], #5, MUL VL]\n"
+ "st1w { z30.s }, p0, [%x[Cpanel], #6, MUL VL]\n"
+ "st1w { z31.s }, p0, [%x[Cpanel], #7, MUL VL]\n"
+ "addvl %x[Cpanel], %x[Cpanel], #8\n"
+ "bgt 2b\n"
+ "subs %x[ablocks], %x[ablocks], #0x1\n"
+ "bne 1b\n"
+ : [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
+ : [args_ptr] "r" (&ka), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr))
+ : "cc", "memory", "p0", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace arm_gemm
+#endif // ARM_COMPUTE_ENABLE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL/generic.cpp
new file mode 100644
index 0000000000..1e9a3f119e
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL/generic.cpp
@@ -0,0 +1,273 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include <cstddef>
+
+namespace arm_gemm {
+
+void sve_ffinterleaved_fp32_mla_8x3VL(
+ const float *Apanel,
+ const float *Bpanel,
+ size_t B_stride,
+ float *Cpanel,
+ int ablocks,
+ size_t N,
+ int K) {
+
+ struct KernelArgs {
+ size_t K = {};
+ const float *Bpanel = {};
+ size_t N = {};
+ size_t B_stride = {};
+ const float *cur_B_ptr = {};
+ } ka;
+
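+    // One K iteration is peeled into the tail after the main loop, so the
+    // counter starts at (K / k_unroll) - 1; k_unroll is 1 for this kernel.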
+ ka.K = (K/1) - 1;
+ ka.Bpanel = Bpanel;
+ ka.N = N;
+ ka.B_stride = B_stride;
+
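+    // B is addressed as three strips of one vector each, spaced B_stride
+    // floats apart; strip pointers beyond the remaining width alias strip 0
+    // so the kernel never reads past the end of the B panel.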
+ __asm__ __volatile__(
+ "ptrue p0.b\n"
+ "1:" // Height loop
+ "ldr x25, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x24, [%x[args_ptr], %[offsetof_N]]\n"
+ "str x25, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov x23, %x[Apanel]\n"
+ "2:" // Width loop
+ "ldr x25, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x19, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cntw x22, ALL, MUL #2\n"
+ "add x21, x25, x19, LSL #2\n"
+ "add x20, x21, x19, LSL #2\n"
+ "add x19, x20, x19, LSL #2\n"
+ "cmp x24, x22\n"
+ "str x19, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "mov %x[Apanel], x23\n"
+ "bgt 3f\n"
+ "decw x22\n"
+ "cmp x24, x22\n"
+ "mov x20, x25\n"
+ "bgt 3f\n"
+ "mov x21, x25\n"
+ "3:" // B setup done
+ "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
+ "cmp x19, #0x2\n"
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "ld1rqw { z0.s }, p0/Z, [%x[Apanel]]\n"
+ "mov z11.b, #0x0\n"
+ "mov z12.b, #0x0\n"
+ "ld1rqw { z1.s }, p0/Z, [%x[Apanel], #16]\n"
+ "mov z13.b, #0x0\n"
+ "mov z14.b, #0x0\n"
+ "ld1w { z4.s }, p0/Z, [x25]\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "ld1w { z5.s }, p0/Z, [x21]\n"
+ "mov z17.b, #0x0\n"
+ "mov z18.b, #0x0\n"
+ "ld1w { z6.s }, p0/Z, [x20]\n"
+ "mov z19.b, #0x0\n"
+ "mov z20.b, #0x0\n"
+ "mov z21.b, #0x0\n"
+ "mov z22.b, #0x0\n"
+ "mov z23.b, #0x0\n"
+ "mov z24.b, #0x0\n"
+ "mov z25.b, #0x0\n"
+ "mov z26.b, #0x0\n"
+ "mov z27.b, #0x0\n"
+ "mov z28.b, #0x0\n"
+ "mov z29.b, #0x0\n"
+ "mov z30.b, #0x0\n"
+ "mov z31.b, #0x0\n"
+ "blt 5f\n"
+ "4:" // main loop head
+ "fmla z8.s, z4.s, z0.s[0]\n"
+ "fmla z11.s, z4.s, z0.s[1]\n"
+ "ld1rqw { z2.s }, p0/Z, [%x[Apanel], #32]\n"
+ "fmla z14.s, z4.s, z0.s[2]\n"
+ "fmla z17.s, z4.s, z0.s[3]\n"
+ "ld1rqw { z3.s }, p0/Z, [%x[Apanel], #48]\n"
+ "fmla z20.s, z4.s, z1.s[0]\n"
+ "fmla z23.s, z4.s, z1.s[1]\n"
+ "sub x19, x19, #0x2\n"
+ "fmla z26.s, z4.s, z1.s[2]\n"
+ "fmla z29.s, z4.s, z1.s[3]\n"
+ "ld1w { z4.s }, p0/Z, [x25, #1, MUL VL]\n"
+ "fmla z9.s, z5.s, z0.s[0]\n"
+ "fmla z12.s, z5.s, z0.s[1]\n"
+ "addvl x25, x25, #2\n"
+ "fmla z15.s, z5.s, z0.s[2]\n"
+ "fmla z18.s, z5.s, z0.s[3]\n"
+ "cmp x19, #0x2\n"
+ "fmla z21.s, z5.s, z1.s[0]\n"
+ "fmla z24.s, z5.s, z1.s[1]\n"
+ "add %x[Apanel], %x[Apanel], #0x40\n"
+ "fmla z27.s, z5.s, z1.s[2]\n"
+ "fmla z30.s, z5.s, z1.s[3]\n"
+ "ld1w { z5.s }, p0/Z, [x21, #1, MUL VL]\n"
+ "fmla z10.s, z6.s, z0.s[0]\n"
+ "fmla z13.s, z6.s, z0.s[1]\n"
+ "addvl x21, x21, #2\n"
+ "fmla z16.s, z6.s, z0.s[2]\n"
+ "fmla z19.s, z6.s, z0.s[3]\n"
+ "ld1rqw { z0.s }, p0/Z, [%x[Apanel]]\n"
+ "fmla z22.s, z6.s, z1.s[0]\n"
+ "fmla z25.s, z6.s, z1.s[1]\n"
+ "fmla z28.s, z6.s, z1.s[2]\n"
+ "fmla z31.s, z6.s, z1.s[3]\n"
+ "ld1w { z6.s }, p0/Z, [x20, #1, MUL VL]\n"
+ "addvl x20, x20, #2\n"
+ "fmla z8.s, z4.s, z2.s[0]\n"
+ "fmla z11.s, z4.s, z2.s[1]\n"
+ "fmla z14.s, z4.s, z2.s[2]\n"
+ "fmla z17.s, z4.s, z2.s[3]\n"
+ "ld1rqw { z1.s }, p0/Z, [%x[Apanel], #16]\n"
+ "fmla z20.s, z4.s, z3.s[0]\n"
+ "fmla z23.s, z4.s, z3.s[1]\n"
+ "fmla z26.s, z4.s, z3.s[2]\n"
+ "fmla z29.s, z4.s, z3.s[3]\n"
+ "ld1w { z4.s }, p0/Z, [x25]\n"
+ "fmla z9.s, z5.s, z2.s[0]\n"
+ "fmla z12.s, z5.s, z2.s[1]\n"
+ "fmla z15.s, z5.s, z2.s[2]\n"
+ "fmla z18.s, z5.s, z2.s[3]\n"
+ "fmla z21.s, z5.s, z3.s[0]\n"
+ "fmla z24.s, z5.s, z3.s[1]\n"
+ "fmla z27.s, z5.s, z3.s[2]\n"
+ "fmla z30.s, z5.s, z3.s[3]\n"
+ "ld1w { z5.s }, p0/Z, [x21]\n"
+ "fmla z10.s, z6.s, z2.s[0]\n"
+ "fmla z13.s, z6.s, z2.s[1]\n"
+ "fmla z16.s, z6.s, z2.s[2]\n"
+ "fmla z19.s, z6.s, z2.s[3]\n"
+ "fmla z22.s, z6.s, z3.s[0]\n"
+ "fmla z25.s, z6.s, z3.s[1]\n"
+ "fmla z28.s, z6.s, z3.s[2]\n"
+ "fmla z31.s, z6.s, z3.s[3]\n"
+ "ld1w { z6.s }, p0/Z, [x20]\n"
+ "bge 4b\n"
+ "5:" // main loop skip
+ "fmla z8.s, z4.s, z0.s[0]\n"
+ "fmla z11.s, z4.s, z0.s[1]\n"
+ "add %x[Apanel], %x[Apanel], #0x20\n"
+ "fmla z14.s, z4.s, z0.s[2]\n"
+ "fmla z17.s, z4.s, z0.s[3]\n"
+ "addvl x25, x25, #1\n"
+ "fmla z20.s, z4.s, z1.s[0]\n"
+ "fmla z23.s, z4.s, z1.s[1]\n"
+ "addvl x21, x21, #1\n"
+ "fmla z26.s, z4.s, z1.s[2]\n"
+ "fmla z29.s, z4.s, z1.s[3]\n"
+ "addvl x20, x20, #1\n"
+ "fmla z9.s, z5.s, z0.s[0]\n"
+ "fmla z12.s, z5.s, z0.s[1]\n"
+ "fmla z15.s, z5.s, z0.s[2]\n"
+ "fmla z18.s, z5.s, z0.s[3]\n"
+ "fmla z21.s, z5.s, z1.s[0]\n"
+ "fmla z24.s, z5.s, z1.s[1]\n"
+ "fmla z27.s, z5.s, z1.s[2]\n"
+ "fmla z30.s, z5.s, z1.s[3]\n"
+ "fmla z10.s, z6.s, z0.s[0]\n"
+ "fmla z13.s, z6.s, z0.s[1]\n"
+ "fmla z16.s, z6.s, z0.s[2]\n"
+ "fmla z19.s, z6.s, z0.s[3]\n"
+ "fmla z22.s, z6.s, z1.s[0]\n"
+ "fmla z25.s, z6.s, z1.s[1]\n"
+ "fmla z28.s, z6.s, z1.s[2]\n"
+ "fmla z31.s, z6.s, z1.s[3]\n"
+ "cbz x19, 6f\n"
+ "ld1rqw { z0.s }, p0/Z, [%x[Apanel]]\n"
+ "ld1rqw { z1.s }, p0/Z, [%x[Apanel], #16]\n"
+ "add %x[Apanel], %x[Apanel], #0x20\n"
+ "ld1w { z7.s }, p0/Z, [x25]\n"
+ "ld1w { z4.s }, p0/Z, [x21]\n"
+ "fmla z8.s, z7.s, z0.s[0]\n"
+ "ld1w { z5.s }, p0/Z, [x20]\n"
+ "fmla z11.s, z7.s, z0.s[1]\n"
+ "fmla z14.s, z7.s, z0.s[2]\n"
+ "fmla z17.s, z7.s, z0.s[3]\n"
+ "fmla z20.s, z7.s, z1.s[0]\n"
+ "fmla z23.s, z7.s, z1.s[1]\n"
+ "fmla z26.s, z7.s, z1.s[2]\n"
+ "fmla z29.s, z7.s, z1.s[3]\n"
+ "fmla z9.s, z4.s, z0.s[0]\n"
+ "fmla z12.s, z4.s, z0.s[1]\n"
+ "fmla z15.s, z4.s, z0.s[2]\n"
+ "fmla z18.s, z4.s, z0.s[3]\n"
+ "fmla z21.s, z4.s, z1.s[0]\n"
+ "fmla z24.s, z4.s, z1.s[1]\n"
+ "fmla z27.s, z4.s, z1.s[2]\n"
+ "fmla z30.s, z4.s, z1.s[3]\n"
+ "fmla z10.s, z5.s, z0.s[0]\n"
+ "fmla z13.s, z5.s, z0.s[1]\n"
+ "fmla z16.s, z5.s, z0.s[2]\n"
+ "fmla z19.s, z5.s, z0.s[3]\n"
+ "fmla z22.s, z5.s, z1.s[0]\n"
+ "fmla z25.s, z5.s, z1.s[1]\n"
+ "fmla z28.s, z5.s, z1.s[2]\n"
+ "fmla z31.s, z5.s, z1.s[3]\n"
+ "6:" // multiply loop done
+ "decw x24, ALL, MUL #3\n"
+ "st1w { z8.s }, p0, [%x[Cpanel]]\n"
+ "cmp x24, XZR\n"
+ "st1w { z9.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
+ "st1w { z10.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
+ "st1w { z12.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
+ "st1w { z13.s }, p0, [%x[Cpanel], #5, MUL VL]\n"
+ "st1w { z14.s }, p0, [%x[Cpanel], #6, MUL VL]\n"
+ "st1w { z15.s }, p0, [%x[Cpanel], #7, MUL VL]\n"
+ "addvl %x[Cpanel], %x[Cpanel], #16\n"
+ "st1w { z16.s }, p0, [%x[Cpanel], #-8, MUL VL]\n"
+ "st1w { z17.s }, p0, [%x[Cpanel], #-7, MUL VL]\n"
+ "st1w { z18.s }, p0, [%x[Cpanel], #-6, MUL VL]\n"
+ "st1w { z19.s }, p0, [%x[Cpanel], #-5, MUL VL]\n"
+ "st1w { z20.s }, p0, [%x[Cpanel], #-4, MUL VL]\n"
+ "st1w { z21.s }, p0, [%x[Cpanel], #-3, MUL VL]\n"
+ "st1w { z22.s }, p0, [%x[Cpanel], #-2, MUL VL]\n"
+ "st1w { z23.s }, p0, [%x[Cpanel], #-1, MUL VL]\n"
+ "st1w { z24.s }, p0, [%x[Cpanel]]\n"
+ "st1w { z25.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
+ "st1w { z26.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
+ "st1w { z27.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
+ "st1w { z28.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
+ "st1w { z29.s }, p0, [%x[Cpanel], #5, MUL VL]\n"
+ "st1w { z30.s }, p0, [%x[Cpanel], #6, MUL VL]\n"
+ "st1w { z31.s }, p0, [%x[Cpanel], #7, MUL VL]\n"
+ "addvl %x[Cpanel], %x[Cpanel], #8\n"
+ "bgt 2b\n"
+ "subs %x[ablocks], %x[ablocks], #0x1\n"
+ "bne 1b\n"
+ : [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
+ : [args_ptr] "r" (&ka), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr))
+ : "cc", "memory", "p0", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace arm_gemm
+#endif // ARM_COMPUTE_ENABLE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/misc.cpp b/src/core/NEON/kernels/arm_gemm/misc.cpp
index 229e6b56f9..cf99bbdb46 100644
--- a/src/core/NEON/kernels/arm_gemm/misc.cpp
+++ b/src/core/NEON/kernels/arm_gemm/misc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 Arm Limited.
+ * Copyright (c) 2017-2018, 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,6 +25,11 @@
#ifndef NO_MULTI_THREADING
#include <mutex>
#endif
+#include <cstdint>
+
+#include "arm_gemm.hpp"
+#include "kernel_weight_format.hpp"
+#include "utils.hpp"
namespace arm_gemm {
@@ -32,4 +37,39 @@ namespace arm_gemm {
std::mutex report_mutex;
#endif
-} // namespace arm_gemm
\ No newline at end of file
+WeightFormat get_weight_format(const KernelWeightFormat kwf, size_t element_size) {
+ if (kwf==KernelWeightFormat::NON_FIXED) {
+ return WeightFormat::UNSPECIFIED;
+ }
+
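+  // A KernelWeightFormat value packs its parameters into a bitfield:
+  //   bit  0     - width scales with the SVE vector length
+  //   bit  4     - BF16 "fast mode" variant
+  //   bits 8-11  - block size in bytes
+  //   bits 12-15 - vectors per strip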
+ uint32_t kwf_i = static_cast<uint32_t>(kwf);
+ uint32_t wf_i = 0;
+
+ const auto block_bytes = (kwf_i >> 8) & 0xf;
+ const auto vector_count = (kwf_i >> 12) & 0xf;
+
+ uint32_t vector_bytes;
+
+  // For fast-mode BF16 kernels, set the fast-mode bit in the output format
+  // and override the element size to 2 bytes (BF16).
+ if (kwf_i & 0x10) {
+ element_size = 2;
+ wf_i |= 0x10;
+ }
+
+  // Compute the total bytes in one output vector: SVE formats (bit 0 set)
+  // scale with the runtime vector length, fixed-width formats assume
+  // 128-bit (16-byte) vectors.
+ if (kwf_i & 0x1) {
+ vector_bytes = vector_count * get_vector_length<uint8_t>();
+ } else {
+ vector_bytes = vector_count * 16;
+ }
+
+ auto input_blocking = block_bytes / element_size;
+ auto output_blocking = vector_bytes / block_bytes;
+
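+  // Compose the WeightFormat value: interleave (input blocking) in bits 20
+  // and up, block length (output blocking) in bits 8 and up. For example, a
+  // one-vector-per-strip SVE format with 32-bit blocks and 4-byte elements
+  // on a 256-bit machine gives block_bytes=4, vector_bytes=32,
+  // input_blocking=1 and output_blocking=8.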
+ wf_i |= (input_blocking << 20);
+ wf_i |= (output_blocking << 8);
+
+ return static_cast<WeightFormat>(wf_i);
+}
+
+} // namespace arm_gemm