author     Georgios Pinitas <georgios.pinitas@arm.com>    2018-10-26 19:05:32 +0100
committer  Georgios Pinitas <georgios.pinitas@arm.com>    2018-11-08 12:00:31 +0000
commit     421405b6a21b124288a750e2da26dc01eb7391cb (patch)
tree       35f5655ce9d8b5921cb03630534f532e4eb47bf5
parent     f1adf11c776aebaa8da1b8644a4ba2453afd2b81 (diff)
download   ComputeLibrary-421405b6a21b124288a750e2da26dc01eb7391cb.tar.gz
COMPMID-1675: Add SVE support
Change-Id: I86679adff556b6ffc9929b35cbf1b59b3958bdb1
-rw-r--r--  SConscript | 2
-rw-r--r--  SConstruct | 26
-rw-r--r--  examples/neon_sgemm.cpp | 192
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp | 18
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp | 14
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_int16.cpp | 2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_int8.cpp | 14
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp | 12
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_3VLx8.hpp | 72
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_3VLx8/generic.cpp | 324
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_3VLx8.hpp | 72
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_3VLx8/generic.cpp | 333
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_3VLx8.hpp | 72
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_3VLx8/generic.cpp | 334
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_3VLx8.hpp | 72
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_3VLx8/generic.cpp | 328
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_sgemm_3VLx8.hpp | 75
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_sgemm_3VLx8/generic.cpp | 366
-rw-r--r--  src/core/NEON/kernels/arm_gemm/merges/list.hpp | 2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp32_2VLx8.hpp | 1208
-rw-r--r--  src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp32_3VLx8.hpp | 1564
-rw-r--r--  src/core/NEON/kernels/arm_gemm/std_transforms_sve.hpp | 71
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transform.hpp | 2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/list.hpp | 10
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_32bit.hpp | 596
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block2_16bit.hpp | 632
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block2_32bit.hpp | 632
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block4_16bit.hpp | 632
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block4_8bit.hpp | 596
-rw-r--r--  src/core/NEON/kernels/arm_gemm/utils.hpp | 24
-rw-r--r--  src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h | 28
-rw-r--r--  src/runtime/NEON/functions/NEDeconvolutionLayer.cpp | 4
-rw-r--r--  support/Semaphore.h | 5
-rw-r--r--  tests/AssetsLibrary.cpp | 4
-rw-r--r--  tests/AssetsLibrary.h | 4
-rw-r--r--  tests/SConscript | 2
-rw-r--r--  tests/TensorCache.h | 14
-rw-r--r--  tests/framework/instruments/Instruments.h | 2
38 files changed, 8318 insertions(+), 42 deletions(-)
diff --git a/SConscript b/SConscript
index 86f2789de5..7d0717b27e 100644
--- a/SConscript
+++ b/SConscript
@@ -210,6 +210,8 @@ if env['neon']:
if "arm64-v8" in env['arch']:
core_files += Glob('src/core/NEON/kernels/arm_gemm/kernels/a64_*/*.cpp')
+ if "sve" in env['arch']:
+ core_files += Glob('src/core/NEON/kernels/arm_gemm/kernels/sve_*/*.cpp')
runtime_files += Glob('src/runtime/NEON/*.cpp')
runtime_files += Glob('src/runtime/NEON/functions/*.cpp')
diff --git a/SConstruct b/SConstruct
index c1d1f716a8..5f20589aae 100644
--- a/SConstruct
+++ b/SConstruct
@@ -40,7 +40,7 @@ vars.AddVariables(
BoolVariable("debug", "Debug", False),
BoolVariable("asserts", "Enable asserts (this flag is forced to 1 for debug=1)", False),
BoolVariable("logging", "Logging (this flag is forced to 1 for debug=1)", False),
- EnumVariable("arch", "Target Architecture", "armv7a", allowed_values=("armv7a", "arm64-v8a", "arm64-v8.2-a", "x86_32", "x86_64")),
+ EnumVariable("arch", "Target Architecture", "armv7a", allowed_values=("armv7a", "arm64-v8a", "arm64-v8.2-a", "arm64-v8.2-a-sve", "x86_32", "x86_64")),
EnumVariable("os", "Target OS", "linux", allowed_values=("linux", "android", "bare_metal")),
EnumVariable("build", "Build type", "cross_compile", allowed_values=("native", "cross_compile", "embed_only")),
BoolVariable("examples", "Build example programs", True),
@@ -58,6 +58,7 @@ vars.AddVariables(
#FIXME Remove before release (And remove all references to INTERNAL_ONLY)
BoolVariable("internal_only", "Enable ARM internal only tests", False),
("extra_cxx_flags", "Extra CXX flags to be appended to the build command", ""),
+ ("extra_link_flags", "Extra LD flags to be appended to the build command", ""),
("compiler_cache", "Command to prefix to the C and C++ compiler (e.g ccache)", "")
)
@@ -174,17 +175,23 @@ elif env['arch'] == 'arm64-v8a':
prefix = "aarch64-linux-android-"
if 'clang++' in cpp_compiler:
env.Append(CXXFLAGS = ['-no-integrated-as'])
-elif env['arch'] == 'arm64-v8.2-a':
- env.Append(CXXFLAGS = ['-march=armv8.2-a+fp16']) # explicitly enable fp16 extension otherwise __ARM_FEATURE_FP16_VECTOR_ARITHMETIC is undefined
+elif 'arm64-v8.2-a' in env['arch']:
+ if env['arch'] == 'arm64-v8.2-a-sve':
+ if env['os'] != 'bare_metal':
+ print("Only bare metal SVE is supported at the moment")
+ Exit(1)
+ env.Append(CXXFLAGS = ['-march=armv8.2-a+sve+fp16+dotprod'])
+ else:
+ env.Append(CXXFLAGS = ['-march=armv8.2-a+fp16']) # explicitly enable fp16 extension otherwise __ARM_FEATURE_FP16_VECTOR_ARITHMETIC is undefined
+ if env['os'] == 'linux':
+ prefix = "aarch64-linux-gnu-"
+ elif env['os'] == 'bare_metal':
+ prefix = "aarch64-elf-"
+ elif env['os'] == 'android':
+ prefix = "aarch64-linux-android-"
env.Append(CPPDEFINES = ['ARM_COMPUTE_AARCH64_V8_2','NO_DOT_IN_TOOLCHAIN'])
if 'clang++' in cpp_compiler:
env.Append(CXXFLAGS = ['-no-integrated-as'])
- if env['os'] == 'linux':
- prefix = "aarch64-linux-gnu-"
- elif env['os'] == 'bare_metal':
- prefix = "aarch64-elf-"
- elif env['os'] == 'android':
- prefix = "aarch64-linux-android-"
elif env['arch'] == 'x86_32':
env.Append(CCFLAGS = ['-m32'])
env.Append(LINKFLAGS = ['-m32'])
@@ -274,6 +281,7 @@ if env['logging']:
env.Append(CPPPATH = ['#/include', "#"])
env.Append(CXXFLAGS = env['extra_cxx_flags'])
+env.Append(LINKFLAGS = env['extra_link_flags'])
Default( install_include("arm_compute"))
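
Illustration (not part of the patch): with the SConstruct options above, an SVE build would be requested along these lines -- a sketch only, since the surrounding toolchain setup is outside this change:

    scons arch=arm64-v8.2-a-sve os=bare_metal build=cross_compile

The new arch value selects -march=armv8.2-a+sve+fp16+dotprod and is rejected for any os other than bare_metal ("Only bare metal SVE is supported at the moment"); anything passed via the new extra_link_flags option is appended verbatim to LINKFLAGS, mirroring the existing extra_cxx_flags behaviour.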
diff --git a/examples/neon_sgemm.cpp b/examples/neon_sgemm.cpp
new file mode 100644
index 0000000000..f6f93dd507
--- /dev/null
+++ b/examples/neon_sgemm.cpp
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/NEFunctions.h"
+#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "utils/Utils.h"
+
+#include <cstdlib>
+
+using namespace arm_compute;
+using namespace utils;
+
+class NESGEMMExample : public Example
+{
+public:
+ bool do_setup(int argc, char **argv) override
+ {
+ NPYLoader npy0, npy1, npy2;
+ alpha = 1.0f;
+ beta = 0.0f;
+
+ std::ifstream stream;
+ if(argc > 1)
+ {
+ stream.open(argv[1], std::fstream::in);
+ }
+
+ if(argc < 3 || (argc < 4 && stream.bad()))
+ {
+ // Print help
+ std::cout << "Usage: 1) ./build/neon_sgemm input_matrix_1.npy input_matrix_2.npy [input_matrix_3.npy] [alpha = 1] [beta = 0]\n";
+ std::cout << " 2) ./build/neon_sgemm M N K [alpha = 1.0f] [beta = 0.0f]\n\n";
+ std::cout << "Too few or no input_matrices provided. Using M=7, N=3, K=5, alpha=1.0f and beta=0.0f\n\n";
+
+ src0.allocator()->init(TensorInfo(TensorShape(5U, 7U), 1, DataType::F32));
+ src1.allocator()->init(TensorInfo(TensorShape(3U, 5U), 1, DataType::F32));
+ src2.allocator()->init(TensorInfo(TensorShape(3U, 7U), 1, DataType::F32));
+ }
+ else
+ {
+ if(stream.good()) /* case file1.npy file2.npy [file3.npy] [alpha = 1.0f] [beta = 0.0f] */
+ {
+ npy0.open(argv[1]);
+ npy0.init_tensor(src0, DataType::F32);
+ npy1.open(argv[2]);
+ npy1.init_tensor(src1, DataType::F32);
+
+ if(argc > 3)
+ {
+ stream.close();
+ stream.clear();
+ stream.open(argv[3], std::fstream::in);
+ if(stream.good()) /* case with third file */
+ {
+ npy2.open(argv[3]);
+ npy2.init_tensor(src2, DataType::F32);
+
+ if(argc > 4)
+ {
+ // Convert string to float
+ alpha = strtof(argv[4], nullptr);
+
+ if(argc > 5)
+ {
+ // Convert string to float
+ beta = strtof(argv[5], nullptr);
+ }
+ }
+ }
+ else /* case without third file */
+ {
+ alpha = strtof(argv[3], nullptr);
+
+ if(argc > 4)
+ {
+ beta = strtof(argv[4], nullptr);
+ }
+ }
+ }
+ }
+ else /* case M N K [alpha = 1.0f] [beta = 0.0f] */
+ {
+ size_t M = strtol(argv[1], nullptr, 10);
+ size_t N = strtol(argv[2], nullptr, 10);
+ size_t K = strtol(argv[3], nullptr, 10);
+
+ src0.allocator()->init(TensorInfo(TensorShape(K, M), 1, DataType::F32));
+ src1.allocator()->init(TensorInfo(TensorShape(N, K), 1, DataType::F32));
+ src2.allocator()->init(TensorInfo(TensorShape(N, M), 1, DataType::F32));
+
+ if(argc > 4)
+ {
+ alpha = strtof(argv[4], nullptr);
+
+ if(argc > 5)
+ {
+ beta = strtof(argv[5], nullptr);
+ }
+ }
+ }
+ }
+
+ init_sgemm_output(dst, src0, src1, DataType::F32);
+
+ // Configure function
+ sgemm.configure(&src0, &src1, nullptr, &dst, alpha, beta);
+
+ // Allocate all the images
+ src0.allocator()->allocate();
+ src1.allocator()->allocate();
+ dst.allocator()->allocate();
+
+ // Fill the input images with either the data provided or random data
+ if(npy0.is_open())
+ {
+ npy0.fill_tensor(src0);
+ npy1.fill_tensor(src1);
+
+ output_filename = "sgemm_out.npy";
+ is_fortran = npy0.is_fortran();
+
+ if(npy2.is_open())
+ {
+ src2.allocator()->allocate();
+ npy2.fill_tensor(src2);
+ }
+ }
+ else
+ {
+ src2.allocator()->allocate();
+
+ fill_random_tensor(src0, -1.f, 1.f);
+ fill_random_tensor(src1, -1.f, 1.f);
+ fill_random_tensor(src2, -1.f, 1.f);
+ }
+
+ // Dummy run for CLTuner
+ sgemm.run();
+
+ return true;
+ }
+ void do_run() override
+ {
+ // Execute the function
+ sgemm.run();
+ }
+ void do_teardown() override
+ {
+ if(!output_filename.empty()) /* Save to .npy file */
+ {
+ save_to_npy(dst, output_filename, is_fortran);
+ }
+ }
+
+private:
+ Tensor src0{}, src1{}, src2{}, dst{};
+ NEGEMM sgemm{};
+ float alpha{}, beta{};
+ bool is_fortran{};
+ std::string output_filename{};
+};
+
+/** Main program for sgemm test
+ *
+ * @param[in] argc Number of arguments
+ * @param[in] argv Arguments ( [optional] Matrix A, [optional] Matrix B, [optional] Matrix C, [optional] alpha, [optional] beta )
+ */
+int main(int argc, char **argv)
+{
+ return utils::run_example<NESGEMMExample>(argc, argv);
+}
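
Illustration (not part of the patch, based on the help text the example prints): neon_sgemm accepts either NPY matrices or raw dimensions, for instance

    ./build/neon_sgemm input_matrix_1.npy input_matrix_2.npy
    ./build/neon_sgemm 128 96 64 1.0 0.0

and falls back to M=7, N=3, K=5 with random data when too few arguments are given; when NPY inputs are used the result is written to sgemm_out.npy.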
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp b/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp
index 4579ebd307..9194bdd4d4 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp
@@ -34,10 +34,22 @@
#include "kernels/a64_hgemm_24x8.hpp"
#include "kernels/a64_sgemm_12x8.hpp"
#include "kernels/a32_sgemm_8x6.hpp"
+#include "kernels/sve_interleaved_fp16_mla_3VLx8.hpp"
namespace arm_gemm {
-#ifdef __aarch64__
+#ifdef __ARM_FEATURE_SVE
+class GemmImpl_gemm_fp16_interleaved_fp16 : public GemmImplementation<__fp16, __fp16> {
+public:
+
+ UniqueGemmCommon<__fp16, __fp16> instantiate(const GemmArgs<__fp16> &args) override {
+ return UniqueGemmCommon<__fp16, __fp16>(new GemmInterleaved<interleaved_fp16_mla_3VLx8, __fp16, __fp16>(args));
+ }
+
+ GemmImpl_gemm_fp16_interleaved_fp16() : GemmImplementation<__fp16, __fp16>(GemmMethod::GEMM_INTERLEAVED_FP16) { }
+};
+
+#elif defined(__aarch64__)
#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) || defined(FP16_KERNELS)
class GemmImpl_gemm_fp16_interleaved_fp16 : public GemmImplementation<__fp16, __fp16> {
@@ -73,13 +85,13 @@ public:
GemmImpl_gemm_fp16_interleaved() : GemmImplementation<__fp16, __fp16>(GemmMethod::GEMM_INTERLEAVED) { }
};
-#if defined(__aarch64__) && (defined(__ARM_FEATURE_VECTOR_ARITHMETIC) || defined(FP16_KERNELS))
+#if defined(__aarch64__) && (defined(__ARM_FEATURE_VECTOR_ARITHMETIC) || defined(FP16_KERNELS) || defined(__ARM_FEATURE_SVE))
static GemmImpl_gemm_fp16_interleaved_fp16 gemm_fp16_interleaved_fp16_impl{};
#endif
static GemmImpl_gemm_fp16_interleaved gemm_fp16_interleaved_impl{};
static std::vector<GemmImplementation<__fp16, __fp16> *> gemm_fp16_methods = {
-#if defined(__aarch64__) && (defined(__ARM_FEATURE_VECTOR_ARITHMETIC) || defined(FP16_KERNELS))
+#if defined(__aarch64__) && (defined(__ARM_FEATURE_VECTOR_ARITHMETIC) || defined(FP16_KERNELS) || defined(__ARM_FEATURE_SVE))
&gemm_fp16_interleaved_fp16_impl,
#endif
&gemm_fp16_interleaved_impl
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
index e840e90eec..7d14971b70 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
@@ -36,10 +36,12 @@
#include "kernels/a64_sgemv_pretransposed.hpp"
#include "kernels/a64_sgemm_native_16x4.hpp"
+#include "kernels/sve_interleaved_fp32_mla_3VLx8.hpp"
+
namespace arm_gemm {
-#ifdef __aarch64__
-// SGEMM implementations for AArch64
+#if defined(__aarch64__) && !defined(__ARM_FEATURE_SVE)
+// SGEMM implementations for AArch64 without SVE
// Pretransposed GEMV
class GemmImpl_sgemm_gemv_pretransposed : public GemmImplementation<float, float> {
@@ -92,7 +94,9 @@ public:
class GemmImpl_sgemm_gemm_interleaved : public GemmImplementation<float, float> {
public:
UniqueGemmCommon<float, float> instantiate(const GemmArgs<float> &args) override {
-#ifdef __aarch64__
+#ifdef __ARM_FEATURE_SVE
+ return UniqueGemmCommon<float, float> (new GemmInterleaved<interleaved_fp32_mla_3VLx8, float, float>(args));
+#elif defined(__aarch64__)
return UniqueGemmCommon<float, float> (new GemmInterleaved<sgemm_12x8, float, float>(args));
#elif defined(__arm__)
return UniqueGemmCommon<float, float> (new GemmInterleaved<sgemm_8x6, float, float>(args));
@@ -105,7 +109,7 @@ public:
};
static GemmImpl_gemv_batched<float, float> gemv_batched_impl{};
-#ifdef __aarch64__
+#if defined(__aarch64__) && !defined(__ARM_FEATURE_SVE)
static GemmImpl_sgemm_gemv_pretransposed sgemm_gemv_pretransposed_impl{};
static GemmImpl_sgemm_gemv_native_transposed sgemm_gemv_native_transposed_impl{};
static GemmImpl_sgemm_gemm_native sgemm_gemm_native_impl{};
@@ -115,7 +119,7 @@ static GemmImpl_sgemm_gemm_interleaved sgemm_gemm_interleaved_impl{};
/* List of implementations (order matters) */
static std::vector<GemmImplementation<float, float> *> SGemmMethods = {
&gemv_batched_impl,
-#ifdef __aarch64__
+#if defined(__aarch64__) && !defined(__ARM_FEATURE_SVE)
&sgemm_gemv_pretransposed_impl,
&sgemm_gemv_native_transposed_impl,
&sgemm_gemm_native_impl,
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_int16.cpp b/src/core/NEON/kernels/arm_gemm/gemm_int16.cpp
index b7e8fa21af..ad171a7f9a 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_int16.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_int16.cpp
@@ -59,4 +59,4 @@ template bool method_is_compatible<int16_t, int32_t>(GemmMethod method, GemmArgs
} // namespace arm_gemm
-#endif // __aarch64__
+#endif // __aarch64__
\ No newline at end of file
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
index dffa056adc..627d8abdb9 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
@@ -31,9 +31,21 @@
#include "kernels/a64_gemm_s16_12x8.hpp"
#include "kernels/a64_gemm_s8_12x8.hpp"
#include "kernels/a64_gemm_s8_4x4.hpp"
+#include "kernels/sve_interleaved_s8s32_dot_3VLx8.hpp"
namespace arm_gemm {
+#ifdef __ARM_FEATURE_SVE
+class GemmImpl_gemm_s8_interleaved_dot : public GemmImplementation<int8_t, int32_t> {
+public:
+ UniqueGemmCommon<int8_t, int32_t> instantiate(const GemmArgs<int32_t> &args) override {
+ return UniqueGemmCommon<int8_t, int32_t>(new GemmInterleaved<interleaved_s8s32_dot_3VLx8, int8_t, int32_t>(args));
+ }
+
+ GemmImpl_gemm_s8_interleaved_dot() : GemmImplementation<int8_t, int32_t>(GemmMethod::GEMM_INTERLEAVED_DOT) { }
+};
+#else
+
class GemmImpl_gemm_s8_interleaved_dot : public GemmImplementation<int8_t, int32_t> {
public:
bool is_supported(const GemmArgs<int32_t> &args) override {
@@ -47,6 +59,8 @@ public:
GemmImpl_gemm_s8_interleaved_dot() : GemmImplementation<int8_t, int32_t>(GemmMethod::GEMM_INTERLEAVED_DOT) { }
};
+#endif
+
class GemmImpl_gemm_s8_interleaved : public GemmImplementation<int8_t, int32_t> {
public:
UniqueGemmCommon<int8_t, int32_t> instantiate(const GemmArgs<int32_t> &args) override {
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp
index 60b7954db3..b7c1bab6bd 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp
@@ -31,9 +31,20 @@
#include "kernels/a64_gemm_u16_12x8.hpp"
#include "kernels/a64_gemm_u8_12x8.hpp"
#include "kernels/a64_gemm_u8_4x4.hpp"
+#include "kernels/sve_interleaved_u8u32_dot_3VLx8.hpp"
namespace arm_gemm {
+#ifdef __ARM_FEATURE_SVE
+class GemmImpl_gemm_u8_interleaved_dot : public GemmImplementation<uint8_t, uint32_t> {
+public:
+ UniqueGemmCommon<uint8_t, uint32_t> instantiate(const GemmArgs<uint32_t> &args) override {
+ return UniqueGemmCommon<uint8_t, uint32_t>(new GemmInterleaved<interleaved_u8u32_dot_3VLx8, uint8_t, uint32_t>(args));
+ }
+
+ GemmImpl_gemm_u8_interleaved_dot() : GemmImplementation<uint8_t, uint32_t>(GemmMethod::GEMM_INTERLEAVED_DOT) { }
+};
+#else
class GemmImpl_gemm_u8_interleaved_dot : public GemmImplementation<uint8_t, uint32_t> {
public:
bool is_supported(const GemmArgs<uint32_t> &args) override {
@@ -46,6 +57,7 @@ public:
GemmImpl_gemm_u8_interleaved_dot() : GemmImplementation<uint8_t, uint32_t>(GemmMethod::GEMM_INTERLEAVED_DOT) { }
};
+#endif
class GemmImpl_gemm_u8_interleaved : public GemmImplementation<uint8_t, uint32_t> {
public:
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_3VLx8.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_3VLx8.hpp
new file mode 100644
index 0000000000..3fd738e673
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_3VLx8.hpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+
+#include "../std_transforms_sve.hpp"
+
+namespace arm_gemm {
+
+// Actual kernel implementations
+void sve_interleaved_fp16_mla_3VLx8(const __fp16 *, const __fp16 *, __fp16 *, int, int, int);
+
+class interleaved_fp16_mla_3VLx8 {
+public:
+ typedef __fp16 operand_type;
+ typedef __fp16 result_type;
+
+ typedef void (*kern_type)(const __fp16 *, const __fp16 *, __fp16 *, int, int, int);
+
+ /* Kernel blocking parameters */
+ static int out_width()
+ {
+ return svcnth() * 3;
+ }
+
+ static int out_height()
+ {
+ return 8;
+ }
+
+ static int k_unroll()
+ {
+ return 1;
+ }
+
+ // Use the standard fixed size transforms.
+ StdTransformsSVE<operand_type, result_type, 8, 3, 1, 1> transforms = {};
+
+ kern_type kernel=sve_interleaved_fp16_mla_3VLx8;
+
+ interleaved_fp16_mla_3VLx8(const CPUInfo *ci)
+ {
+
+ }
+};
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
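
Illustration (not part of the patch): the blocking parameters above are vector-length agnostic. svcnth() is the ACLE intrinsic that returns the number of 16-bit lanes per SVE vector, so a minimal sketch of the resulting tile size, assuming <arm_sve.h> is available, is:

    #include <arm_sve.h>
    #include <cstdio>

    int main()
    {
        // 3 vector registers of __fp16 per output row, 8 rows per tile,
        // matching the 24 accumulators z8-z31 used in generic.cpp below.
        std::printf("fp16 tile: %d x 8\n", static_cast<int>(svcnth()) * 3);
        return 0;
    }

For a 256-bit SVE implementation svcnth() == 16, giving a 48x8 tile; a 512-bit implementation doubles the width without recompiling the kernel.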
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_3VLx8/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_3VLx8/generic.cpp
new file mode 100644
index 0000000000..92ec888244
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_3VLx8/generic.cpp
@@ -0,0 +1,324 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+
+
+#include "../../asmlib.hpp"
+
+namespace arm_gemm {
+
+void sve_interleaved_fp16_mla_3VLx8(const __fp16 *Apanel, const __fp16 *Bpanel, __fp16 *Cpanel, int ablocks, int bblocks, int K) {
+ const __fp16 *a_ptr = Apanel;
+ __fp16 *c_ptr = Cpanel;
+
+ const long loops_count = (K / 2) - 1;
+ const long tails_count = K % 2;
+
+ for (int yb=0; yb<ablocks; yb++) {
+ const __fp16 *a_ptr0 = a_ptr;
+ const __fp16 *b_ptr = Bpanel;
+
+ for (int xb=0; xb<bblocks; xb++) {
+ a_ptr = a_ptr0;
+ long loops = loops_count;
+ long tails = tails_count;
+
+ __asm __volatile (
+ "mov z8.h, #0\n"
+ "ptrue p0.h\n"
+ "mov z9.h, #0\n"
+ "ld1rqh z0.h, p0/z, [%[a_ptr]]\n"
+ "mov z10.h, #0\n"
+ "ld1h z2.h, p0/z, [%[b_ptr]]\n"
+ "mov z11.h, #0\n"
+ "ld1h z3.h, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "mov z12.h, #0\n"
+ "ld1h z4.h, p0/z, [%[b_ptr], #2, MUL VL]\n"
+ "mov z13.h, #0\n"
+ "ld1h z5.h, p0/z, [%[b_ptr], #3, MUL VL]\n"
+ "mov z14.h, #0\n"
+ "ld1h z6.h, p0/z, [%[b_ptr], #4, MUL VL]\n"
+ "mov z15.h, #0\n"
+ "add %[a_ptr], %[a_ptr], #0x20\n"
+ "mov z16.h, #0\n"
+ "addvl %[b_ptr], %[b_ptr], #6\n"
+ "mov z17.h, #0\n"
+ "mov z18.h, #0\n"
+ "mov z19.h, #0\n"
+ "mov z20.h, #0\n"
+ "mov z21.h, #0\n"
+ "mov z22.h, #0\n"
+ "mov z23.h, #0\n"
+ "mov z24.h, #0\n"
+ "mov z25.h, #0\n"
+ "mov z26.h, #0\n"
+ "mov z27.h, #0\n"
+ "mov z28.h, #0\n"
+ "mov z29.h, #0\n"
+ "mov z30.h, #0\n"
+ "mov z31.h, #0\n"
+ "cbz %[loops], 1f\n"
+ "2:\n"
+ "fmla z8.h, z2.h, z0.h[0]\n"
+ "ld1h z7.h, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "fmla z9.h, z2.h, z0.h[1]\n"
+ "ld1rqh z1.h, p0/z, [%[a_ptr], #-0x10]\n"
+ "fmla z10.h, z2.h, z0.h[2]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "fmla z11.h, z2.h, z0.h[3]\n"
+ "fmla z12.h, z2.h, z0.h[4]\n"
+ "fmla z13.h, z2.h, z0.h[5]\n"
+ "fmla z14.h, z2.h, z0.h[6]\n"
+ "fmla z15.h, z2.h, z0.h[7]\n"
+ "ld1h z2.h, p0/z, [%[b_ptr]]\n"
+ "fmla z16.h, z3.h, z0.h[0]\n"
+ "fmla z17.h, z3.h, z0.h[1]\n"
+ "fmla z18.h, z3.h, z0.h[2]\n"
+ "fmla z19.h, z3.h, z0.h[3]\n"
+ "fmla z20.h, z3.h, z0.h[4]\n"
+ "fmla z21.h, z3.h, z0.h[5]\n"
+ "fmla z22.h, z3.h, z0.h[6]\n"
+ "fmla z23.h, z3.h, z0.h[7]\n"
+ "ld1h z3.h, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "fmla z24.h, z4.h, z0.h[0]\n"
+ "fmla z25.h, z4.h, z0.h[1]\n"
+ "fmla z26.h, z4.h, z0.h[2]\n"
+ "fmla z27.h, z4.h, z0.h[3]\n"
+ "fmla z28.h, z4.h, z0.h[4]\n"
+ "fmla z29.h, z4.h, z0.h[5]\n"
+ "fmla z30.h, z4.h, z0.h[6]\n"
+ "fmla z31.h, z4.h, z0.h[7]\n"
+ "ld1h z4.h, p0/z, [%[b_ptr], #2, MUL VL]\n"
+ "fmla z8.h, z5.h, z1.h[0]\n"
+ "ld1rqh z0.h, p0/z, [%[a_ptr]]\n"
+ "fmla z9.h, z5.h, z1.h[1]\n"
+ "add %[a_ptr], %[a_ptr], #0x20\n"
+ "fmla z10.h, z5.h, z1.h[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #6\n"
+ "fmla z11.h, z5.h, z1.h[3]\n"
+ "fmla z12.h, z5.h, z1.h[4]\n"
+ "fmla z13.h, z5.h, z1.h[5]\n"
+ "fmla z14.h, z5.h, z1.h[6]\n"
+ "fmla z15.h, z5.h, z1.h[7]\n"
+ "ld1h z5.h, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "fmla z16.h, z6.h, z1.h[0]\n"
+ "fmla z17.h, z6.h, z1.h[1]\n"
+ "fmla z18.h, z6.h, z1.h[2]\n"
+ "fmla z19.h, z6.h, z1.h[3]\n"
+ "fmla z20.h, z6.h, z1.h[4]\n"
+ "fmla z21.h, z6.h, z1.h[5]\n"
+ "fmla z22.h, z6.h, z1.h[6]\n"
+ "fmla z23.h, z6.h, z1.h[7]\n"
+ "ld1h z6.h, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "fmla z24.h, z7.h, z1.h[0]\n"
+ "fmla z25.h, z7.h, z1.h[1]\n"
+ "fmla z26.h, z7.h, z1.h[2]\n"
+ "fmla z27.h, z7.h, z1.h[3]\n"
+ "fmla z28.h, z7.h, z1.h[4]\n"
+ "fmla z29.h, z7.h, z1.h[5]\n"
+ "fmla z30.h, z7.h, z1.h[6]\n"
+ "fmla z31.h, z7.h, z1.h[7]\n"
+ "b.ne 2b\n"
+ "1:\n"
+ "cbz %[tails], 3f\n"
+ "fmla z8.h, z2.h, z0.h[0]\n"
+ "ld1h z7.h, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "fmla z9.h, z2.h, z0.h[1]\n"
+ "ld1rqh z1.h, p0/z, [%[a_ptr], #-0x10]\n"
+ "fmla z10.h, z2.h, z0.h[2]\n"
+ "fmla z11.h, z2.h, z0.h[3]\n"
+ "fmla z12.h, z2.h, z0.h[4]\n"
+ "fmla z13.h, z2.h, z0.h[5]\n"
+ "fmla z14.h, z2.h, z0.h[6]\n"
+ "fmla z15.h, z2.h, z0.h[7]\n"
+ "ld1h z2.h, p0/z, [%[b_ptr]]\n"
+ "fmla z16.h, z3.h, z0.h[0]\n"
+ "fmla z17.h, z3.h, z0.h[1]\n"
+ "fmla z18.h, z3.h, z0.h[2]\n"
+ "fmla z19.h, z3.h, z0.h[3]\n"
+ "fmla z20.h, z3.h, z0.h[4]\n"
+ "fmla z21.h, z3.h, z0.h[5]\n"
+ "fmla z22.h, z3.h, z0.h[6]\n"
+ "fmla z23.h, z3.h, z0.h[7]\n"
+ "ld1h z3.h, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "fmla z24.h, z4.h, z0.h[0]\n"
+ "fmla z25.h, z4.h, z0.h[1]\n"
+ "fmla z26.h, z4.h, z0.h[2]\n"
+ "fmla z27.h, z4.h, z0.h[3]\n"
+ "fmla z28.h, z4.h, z0.h[4]\n"
+ "fmla z29.h, z4.h, z0.h[5]\n"
+ "fmla z30.h, z4.h, z0.h[6]\n"
+ "fmla z31.h, z4.h, z0.h[7]\n"
+ "ld1h z4.h, p0/z, [%[b_ptr], #2, MUL VL]\n"
+ "fmla z8.h, z5.h, z1.h[0]\n"
+ "ld1rqh z0.h, p0/z, [%[a_ptr]]\n"
+ "fmla z9.h, z5.h, z1.h[1]\n"
+ "add %[a_ptr], %[a_ptr], #0x10\n"
+ "fmla z10.h, z5.h, z1.h[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #3\n"
+ "fmla z11.h, z5.h, z1.h[3]\n"
+ "fmla z12.h, z5.h, z1.h[4]\n"
+ "fmla z13.h, z5.h, z1.h[5]\n"
+ "fmla z14.h, z5.h, z1.h[6]\n"
+ "fmla z15.h, z5.h, z1.h[7]\n"
+ "fmla z16.h, z6.h, z1.h[0]\n"
+ "fmla z17.h, z6.h, z1.h[1]\n"
+ "fmla z18.h, z6.h, z1.h[2]\n"
+ "fmla z19.h, z6.h, z1.h[3]\n"
+ "fmla z20.h, z6.h, z1.h[4]\n"
+ "fmla z21.h, z6.h, z1.h[5]\n"
+ "fmla z22.h, z6.h, z1.h[6]\n"
+ "fmla z23.h, z6.h, z1.h[7]\n"
+ "fmla z24.h, z7.h, z1.h[0]\n"
+ "fmla z25.h, z7.h, z1.h[1]\n"
+ "fmla z26.h, z7.h, z1.h[2]\n"
+ "fmla z27.h, z7.h, z1.h[3]\n"
+ "fmla z28.h, z7.h, z1.h[4]\n"
+ "fmla z29.h, z7.h, z1.h[5]\n"
+ "fmla z30.h, z7.h, z1.h[6]\n"
+ "fmla z31.h, z7.h, z1.h[7]\n"
+ "fmla z8.h, z2.h, z0.h[0]\n"
+ "st1h z8.h, p0, [%[c_ptr]]\n"
+ "fmla z9.h, z2.h, z0.h[1]\n"
+ "fmla z10.h, z2.h, z0.h[2]\n"
+ "fmla z11.h, z2.h, z0.h[3]\n"
+ "fmla z12.h, z2.h, z0.h[4]\n"
+ "fmla z13.h, z2.h, z0.h[5]\n"
+ "fmla z14.h, z2.h, z0.h[6]\n"
+ "fmla z15.h, z2.h, z0.h[7]\n"
+ "fmla z16.h, z3.h, z0.h[0]\n"
+ "st1h z16.h, p0, [%[c_ptr], #1, MUL VL]\n"
+ "fmla z17.h, z3.h, z0.h[1]\n"
+ "fmla z18.h, z3.h, z0.h[2]\n"
+ "fmla z19.h, z3.h, z0.h[3]\n"
+ "fmla z20.h, z3.h, z0.h[4]\n"
+ "fmla z21.h, z3.h, z0.h[5]\n"
+ "fmla z22.h, z3.h, z0.h[6]\n"
+ "fmla z23.h, z3.h, z0.h[7]\n"
+ "fmla z24.h, z4.h, z0.h[0]\n"
+ "st1h z24.h, p0, [%[c_ptr], #2, MUL VL]\n"
+ "fmla z25.h, z4.h, z0.h[1]\n"
+ "st1h z9.h, p0, [%[c_ptr], #3, MUL VL]\n"
+ "fmla z26.h, z4.h, z0.h[2]\n"
+ "st1h z17.h, p0, [%[c_ptr], #4, MUL VL]\n"
+ "fmla z27.h, z4.h, z0.h[3]\n"
+ "st1h z25.h, p0, [%[c_ptr], #5, MUL VL]\n"
+ "fmla z28.h, z4.h, z0.h[4]\n"
+ "st1h z10.h, p0, [%[c_ptr], #6, MUL VL]\n"
+ "fmla z29.h, z4.h, z0.h[5]\n"
+ "st1h z18.h, p0, [%[c_ptr], #7, MUL VL]\n"
+ "fmla z30.h, z4.h, z0.h[6]\n"
+ "addvl %[c_ptr], %[c_ptr], #16\n"
+ "fmla z31.h, z4.h, z0.h[7]\n"
+ "b 4f\n"
+ "3:\n"
+ "fmla z8.h, z2.h, z0.h[0]\n"
+ "ld1h z7.h, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "fmla z9.h, z2.h, z0.h[1]\n"
+ "ld1rqh z1.h, p0/z, [%[a_ptr], #-0x10]\n"
+ "fmla z10.h, z2.h, z0.h[2]\n"
+ "fmla z11.h, z2.h, z0.h[3]\n"
+ "fmla z12.h, z2.h, z0.h[4]\n"
+ "fmla z13.h, z2.h, z0.h[5]\n"
+ "fmla z14.h, z2.h, z0.h[6]\n"
+ "fmla z15.h, z2.h, z0.h[7]\n"
+ "fmla z16.h, z3.h, z0.h[0]\n"
+ "fmla z17.h, z3.h, z0.h[1]\n"
+ "fmla z18.h, z3.h, z0.h[2]\n"
+ "fmla z19.h, z3.h, z0.h[3]\n"
+ "fmla z20.h, z3.h, z0.h[4]\n"
+ "fmla z21.h, z3.h, z0.h[5]\n"
+ "fmla z22.h, z3.h, z0.h[6]\n"
+ "fmla z23.h, z3.h, z0.h[7]\n"
+ "fmla z24.h, z4.h, z0.h[0]\n"
+ "fmla z25.h, z4.h, z0.h[1]\n"
+ "fmla z26.h, z4.h, z0.h[2]\n"
+ "fmla z27.h, z4.h, z0.h[3]\n"
+ "fmla z28.h, z4.h, z0.h[4]\n"
+ "fmla z29.h, z4.h, z0.h[5]\n"
+ "fmla z30.h, z4.h, z0.h[6]\n"
+ "fmla z31.h, z4.h, z0.h[7]\n"
+ "fmla z8.h, z5.h, z1.h[0]\n"
+ "st1h z8.h, p0, [%[c_ptr]]\n"
+ "fmla z9.h, z5.h, z1.h[1]\n"
+ "fmla z10.h, z5.h, z1.h[2]\n"
+ "fmla z11.h, z5.h, z1.h[3]\n"
+ "fmla z12.h, z5.h, z1.h[4]\n"
+ "fmla z13.h, z5.h, z1.h[5]\n"
+ "fmla z14.h, z5.h, z1.h[6]\n"
+ "fmla z15.h, z5.h, z1.h[7]\n"
+ "fmla z16.h, z6.h, z1.h[0]\n"
+ "st1h z16.h, p0, [%[c_ptr], #1, MUL VL]\n"
+ "fmla z17.h, z6.h, z1.h[1]\n"
+ "fmla z18.h, z6.h, z1.h[2]\n"
+ "fmla z19.h, z6.h, z1.h[3]\n"
+ "fmla z20.h, z6.h, z1.h[4]\n"
+ "fmla z21.h, z6.h, z1.h[5]\n"
+ "fmla z22.h, z6.h, z1.h[6]\n"
+ "fmla z23.h, z6.h, z1.h[7]\n"
+ "fmla z24.h, z7.h, z1.h[0]\n"
+ "st1h z24.h, p0, [%[c_ptr], #2, MUL VL]\n"
+ "fmla z25.h, z7.h, z1.h[1]\n"
+ "st1h z9.h, p0, [%[c_ptr], #3, MUL VL]\n"
+ "fmla z26.h, z7.h, z1.h[2]\n"
+ "st1h z17.h, p0, [%[c_ptr], #4, MUL VL]\n"
+ "fmla z27.h, z7.h, z1.h[3]\n"
+ "st1h z25.h, p0, [%[c_ptr], #5, MUL VL]\n"
+ "fmla z28.h, z7.h, z1.h[4]\n"
+ "st1h z10.h, p0, [%[c_ptr], #6, MUL VL]\n"
+ "fmla z29.h, z7.h, z1.h[5]\n"
+ "st1h z18.h, p0, [%[c_ptr], #7, MUL VL]\n"
+ "fmla z30.h, z7.h, z1.h[6]\n"
+ "addvl %[c_ptr], %[c_ptr], #16\n"
+ "fmla z31.h, z7.h, z1.h[7]\n"
+ "4:\n"
+ "st1h z26.h, p0, [%[c_ptr], #-8, MUL VL]\n"
+ "st1h z11.h, p0, [%[c_ptr], #-7, MUL VL]\n"
+ "st1h z19.h, p0, [%[c_ptr], #-6, MUL VL]\n"
+ "st1h z27.h, p0, [%[c_ptr], #-5, MUL VL]\n"
+ "st1h z12.h, p0, [%[c_ptr], #-4, MUL VL]\n"
+ "st1h z20.h, p0, [%[c_ptr], #-3, MUL VL]\n"
+ "st1h z28.h, p0, [%[c_ptr], #-2, MUL VL]\n"
+ "st1h z13.h, p0, [%[c_ptr], #-1, MUL VL]\n"
+ "st1h z21.h, p0, [%[c_ptr]]\n"
+ "st1h z29.h, p0, [%[c_ptr], #1, MUL VL]\n"
+ "st1h z14.h, p0, [%[c_ptr], #2, MUL VL]\n"
+ "st1h z22.h, p0, [%[c_ptr], #3, MUL VL]\n"
+ "st1h z30.h, p0, [%[c_ptr], #4, MUL VL]\n"
+ "st1h z15.h, p0, [%[c_ptr], #5, MUL VL]\n"
+ "st1h z23.h, p0, [%[c_ptr], #6, MUL VL]\n"
+ "st1h z31.h, p0, [%[c_ptr], #7, MUL VL]\n"
+ "addvl %[c_ptr], %[c_ptr], #8\n"
+ : [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
+ [loops] "+r" (loops), [tails] "+r" (tails)
+ :
+ : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ }
+ }
+}
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_3VLx8.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_3VLx8.hpp
new file mode 100644
index 0000000000..b2327f3070
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_3VLx8.hpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+
+#include "../std_transforms_sve.hpp"
+
+namespace arm_gemm {
+
+// Actual kernel implementations
+void sve_interleaved_fp32_mla_3VLx8(const float *, const float *, float *, int, int, int);
+
+class interleaved_fp32_mla_3VLx8 {
+public:
+ typedef float operand_type;
+ typedef float result_type;
+
+ typedef void (*kern_type)(const float *, const float *, float *, int, int, int);
+
+ /* Kernel blocking parameters */
+ static int out_width()
+ {
+ return svcntw() * 3;
+ }
+
+ static int out_height()
+ {
+ return 8;
+ }
+
+ static int k_unroll()
+ {
+ return 1;
+ }
+
+ // Use the standard fixed size transforms.
+ StdTransformsSVE<operand_type, result_type, 8, 3, 1, 1> transforms = {};
+
+ kern_type kernel=sve_interleaved_fp32_mla_3VLx8;
+
+ interleaved_fp32_mla_3VLx8(const CPUInfo *ci)
+ {
+
+ }
+};
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_3VLx8/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_3VLx8/generic.cpp
new file mode 100644
index 0000000000..bb08fc7cb0
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_3VLx8/generic.cpp
@@ -0,0 +1,333 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+
+
+#include "../../asmlib.hpp"
+
+namespace arm_gemm {
+
+void sve_interleaved_fp32_mla_3VLx8(const float *Apanel, const float *Bpanel, float *Cpanel, int ablocks, int bblocks, int K) {
+ const float *a_ptr = Apanel;
+ float *c_ptr = Cpanel;
+
+ const long loops_count = (K / 2) - 1;
+ const long tails_count = K % 2;
+
+ for (int yb=0; yb<ablocks; yb++) {
+ const float *a_ptr0 = a_ptr;
+ const float *b_ptr = Bpanel;
+
+ for (int xb=0; xb<bblocks; xb++) {
+ a_ptr = a_ptr0;
+ long loops = loops_count;
+ long tails = tails_count;
+
+ __asm __volatile (
+ "mov z8.s, #0\n"
+ "ptrue p0.s\n"
+ "mov z9.s, #0\n"
+ "ld1rqw z0.s, p0/z, [%[a_ptr]]\n"
+ "mov z10.s, #0\n"
+ "ld1w z4.s, p0/z, [%[b_ptr]]\n"
+ "mov z11.s, #0\n"
+ "ld1rqw z1.s, p0/z, [%[a_ptr], #0x10]\n"
+ "mov z12.s, #0\n"
+ "ld1w z5.s, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "mov z13.s, #0\n"
+ "ld1rqw z2.s, p0/z, [%[a_ptr], #0x20]\n"
+ "mov z14.s, #0\n"
+ "add %[a_ptr], %[a_ptr], #0x40\n"
+ "mov z15.s, #0\n"
+ "addvl %[b_ptr], %[b_ptr], #3\n"
+ "mov z16.s, #0\n"
+ "mov z17.s, #0\n"
+ "mov z18.s, #0\n"
+ "mov z19.s, #0\n"
+ "mov z20.s, #0\n"
+ "mov z21.s, #0\n"
+ "mov z22.s, #0\n"
+ "mov z23.s, #0\n"
+ "mov z24.s, #0\n"
+ "mov z25.s, #0\n"
+ "mov z26.s, #0\n"
+ "mov z27.s, #0\n"
+ "mov z28.s, #0\n"
+ "mov z29.s, #0\n"
+ "mov z30.s, #0\n"
+ "mov z31.s, #0\n"
+ "cbz %[loops], 1f\n"
+ "2:\n"
+ "fmla z8.s, z4.s, z0.s[0]\n"
+ "ld1w z6.s, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "fmla z9.s, z4.s, z0.s[1]\n"
+ "ld1rqw z3.s, p0/z, [%[a_ptr], #-0x10]\n"
+ "fmla z10.s, z4.s, z0.s[2]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "fmla z11.s, z4.s, z0.s[3]\n"
+ "fmla z20.s, z4.s, z1.s[0]\n"
+ "fmla z21.s, z4.s, z1.s[1]\n"
+ "fmla z22.s, z4.s, z1.s[2]\n"
+ "fmla z23.s, z4.s, z1.s[3]\n"
+ "ld1w z4.s, p0/z, [%[b_ptr]]\n"
+ "fmla z12.s, z5.s, z0.s[0]\n"
+ "fmla z13.s, z5.s, z0.s[1]\n"
+ "fmla z14.s, z5.s, z0.s[2]\n"
+ "fmla z15.s, z5.s, z0.s[3]\n"
+ "fmla z24.s, z5.s, z1.s[0]\n"
+ "fmla z25.s, z5.s, z1.s[1]\n"
+ "fmla z26.s, z5.s, z1.s[2]\n"
+ "fmla z27.s, z5.s, z1.s[3]\n"
+ "ld1w z5.s, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "fmla z16.s, z6.s, z0.s[0]\n"
+ "fmla z17.s, z6.s, z0.s[1]\n"
+ "fmla z18.s, z6.s, z0.s[2]\n"
+ "fmla z19.s, z6.s, z0.s[3]\n"
+ "ld1rqw z0.s, p0/z, [%[a_ptr]]\n"
+ "fmla z28.s, z6.s, z1.s[0]\n"
+ "fmla z29.s, z6.s, z1.s[1]\n"
+ "fmla z30.s, z6.s, z1.s[2]\n"
+ "fmla z31.s, z6.s, z1.s[3]\n"
+ "ld1w z6.s, p0/z, [%[b_ptr], #2, MUL VL]\n"
+ "fmla z8.s, z4.s, z2.s[0]\n"
+ "ld1rqw z1.s, p0/z, [%[a_ptr], #0x10]\n"
+ "fmla z9.s, z4.s, z2.s[1]\n"
+ "add %[a_ptr], %[a_ptr], #0x40\n"
+ "fmla z10.s, z4.s, z2.s[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #6\n"
+ "fmla z11.s, z4.s, z2.s[3]\n"
+ "fmla z20.s, z4.s, z3.s[0]\n"
+ "fmla z21.s, z4.s, z3.s[1]\n"
+ "fmla z22.s, z4.s, z3.s[2]\n"
+ "fmla z23.s, z4.s, z3.s[3]\n"
+ "ld1w z4.s, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "fmla z12.s, z5.s, z2.s[0]\n"
+ "fmla z13.s, z5.s, z2.s[1]\n"
+ "fmla z14.s, z5.s, z2.s[2]\n"
+ "fmla z15.s, z5.s, z2.s[3]\n"
+ "fmla z24.s, z5.s, z3.s[0]\n"
+ "fmla z25.s, z5.s, z3.s[1]\n"
+ "fmla z26.s, z5.s, z3.s[2]\n"
+ "fmla z27.s, z5.s, z3.s[3]\n"
+ "ld1w z5.s, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "fmla z16.s, z6.s, z2.s[0]\n"
+ "fmla z17.s, z6.s, z2.s[1]\n"
+ "fmla z18.s, z6.s, z2.s[2]\n"
+ "fmla z19.s, z6.s, z2.s[3]\n"
+ "ld1rqw z2.s, p0/z, [%[a_ptr], #-0x20]\n"
+ "fmla z28.s, z6.s, z3.s[0]\n"
+ "fmla z29.s, z6.s, z3.s[1]\n"
+ "fmla z30.s, z6.s, z3.s[2]\n"
+ "fmla z31.s, z6.s, z3.s[3]\n"
+ "b.ne 2b\n"
+ "1:\n"
+ "cbz %[tails], 3f\n"
+ "fmla z8.s, z4.s, z0.s[0]\n"
+ "ld1w z6.s, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "fmla z9.s, z4.s, z0.s[1]\n"
+ "ld1rqw z3.s, p0/z, [%[a_ptr], #-0x10]\n"
+ "fmla z10.s, z4.s, z0.s[2]\n"
+ "fmla z11.s, z4.s, z0.s[3]\n"
+ "fmla z20.s, z4.s, z1.s[0]\n"
+ "fmla z21.s, z4.s, z1.s[1]\n"
+ "fmla z22.s, z4.s, z1.s[2]\n"
+ "fmla z23.s, z4.s, z1.s[3]\n"
+ "ld1w z4.s, p0/z, [%[b_ptr]]\n"
+ "fmla z12.s, z5.s, z0.s[0]\n"
+ "fmla z13.s, z5.s, z0.s[1]\n"
+ "fmla z14.s, z5.s, z0.s[2]\n"
+ "fmla z15.s, z5.s, z0.s[3]\n"
+ "fmla z24.s, z5.s, z1.s[0]\n"
+ "fmla z25.s, z5.s, z1.s[1]\n"
+ "fmla z26.s, z5.s, z1.s[2]\n"
+ "fmla z27.s, z5.s, z1.s[3]\n"
+ "ld1w z5.s, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "fmla z16.s, z6.s, z0.s[0]\n"
+ "fmla z17.s, z6.s, z0.s[1]\n"
+ "fmla z18.s, z6.s, z0.s[2]\n"
+ "fmla z19.s, z6.s, z0.s[3]\n"
+ "ld1rqw z0.s, p0/z, [%[a_ptr]]\n"
+ "fmla z28.s, z6.s, z1.s[0]\n"
+ "fmla z29.s, z6.s, z1.s[1]\n"
+ "fmla z30.s, z6.s, z1.s[2]\n"
+ "fmla z31.s, z6.s, z1.s[3]\n"
+ "ld1w z6.s, p0/z, [%[b_ptr], #2, MUL VL]\n"
+ "fmla z8.s, z4.s, z2.s[0]\n"
+ "ld1rqw z1.s, p0/z, [%[a_ptr], #0x10]\n"
+ "fmla z9.s, z4.s, z2.s[1]\n"
+ "add %[a_ptr], %[a_ptr], #0x20\n"
+ "fmla z10.s, z4.s, z2.s[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #6\n"
+ "fmla z11.s, z4.s, z2.s[3]\n"
+ "fmla z20.s, z4.s, z3.s[0]\n"
+ "fmla z21.s, z4.s, z3.s[1]\n"
+ "fmla z22.s, z4.s, z3.s[2]\n"
+ "fmla z23.s, z4.s, z3.s[3]\n"
+ "ld1w z4.s, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "fmla z12.s, z5.s, z2.s[0]\n"
+ "fmla z13.s, z5.s, z2.s[1]\n"
+ "fmla z14.s, z5.s, z2.s[2]\n"
+ "fmla z15.s, z5.s, z2.s[3]\n"
+ "fmla z24.s, z5.s, z3.s[0]\n"
+ "fmla z25.s, z5.s, z3.s[1]\n"
+ "fmla z26.s, z5.s, z3.s[2]\n"
+ "fmla z27.s, z5.s, z3.s[3]\n"
+ "ld1w z5.s, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "fmla z16.s, z6.s, z2.s[0]\n"
+ "fmla z17.s, z6.s, z2.s[1]\n"
+ "fmla z18.s, z6.s, z2.s[2]\n"
+ "fmla z19.s, z6.s, z2.s[3]\n"
+ "fmla z28.s, z6.s, z3.s[0]\n"
+ "fmla z29.s, z6.s, z3.s[1]\n"
+ "fmla z30.s, z6.s, z3.s[2]\n"
+ "fmla z31.s, z6.s, z3.s[3]\n"
+ "ld1w z6.s, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "fmla z8.s, z4.s, z0.s[0]\n"
+ "st1w z8.s, p0, [%[c_ptr]]\n"
+ "fmla z9.s, z4.s, z0.s[1]\n"
+ "fmla z10.s, z4.s, z0.s[2]\n"
+ "fmla z11.s, z4.s, z0.s[3]\n"
+ "fmla z20.s, z4.s, z1.s[0]\n"
+ "fmla z21.s, z4.s, z1.s[1]\n"
+ "fmla z22.s, z4.s, z1.s[2]\n"
+ "fmla z23.s, z4.s, z1.s[3]\n"
+ "fmla z12.s, z5.s, z0.s[0]\n"
+ "st1w z12.s, p0, [%[c_ptr], #1, MUL VL]\n"
+ "fmla z13.s, z5.s, z0.s[1]\n"
+ "fmla z14.s, z5.s, z0.s[2]\n"
+ "fmla z15.s, z5.s, z0.s[3]\n"
+ "fmla z24.s, z5.s, z1.s[0]\n"
+ "fmla z25.s, z5.s, z1.s[1]\n"
+ "fmla z26.s, z5.s, z1.s[2]\n"
+ "fmla z27.s, z5.s, z1.s[3]\n"
+ "fmla z16.s, z6.s, z0.s[0]\n"
+ "st1w z16.s, p0, [%[c_ptr], #2, MUL VL]\n"
+ "fmla z17.s, z6.s, z0.s[1]\n"
+ "st1w z9.s, p0, [%[c_ptr], #3, MUL VL]\n"
+ "fmla z18.s, z6.s, z0.s[2]\n"
+ "st1w z13.s, p0, [%[c_ptr], #4, MUL VL]\n"
+ "fmla z19.s, z6.s, z0.s[3]\n"
+ "st1w z17.s, p0, [%[c_ptr], #5, MUL VL]\n"
+ "fmla z28.s, z6.s, z1.s[0]\n"
+ "st1w z10.s, p0, [%[c_ptr], #6, MUL VL]\n"
+ "fmla z29.s, z6.s, z1.s[1]\n"
+ "st1w z14.s, p0, [%[c_ptr], #7, MUL VL]\n"
+ "fmla z30.s, z6.s, z1.s[2]\n"
+ "addvl %[c_ptr], %[c_ptr], #16\n"
+ "fmla z31.s, z6.s, z1.s[3]\n"
+ "b 4f\n"
+ "3:\n"
+ "fmla z8.s, z4.s, z0.s[0]\n"
+ "ld1w z6.s, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "fmla z9.s, z4.s, z0.s[1]\n"
+ "ld1rqw z3.s, p0/z, [%[a_ptr], #-0x10]\n"
+ "fmla z10.s, z4.s, z0.s[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #3\n"
+ "fmla z11.s, z4.s, z0.s[3]\n"
+ "fmla z20.s, z4.s, z1.s[0]\n"
+ "fmla z21.s, z4.s, z1.s[1]\n"
+ "fmla z22.s, z4.s, z1.s[2]\n"
+ "fmla z23.s, z4.s, z1.s[3]\n"
+ "ld1w z4.s, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "fmla z12.s, z5.s, z0.s[0]\n"
+ "fmla z13.s, z5.s, z0.s[1]\n"
+ "fmla z14.s, z5.s, z0.s[2]\n"
+ "fmla z15.s, z5.s, z0.s[3]\n"
+ "fmla z24.s, z5.s, z1.s[0]\n"
+ "fmla z25.s, z5.s, z1.s[1]\n"
+ "fmla z26.s, z5.s, z1.s[2]\n"
+ "fmla z27.s, z5.s, z1.s[3]\n"
+ "ld1w z5.s, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "fmla z16.s, z6.s, z0.s[0]\n"
+ "fmla z17.s, z6.s, z0.s[1]\n"
+ "fmla z18.s, z6.s, z0.s[2]\n"
+ "fmla z19.s, z6.s, z0.s[3]\n"
+ "fmla z28.s, z6.s, z1.s[0]\n"
+ "fmla z29.s, z6.s, z1.s[1]\n"
+ "fmla z30.s, z6.s, z1.s[2]\n"
+ "fmla z31.s, z6.s, z1.s[3]\n"
+ "ld1w z6.s, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "fmla z8.s, z4.s, z2.s[0]\n"
+ "st1w z8.s, p0, [%[c_ptr]]\n"
+ "fmla z9.s, z4.s, z2.s[1]\n"
+ "fmla z10.s, z4.s, z2.s[2]\n"
+ "fmla z11.s, z4.s, z2.s[3]\n"
+ "fmla z20.s, z4.s, z3.s[0]\n"
+ "fmla z21.s, z4.s, z3.s[1]\n"
+ "fmla z22.s, z4.s, z3.s[2]\n"
+ "fmla z23.s, z4.s, z3.s[3]\n"
+ "fmla z12.s, z5.s, z2.s[0]\n"
+ "st1w z12.s, p0, [%[c_ptr], #1, MUL VL]\n"
+ "fmla z13.s, z5.s, z2.s[1]\n"
+ "fmla z14.s, z5.s, z2.s[2]\n"
+ "fmla z15.s, z5.s, z2.s[3]\n"
+ "fmla z24.s, z5.s, z3.s[0]\n"
+ "fmla z25.s, z5.s, z3.s[1]\n"
+ "fmla z26.s, z5.s, z3.s[2]\n"
+ "fmla z27.s, z5.s, z3.s[3]\n"
+ "fmla z16.s, z6.s, z2.s[0]\n"
+ "st1w z16.s, p0, [%[c_ptr], #2, MUL VL]\n"
+ "fmla z17.s, z6.s, z2.s[1]\n"
+ "st1w z9.s, p0, [%[c_ptr], #3, MUL VL]\n"
+ "fmla z18.s, z6.s, z2.s[2]\n"
+ "st1w z13.s, p0, [%[c_ptr], #4, MUL VL]\n"
+ "fmla z19.s, z6.s, z2.s[3]\n"
+ "st1w z17.s, p0, [%[c_ptr], #5, MUL VL]\n"
+ "fmla z28.s, z6.s, z3.s[0]\n"
+ "st1w z10.s, p0, [%[c_ptr], #6, MUL VL]\n"
+ "fmla z29.s, z6.s, z3.s[1]\n"
+ "st1w z14.s, p0, [%[c_ptr], #7, MUL VL]\n"
+ "fmla z30.s, z6.s, z3.s[2]\n"
+ "addvl %[c_ptr], %[c_ptr], #16\n"
+ "fmla z31.s, z6.s, z3.s[3]\n"
+ "4:\n"
+ "st1w z18.s, p0, [%[c_ptr], #-8, MUL VL]\n"
+ "st1w z11.s, p0, [%[c_ptr], #-7, MUL VL]\n"
+ "st1w z15.s, p0, [%[c_ptr], #-6, MUL VL]\n"
+ "st1w z19.s, p0, [%[c_ptr], #-5, MUL VL]\n"
+ "st1w z20.s, p0, [%[c_ptr], #-4, MUL VL]\n"
+ "st1w z24.s, p0, [%[c_ptr], #-3, MUL VL]\n"
+ "st1w z28.s, p0, [%[c_ptr], #-2, MUL VL]\n"
+ "st1w z21.s, p0, [%[c_ptr], #-1, MUL VL]\n"
+ "st1w z25.s, p0, [%[c_ptr]]\n"
+ "st1w z29.s, p0, [%[c_ptr], #1, MUL VL]\n"
+ "st1w z22.s, p0, [%[c_ptr], #2, MUL VL]\n"
+ "st1w z26.s, p0, [%[c_ptr], #3, MUL VL]\n"
+ "st1w z30.s, p0, [%[c_ptr], #4, MUL VL]\n"
+ "st1w z23.s, p0, [%[c_ptr], #5, MUL VL]\n"
+ "st1w z27.s, p0, [%[c_ptr], #6, MUL VL]\n"
+ "st1w z31.s, p0, [%[c_ptr], #7, MUL VL]\n"
+ "addvl %[c_ptr], %[c_ptr], #8\n"
+ : [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
+ [loops] "+r" (loops), [tails] "+r" (tails)
+ :
+ : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ }
+ }
+}
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_3VLx8.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_3VLx8.hpp
new file mode 100644
index 0000000000..91aa567d4a
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_3VLx8.hpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+#include <cstdint>
+#include "../std_transforms_sve.hpp"
+
+namespace arm_gemm {
+
+// Actual kernel implementations
+void sve_interleaved_s8s32_dot_3VLx8(const int8_t *, const int8_t *, int32_t *, int, int, int);
+
+class interleaved_s8s32_dot_3VLx8 {
+public:
+ typedef int8_t operand_type;
+ typedef int32_t result_type;
+
+ typedef void (*kern_type)(const int8_t *, const int8_t *, int32_t *, int, int, int);
+
+ /* Kernel blocking parameters */
+ static int out_width()
+ {
+ return svcntw() * 3;
+ }
+
+ static int out_height()
+ {
+ return 8;
+ }
+
+ static int k_unroll()
+ {
+ return 4;
+ }
+
+ // Use the standard fixed size transforms.
+ StdTransformsSVE<operand_type, result_type, 8, 3, 4, 1> transforms = {};
+
+ kern_type kernel=sve_interleaved_s8s32_dot_3VLx8;
+
+ interleaved_s8s32_dot_3VLx8(const CPUInfo *ci)
+ {
+
+ }
+};
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
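
Worked note (not part of the patch): unlike the fp16/fp32 kernels, k_unroll() is 4 here because each sdot instruction consumes a group of four int8 values per 32-bit accumulator lane; correspondingly, the generic.cpp below divides K by 4 before computing its loop and tail counts, while out_width() remains 3 vector lengths of 32-bit results (svcntw() * 3).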
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_3VLx8/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_3VLx8/generic.cpp
new file mode 100644
index 0000000000..2e994a13f3
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_3VLx8/generic.cpp
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+
+#include <cstdint>
+#include "../../asmlib.hpp"
+
+namespace arm_gemm {
+
+void sve_interleaved_s8s32_dot_3VLx8(const int8_t *Apanel, const int8_t *Bpanel, int32_t *Cpanel, int ablocks, int bblocks, int K) {
+ const int8_t *a_ptr = Apanel;
+ int32_t *c_ptr = Cpanel;
+
+ K /= 4;
+ const long loops_count = (K / 2) - 1;
+ const long tails_count = K % 2;
+
+ for (int yb=0; yb<ablocks; yb++) {
+ const int8_t *a_ptr0 = a_ptr;
+ const int8_t *b_ptr = Bpanel;
+
+ for (int xb=0; xb<bblocks; xb++) {
+ a_ptr = a_ptr0;
+ long loops = loops_count;
+ long tails = tails_count;
+
+ __asm __volatile (
+ "mov z8.s, #0\n"
+ "ptrue p0.b\n"
+ "mov z9.s, #0\n"
+ "ld1rqb z0.b, p0/z, [%[a_ptr]]\n"
+ "mov z10.s, #0\n"
+ "ld1b z4.b, p0/z, [%[b_ptr]]\n"
+ "mov z11.s, #0\n"
+ "ld1rqb z1.b, p0/z, [%[a_ptr], #0x10]\n"
+ "mov z12.s, #0\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "mov z13.s, #0\n"
+ "ld1rqb z2.b, p0/z, [%[a_ptr], #0x20]\n"
+ "mov z14.s, #0\n"
+ "add %[a_ptr], %[a_ptr], #0x40\n"
+ "mov z15.s, #0\n"
+ "addvl %[b_ptr], %[b_ptr], #3\n"
+ "mov z16.s, #0\n"
+ "mov z17.s, #0\n"
+ "mov z18.s, #0\n"
+ "mov z19.s, #0\n"
+ "mov z20.s, #0\n"
+ "mov z21.s, #0\n"
+ "mov z22.s, #0\n"
+ "mov z23.s, #0\n"
+ "mov z24.s, #0\n"
+ "mov z25.s, #0\n"
+ "mov z26.s, #0\n"
+ "mov z27.s, #0\n"
+ "mov z28.s, #0\n"
+ "mov z29.s, #0\n"
+ "mov z30.s, #0\n"
+ "mov z31.s, #0\n"
+ "cbz %[loops], 1f\n"
+ "2:\n"
+ "sdot z8.s, z4.b, z0.b[0]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "sdot z9.s, z4.b, z0.b[1]\n"
+ "ld1rqb z3.b, p0/z, [%[a_ptr], #-0x10]\n"
+ "sdot z10.s, z4.b, z0.b[2]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "sdot z11.s, z4.b, z0.b[3]\n"
+ "sdot z20.s, z4.b, z1.b[0]\n"
+ "sdot z21.s, z4.b, z1.b[1]\n"
+ "sdot z22.s, z4.b, z1.b[2]\n"
+ "sdot z23.s, z4.b, z1.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr]]\n"
+ "sdot z12.s, z5.b, z0.b[0]\n"
+ "sdot z13.s, z5.b, z0.b[1]\n"
+ "sdot z14.s, z5.b, z0.b[2]\n"
+ "sdot z15.s, z5.b, z0.b[3]\n"
+ "sdot z24.s, z5.b, z1.b[0]\n"
+ "sdot z25.s, z5.b, z1.b[1]\n"
+ "sdot z26.s, z5.b, z1.b[2]\n"
+ "sdot z27.s, z5.b, z1.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "sdot z16.s, z6.b, z0.b[0]\n"
+ "sdot z17.s, z6.b, z0.b[1]\n"
+ "sdot z18.s, z6.b, z0.b[2]\n"
+ "sdot z19.s, z6.b, z0.b[3]\n"
+ "ld1rqb z0.b, p0/z, [%[a_ptr]]\n"
+ "sdot z28.s, z6.b, z1.b[0]\n"
+ "sdot z29.s, z6.b, z1.b[1]\n"
+ "sdot z30.s, z6.b, z1.b[2]\n"
+ "sdot z31.s, z6.b, z1.b[3]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #2, MUL VL]\n"
+ "sdot z8.s, z4.b, z2.b[0]\n"
+ "ld1rqb z1.b, p0/z, [%[a_ptr], #0x10]\n"
+ "sdot z9.s, z4.b, z2.b[1]\n"
+ "add %[a_ptr], %[a_ptr], #0x40\n"
+ "sdot z10.s, z4.b, z2.b[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #6\n"
+ "sdot z11.s, z4.b, z2.b[3]\n"
+ "sdot z20.s, z4.b, z3.b[0]\n"
+ "sdot z21.s, z4.b, z3.b[1]\n"
+ "sdot z22.s, z4.b, z3.b[2]\n"
+ "sdot z23.s, z4.b, z3.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "sdot z12.s, z5.b, z2.b[0]\n"
+ "sdot z13.s, z5.b, z2.b[1]\n"
+ "sdot z14.s, z5.b, z2.b[2]\n"
+ "sdot z15.s, z5.b, z2.b[3]\n"
+ "sdot z24.s, z5.b, z3.b[0]\n"
+ "sdot z25.s, z5.b, z3.b[1]\n"
+ "sdot z26.s, z5.b, z3.b[2]\n"
+ "sdot z27.s, z5.b, z3.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "sdot z16.s, z6.b, z2.b[0]\n"
+ "sdot z17.s, z6.b, z2.b[1]\n"
+ "sdot z18.s, z6.b, z2.b[2]\n"
+ "sdot z19.s, z6.b, z2.b[3]\n"
+ "ld1rqb z2.b, p0/z, [%[a_ptr], #-0x20]\n"
+ "sdot z28.s, z6.b, z3.b[0]\n"
+ "sdot z29.s, z6.b, z3.b[1]\n"
+ "sdot z30.s, z6.b, z3.b[2]\n"
+ "sdot z31.s, z6.b, z3.b[3]\n"
+ "b.ne 2b\n"
+ "1:\n"
+ "cbz %[tails], 3f\n"
+ "sdot z8.s, z4.b, z0.b[0]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "sdot z9.s, z4.b, z0.b[1]\n"
+ "ld1rqb z3.b, p0/z, [%[a_ptr], #-0x10]\n"
+ "sdot z10.s, z4.b, z0.b[2]\n"
+ "sdot z11.s, z4.b, z0.b[3]\n"
+ "sdot z20.s, z4.b, z1.b[0]\n"
+ "sdot z21.s, z4.b, z1.b[1]\n"
+ "sdot z22.s, z4.b, z1.b[2]\n"
+ "sdot z23.s, z4.b, z1.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr]]\n"
+ "sdot z12.s, z5.b, z0.b[0]\n"
+ "sdot z13.s, z5.b, z0.b[1]\n"
+ "sdot z14.s, z5.b, z0.b[2]\n"
+ "sdot z15.s, z5.b, z0.b[3]\n"
+ "sdot z24.s, z5.b, z1.b[0]\n"
+ "sdot z25.s, z5.b, z1.b[1]\n"
+ "sdot z26.s, z5.b, z1.b[2]\n"
+ "sdot z27.s, z5.b, z1.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "sdot z16.s, z6.b, z0.b[0]\n"
+ "sdot z17.s, z6.b, z0.b[1]\n"
+ "sdot z18.s, z6.b, z0.b[2]\n"
+ "sdot z19.s, z6.b, z0.b[3]\n"
+ "ld1rqb z0.b, p0/z, [%[a_ptr]]\n"
+ "sdot z28.s, z6.b, z1.b[0]\n"
+ "sdot z29.s, z6.b, z1.b[1]\n"
+ "sdot z30.s, z6.b, z1.b[2]\n"
+ "sdot z31.s, z6.b, z1.b[3]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #2, MUL VL]\n"
+ "sdot z8.s, z4.b, z2.b[0]\n"
+ "ld1rqb z1.b, p0/z, [%[a_ptr], #0x10]\n"
+ "sdot z9.s, z4.b, z2.b[1]\n"
+ "add %[a_ptr], %[a_ptr], #0x20\n"
+ "sdot z10.s, z4.b, z2.b[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #6\n"
+ "sdot z11.s, z4.b, z2.b[3]\n"
+ "sdot z20.s, z4.b, z3.b[0]\n"
+ "sdot z21.s, z4.b, z3.b[1]\n"
+ "sdot z22.s, z4.b, z3.b[2]\n"
+ "sdot z23.s, z4.b, z3.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "sdot z12.s, z5.b, z2.b[0]\n"
+ "sdot z13.s, z5.b, z2.b[1]\n"
+ "sdot z14.s, z5.b, z2.b[2]\n"
+ "sdot z15.s, z5.b, z2.b[3]\n"
+ "sdot z24.s, z5.b, z3.b[0]\n"
+ "sdot z25.s, z5.b, z3.b[1]\n"
+ "sdot z26.s, z5.b, z3.b[2]\n"
+ "sdot z27.s, z5.b, z3.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "sdot z16.s, z6.b, z2.b[0]\n"
+ "sdot z17.s, z6.b, z2.b[1]\n"
+ "sdot z18.s, z6.b, z2.b[2]\n"
+ "sdot z19.s, z6.b, z2.b[3]\n"
+ "sdot z28.s, z6.b, z3.b[0]\n"
+ "sdot z29.s, z6.b, z3.b[1]\n"
+ "sdot z30.s, z6.b, z3.b[2]\n"
+ "sdot z31.s, z6.b, z3.b[3]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "sdot z8.s, z4.b, z0.b[0]\n"
+ "st1w z8.s, p0, [%[c_ptr]]\n"
+ "sdot z9.s, z4.b, z0.b[1]\n"
+ "sdot z10.s, z4.b, z0.b[2]\n"
+ "sdot z11.s, z4.b, z0.b[3]\n"
+ "sdot z20.s, z4.b, z1.b[0]\n"
+ "sdot z21.s, z4.b, z1.b[1]\n"
+ "sdot z22.s, z4.b, z1.b[2]\n"
+ "sdot z23.s, z4.b, z1.b[3]\n"
+ "sdot z12.s, z5.b, z0.b[0]\n"
+ "st1w z12.s, p0, [%[c_ptr], #1, MUL VL]\n"
+ "sdot z13.s, z5.b, z0.b[1]\n"
+ "sdot z14.s, z5.b, z0.b[2]\n"
+ "sdot z15.s, z5.b, z0.b[3]\n"
+ "sdot z24.s, z5.b, z1.b[0]\n"
+ "sdot z25.s, z5.b, z1.b[1]\n"
+ "sdot z26.s, z5.b, z1.b[2]\n"
+ "sdot z27.s, z5.b, z1.b[3]\n"
+ "sdot z16.s, z6.b, z0.b[0]\n"
+ "st1w z16.s, p0, [%[c_ptr], #2, MUL VL]\n"
+ "sdot z17.s, z6.b, z0.b[1]\n"
+ "st1w z9.s, p0, [%[c_ptr], #3, MUL VL]\n"
+ "sdot z18.s, z6.b, z0.b[2]\n"
+ "st1w z13.s, p0, [%[c_ptr], #4, MUL VL]\n"
+ "sdot z19.s, z6.b, z0.b[3]\n"
+ "st1w z17.s, p0, [%[c_ptr], #5, MUL VL]\n"
+ "sdot z28.s, z6.b, z1.b[0]\n"
+ "st1w z10.s, p0, [%[c_ptr], #6, MUL VL]\n"
+ "sdot z29.s, z6.b, z1.b[1]\n"
+ "st1w z14.s, p0, [%[c_ptr], #7, MUL VL]\n"
+ "sdot z30.s, z6.b, z1.b[2]\n"
+ "addvl %[c_ptr], %[c_ptr], #16\n"
+ "sdot z31.s, z6.b, z1.b[3]\n"
+ "b 4f\n"
+ "3:\n"
+ "sdot z8.s, z4.b, z0.b[0]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "sdot z9.s, z4.b, z0.b[1]\n"
+ "ld1rqb z3.b, p0/z, [%[a_ptr], #-0x10]\n"
+ "sdot z10.s, z4.b, z0.b[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #3\n"
+ "sdot z11.s, z4.b, z0.b[3]\n"
+ "sdot z20.s, z4.b, z1.b[0]\n"
+ "sdot z21.s, z4.b, z1.b[1]\n"
+ "sdot z22.s, z4.b, z1.b[2]\n"
+ "sdot z23.s, z4.b, z1.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "sdot z12.s, z5.b, z0.b[0]\n"
+ "sdot z13.s, z5.b, z0.b[1]\n"
+ "sdot z14.s, z5.b, z0.b[2]\n"
+ "sdot z15.s, z5.b, z0.b[3]\n"
+ "sdot z24.s, z5.b, z1.b[0]\n"
+ "sdot z25.s, z5.b, z1.b[1]\n"
+ "sdot z26.s, z5.b, z1.b[2]\n"
+ "sdot z27.s, z5.b, z1.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "sdot z16.s, z6.b, z0.b[0]\n"
+ "sdot z17.s, z6.b, z0.b[1]\n"
+ "sdot z18.s, z6.b, z0.b[2]\n"
+ "sdot z19.s, z6.b, z0.b[3]\n"
+ "sdot z28.s, z6.b, z1.b[0]\n"
+ "sdot z29.s, z6.b, z1.b[1]\n"
+ "sdot z30.s, z6.b, z1.b[2]\n"
+ "sdot z31.s, z6.b, z1.b[3]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "sdot z8.s, z4.b, z2.b[0]\n"
+ "st1w z8.s, p0, [%[c_ptr]]\n"
+ "sdot z9.s, z4.b, z2.b[1]\n"
+ "sdot z10.s, z4.b, z2.b[2]\n"
+ "sdot z11.s, z4.b, z2.b[3]\n"
+ "sdot z20.s, z4.b, z3.b[0]\n"
+ "sdot z21.s, z4.b, z3.b[1]\n"
+ "sdot z22.s, z4.b, z3.b[2]\n"
+ "sdot z23.s, z4.b, z3.b[3]\n"
+ "sdot z12.s, z5.b, z2.b[0]\n"
+ "st1w z12.s, p0, [%[c_ptr], #1, MUL VL]\n"
+ "sdot z13.s, z5.b, z2.b[1]\n"
+ "sdot z14.s, z5.b, z2.b[2]\n"
+ "sdot z15.s, z5.b, z2.b[3]\n"
+ "sdot z24.s, z5.b, z3.b[0]\n"
+ "sdot z25.s, z5.b, z3.b[1]\n"
+ "sdot z26.s, z5.b, z3.b[2]\n"
+ "sdot z27.s, z5.b, z3.b[3]\n"
+ "sdot z16.s, z6.b, z2.b[0]\n"
+ "st1w z16.s, p0, [%[c_ptr], #2, MUL VL]\n"
+ "sdot z17.s, z6.b, z2.b[1]\n"
+ "st1w z9.s, p0, [%[c_ptr], #3, MUL VL]\n"
+ "sdot z18.s, z6.b, z2.b[2]\n"
+ "st1w z13.s, p0, [%[c_ptr], #4, MUL VL]\n"
+ "sdot z19.s, z6.b, z2.b[3]\n"
+ "st1w z17.s, p0, [%[c_ptr], #5, MUL VL]\n"
+ "sdot z28.s, z6.b, z3.b[0]\n"
+ "st1w z10.s, p0, [%[c_ptr], #6, MUL VL]\n"
+ "sdot z29.s, z6.b, z3.b[1]\n"
+ "st1w z14.s, p0, [%[c_ptr], #7, MUL VL]\n"
+ "sdot z30.s, z6.b, z3.b[2]\n"
+ "addvl %[c_ptr], %[c_ptr], #16\n"
+ "sdot z31.s, z6.b, z3.b[3]\n"
+ "4:\n"
+ "st1w z18.s, p0, [%[c_ptr], #-8, MUL VL]\n"
+ "st1w z11.s, p0, [%[c_ptr], #-7, MUL VL]\n"
+ "st1w z15.s, p0, [%[c_ptr], #-6, MUL VL]\n"
+ "st1w z19.s, p0, [%[c_ptr], #-5, MUL VL]\n"
+ "st1w z20.s, p0, [%[c_ptr], #-4, MUL VL]\n"
+ "st1w z24.s, p0, [%[c_ptr], #-3, MUL VL]\n"
+ "st1w z28.s, p0, [%[c_ptr], #-2, MUL VL]\n"
+ "st1w z21.s, p0, [%[c_ptr], #-1, MUL VL]\n"
+ "st1w z25.s, p0, [%[c_ptr]]\n"
+ "st1w z29.s, p0, [%[c_ptr], #1, MUL VL]\n"
+ "st1w z22.s, p0, [%[c_ptr], #2, MUL VL]\n"
+ "st1w z26.s, p0, [%[c_ptr], #3, MUL VL]\n"
+ "st1w z30.s, p0, [%[c_ptr], #4, MUL VL]\n"
+ "st1w z23.s, p0, [%[c_ptr], #5, MUL VL]\n"
+ "st1w z27.s, p0, [%[c_ptr], #6, MUL VL]\n"
+ "st1w z31.s, p0, [%[c_ptr], #7, MUL VL]\n"
+ "addvl %[c_ptr], %[c_ptr], #8\n"
+ : [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
+ [loops] "+r" (loops), [tails] "+r" (tails)
+ :
+ : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ }
+ }
+}
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_3VLx8.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_3VLx8.hpp
new file mode 100644
index 0000000000..ef457e454f
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_3VLx8.hpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+#include <cstdint>
+#include "../std_transforms_sve.hpp"
+
+namespace arm_gemm {
+
+// Actual kernel implementations
+void sve_interleaved_u8u32_dot_3VLx8(const uint8_t *, const uint8_t *, uint32_t *, int, int, int);
+
+class interleaved_u8u32_dot_3VLx8 {
+public:
+ typedef uint8_t operand_type;
+ typedef uint32_t result_type;
+
+ typedef void (*kern_type)(const uint8_t *, const uint8_t *, uint32_t *, int, int, int);
+
+ /* Kernel blocking parameters */
+ static int out_width()
+ {
+ return svcntw() * 3;
+ }
+
+ static int out_height()
+ {
+ return 8;
+ }
+
+ static int k_unroll()
+ {
+ return 4;
+ }
+
+    // Use the standard SVE transforms.
+ StdTransformsSVE<operand_type, result_type, 8, 3, 4, 1> transforms = {};
+
+ kern_type kernel=sve_interleaved_u8u32_dot_3VLx8;
+
+ interleaved_u8u32_dot_3VLx8(const CPUInfo *ci)
+ {
+
+ }
+};
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
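The k_unroll() value of 4 in the header above reflects the udot grouping: the kernel body in the next file divides K by 4 and assumes no remainder, so the depth handed to it must already be a multiple of k_unroll(). A minimal sketch of the rounding a caller would apply (hypothetical helper, not part of this patch):

    // Hypothetical illustration only: round the GEMM depth up to the kernel's unroll factor.
    static inline int rounded_depth(int K, int k_unroll /* = 4 for this kernel */) {
        return ((K + k_unroll - 1) / k_unroll) * k_unroll;  // e.g. K=10, k_unroll=4 -> 12
    }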
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_3VLx8/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_3VLx8/generic.cpp
new file mode 100644
index 0000000000..f4d33a9efa
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_3VLx8/generic.cpp
@@ -0,0 +1,328 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+
+#include <cstdint>
+#include "../../asmlib.hpp"
+
+namespace arm_gemm {
+
+void sve_interleaved_u8u32_dot_3VLx8(const uint8_t *Apanel, const uint8_t *Bpanel, uint32_t *Cpanel, int ablocks, int bblocks, int K) {
+ const uint8_t *a_ptr = Apanel;
+ uint32_t *c_ptr = Cpanel;
+
+ K /= 4;
+ const long loops_count = (K / 2) - 1;
+ const long tails_count = K % 2;
+
+ for (int yb=0; yb<ablocks; yb++) {
+ const uint8_t *a_ptr0 = a_ptr;
+ const uint8_t *b_ptr = Bpanel;
+
+ for (int xb=0; xb<bblocks; xb++) {
+ a_ptr = a_ptr0;
+ long loops = loops_count;
+ long tails = tails_count;
+
+ __asm __volatile (
+ "mov z8.s, #0\n"
+ "ptrue p0.b\n"
+ "mov z9.s, #0\n"
+ "mov z10.s, #0\n"
+ "mov z11.s, #0\n"
+ "mov z12.s, #0\n"
+ "ld1rqb z0.b, p0/z, [%[a_ptr]]\n"
+ "mov z13.s, #0\n"
+ "ld1b z4.b, p0/z, [%[b_ptr]]\n"
+ "mov z14.s, #0\n"
+ "ld1rqb z1.b, p0/z, [%[a_ptr], #0x10]\n"
+ "mov z15.s, #0\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "mov z16.s, #0\n"
+ "ld1rqb z2.b, p0/z, [%[a_ptr], #0x20]\n"
+ "mov z17.s, #0\n"
+ "add %[a_ptr], %[a_ptr], #0x40\n"
+ "mov z18.s, #0\n"
+ "addvl %[b_ptr], %[b_ptr], #3\n"
+ "mov z19.s, #0\n"
+ "mov z20.s, #0\n"
+ "mov z21.s, #0\n"
+ "mov z22.s, #0\n"
+ "mov z23.s, #0\n"
+ "mov z24.s, #0\n"
+ "mov z25.s, #0\n"
+ "mov z26.s, #0\n"
+ "mov z27.s, #0\n"
+ "mov z28.s, #0\n"
+ "mov z29.s, #0\n"
+ "mov z30.s, #0\n"
+ "mov z31.s, #0\n"
+ "cbz %[loops], 1f\n"
+ "2:\n"
+ "udot z8.s, z4.b, z0.b[0]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "udot z9.s, z4.b, z0.b[1]\n"
+ "ld1rqb z3.b, p0/z, [%[a_ptr], #-0x10]\n"
+ "udot z10.s, z4.b, z0.b[2]\n"
+ "subs %[loops], %[loops], #0x1\n"
+ "udot z11.s, z4.b, z0.b[3]\n"
+ "udot z20.s, z4.b, z1.b[0]\n"
+ "udot z21.s, z4.b, z1.b[1]\n"
+ "udot z22.s, z4.b, z1.b[2]\n"
+ "udot z23.s, z4.b, z1.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr]]\n"
+ "udot z12.s, z5.b, z0.b[0]\n"
+ "udot z13.s, z5.b, z0.b[1]\n"
+ "udot z14.s, z5.b, z0.b[2]\n"
+ "udot z15.s, z5.b, z0.b[3]\n"
+ "udot z24.s, z5.b, z1.b[0]\n"
+ "udot z25.s, z5.b, z1.b[1]\n"
+ "udot z26.s, z5.b, z1.b[2]\n"
+ "udot z27.s, z5.b, z1.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "udot z16.s, z6.b, z0.b[0]\n"
+ "udot z17.s, z6.b, z0.b[1]\n"
+ "udot z18.s, z6.b, z0.b[2]\n"
+ "udot z19.s, z6.b, z0.b[3]\n"
+ "ld1rqb z0.b, p0/z, [%[a_ptr]]\n"
+ "udot z28.s, z6.b, z1.b[0]\n"
+ "udot z29.s, z6.b, z1.b[1]\n"
+ "udot z30.s, z6.b, z1.b[2]\n"
+ "udot z31.s, z6.b, z1.b[3]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #2, MUL VL]\n"
+ "udot z8.s, z4.b, z2.b[0]\n"
+ "ld1rqb z1.b, p0/z, [%[a_ptr], #0x10]\n"
+ "udot z9.s, z4.b, z2.b[1]\n"
+ "add %[a_ptr], %[a_ptr], #0x40\n"
+ "udot z10.s, z4.b, z2.b[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #6\n"
+ "udot z11.s, z4.b, z2.b[3]\n"
+ "udot z20.s, z4.b, z3.b[0]\n"
+ "udot z21.s, z4.b, z3.b[1]\n"
+ "udot z22.s, z4.b, z3.b[2]\n"
+ "udot z23.s, z4.b, z3.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "udot z12.s, z5.b, z2.b[0]\n"
+ "udot z13.s, z5.b, z2.b[1]\n"
+ "udot z14.s, z5.b, z2.b[2]\n"
+ "udot z15.s, z5.b, z2.b[3]\n"
+ "udot z24.s, z5.b, z3.b[0]\n"
+ "udot z25.s, z5.b, z3.b[1]\n"
+ "udot z26.s, z5.b, z3.b[2]\n"
+ "udot z27.s, z5.b, z3.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "udot z16.s, z6.b, z2.b[0]\n"
+ "udot z17.s, z6.b, z2.b[1]\n"
+ "udot z18.s, z6.b, z2.b[2]\n"
+ "udot z19.s, z6.b, z2.b[3]\n"
+ "ld1rqb z2.b, p0/z, [%[a_ptr], #-0x20]\n"
+ "udot z28.s, z6.b, z3.b[0]\n"
+ "udot z29.s, z6.b, z3.b[1]\n"
+ "udot z30.s, z6.b, z3.b[2]\n"
+ "udot z31.s, z6.b, z3.b[3]\n"
+ "b.ne 2b\n"
+ "1:\n"
+ "cbz %[tails], 3f\n"
+ "udot z8.s, z4.b, z0.b[0]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "udot z9.s, z4.b, z0.b[1]\n"
+ "ld1rqb z3.b, p0/z, [%[a_ptr], #-0x10]\n"
+ "udot z10.s, z4.b, z0.b[2]\n"
+ "udot z11.s, z4.b, z0.b[3]\n"
+ "udot z20.s, z4.b, z1.b[0]\n"
+ "udot z21.s, z4.b, z1.b[1]\n"
+ "udot z22.s, z4.b, z1.b[2]\n"
+ "udot z23.s, z4.b, z1.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr]]\n"
+ "udot z12.s, z5.b, z0.b[0]\n"
+ "udot z13.s, z5.b, z0.b[1]\n"
+ "udot z14.s, z5.b, z0.b[2]\n"
+ "udot z15.s, z5.b, z0.b[3]\n"
+ "udot z24.s, z5.b, z1.b[0]\n"
+ "udot z25.s, z5.b, z1.b[1]\n"
+ "udot z26.s, z5.b, z1.b[2]\n"
+ "udot z27.s, z5.b, z1.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #1, MUL VL]\n"
+ "udot z16.s, z6.b, z0.b[0]\n"
+ "udot z17.s, z6.b, z0.b[1]\n"
+ "udot z18.s, z6.b, z0.b[2]\n"
+ "udot z19.s, z6.b, z0.b[3]\n"
+ "ld1rqb z0.b, p0/z, [%[a_ptr]]\n"
+ "udot z28.s, z6.b, z1.b[0]\n"
+ "udot z29.s, z6.b, z1.b[1]\n"
+ "udot z30.s, z6.b, z1.b[2]\n"
+ "udot z31.s, z6.b, z1.b[3]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #2, MUL VL]\n"
+ "udot z8.s, z4.b, z2.b[0]\n"
+ "ld1rqb z1.b, p0/z, [%[a_ptr], #0x10]\n"
+ "udot z9.s, z4.b, z2.b[1]\n"
+ "add %[a_ptr], %[a_ptr], #0x20\n"
+ "udot z10.s, z4.b, z2.b[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #6\n"
+ "udot z11.s, z4.b, z2.b[3]\n"
+ "udot z20.s, z4.b, z3.b[0]\n"
+ "udot z21.s, z4.b, z3.b[1]\n"
+ "udot z22.s, z4.b, z3.b[2]\n"
+ "udot z23.s, z4.b, z3.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "udot z12.s, z5.b, z2.b[0]\n"
+ "udot z13.s, z5.b, z2.b[1]\n"
+ "udot z14.s, z5.b, z2.b[2]\n"
+ "udot z15.s, z5.b, z2.b[3]\n"
+ "udot z24.s, z5.b, z3.b[0]\n"
+ "udot z25.s, z5.b, z3.b[1]\n"
+ "udot z26.s, z5.b, z3.b[2]\n"
+ "udot z27.s, z5.b, z3.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "udot z16.s, z6.b, z2.b[0]\n"
+ "udot z17.s, z6.b, z2.b[1]\n"
+ "udot z18.s, z6.b, z2.b[2]\n"
+ "udot z19.s, z6.b, z2.b[3]\n"
+ "udot z28.s, z6.b, z3.b[0]\n"
+ "udot z29.s, z6.b, z3.b[1]\n"
+ "udot z30.s, z6.b, z3.b[2]\n"
+ "udot z31.s, z6.b, z3.b[3]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "udot z8.s, z4.b, z0.b[0]\n"
+ "udot z9.s, z4.b, z0.b[1]\n"
+ "udot z10.s, z4.b, z0.b[2]\n"
+ "udot z11.s, z4.b, z0.b[3]\n"
+ "udot z20.s, z4.b, z1.b[0]\n"
+ "st1w z8.s, p0, [%[c_ptr]]\n"
+ "udot z21.s, z4.b, z1.b[1]\n"
+ "udot z22.s, z4.b, z1.b[2]\n"
+ "udot z23.s, z4.b, z1.b[3]\n"
+ "udot z12.s, z5.b, z0.b[0]\n"
+ "udot z13.s, z5.b, z0.b[1]\n"
+ "udot z14.s, z5.b, z0.b[2]\n"
+ "udot z15.s, z5.b, z0.b[3]\n"
+ "udot z24.s, z5.b, z1.b[0]\n"
+ "st1w z12.s, p0, [%[c_ptr], #1, MUL VL]\n"
+ "udot z25.s, z5.b, z1.b[1]\n"
+ "udot z26.s, z5.b, z1.b[2]\n"
+ "udot z27.s, z5.b, z1.b[3]\n"
+ "udot z16.s, z6.b, z0.b[0]\n"
+ "udot z17.s, z6.b, z0.b[1]\n"
+ "udot z18.s, z6.b, z0.b[2]\n"
+ "udot z19.s, z6.b, z0.b[3]\n"
+ "udot z28.s, z6.b, z1.b[0]\n"
+ "st1w z16.s, p0, [%[c_ptr], #2, MUL VL]\n"
+ "udot z29.s, z6.b, z1.b[1]\n"
+ "udot z30.s, z6.b, z1.b[2]\n"
+ "udot z31.s, z6.b, z1.b[3]\n"
+ "b 4f\n"
+ "3:\n"
+ "udot z8.s, z4.b, z0.b[0]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "udot z9.s, z4.b, z0.b[1]\n"
+ "ld1rqb z3.b, p0/z, [%[a_ptr], #-0x10]\n"
+ "udot z10.s, z4.b, z0.b[2]\n"
+ "addvl %[b_ptr], %[b_ptr], #3\n"
+ "udot z11.s, z4.b, z0.b[3]\n"
+ "udot z20.s, z4.b, z1.b[0]\n"
+ "udot z21.s, z4.b, z1.b[1]\n"
+ "udot z22.s, z4.b, z1.b[2]\n"
+ "udot z23.s, z4.b, z1.b[3]\n"
+ "ld1b z4.b, p0/z, [%[b_ptr], #-3, MUL VL]\n"
+ "udot z12.s, z5.b, z0.b[0]\n"
+ "udot z13.s, z5.b, z0.b[1]\n"
+ "udot z14.s, z5.b, z0.b[2]\n"
+ "udot z15.s, z5.b, z0.b[3]\n"
+ "udot z24.s, z5.b, z1.b[0]\n"
+ "udot z25.s, z5.b, z1.b[1]\n"
+ "udot z26.s, z5.b, z1.b[2]\n"
+ "udot z27.s, z5.b, z1.b[3]\n"
+ "ld1b z5.b, p0/z, [%[b_ptr], #-2, MUL VL]\n"
+ "udot z16.s, z6.b, z0.b[0]\n"
+ "udot z17.s, z6.b, z0.b[1]\n"
+ "udot z18.s, z6.b, z0.b[2]\n"
+ "udot z19.s, z6.b, z0.b[3]\n"
+ "udot z28.s, z6.b, z1.b[0]\n"
+ "udot z29.s, z6.b, z1.b[1]\n"
+ "udot z30.s, z6.b, z1.b[2]\n"
+ "udot z31.s, z6.b, z1.b[3]\n"
+ "ld1b z6.b, p0/z, [%[b_ptr], #-1, MUL VL]\n"
+ "udot z8.s, z4.b, z2.b[0]\n"
+ "udot z9.s, z4.b, z2.b[1]\n"
+ "udot z10.s, z4.b, z2.b[2]\n"
+ "udot z11.s, z4.b, z2.b[3]\n"
+ "udot z20.s, z4.b, z3.b[0]\n"
+ "st1w z8.s, p0, [%[c_ptr]]\n"
+ "udot z21.s, z4.b, z3.b[1]\n"
+ "udot z22.s, z4.b, z3.b[2]\n"
+ "udot z23.s, z4.b, z3.b[3]\n"
+ "udot z12.s, z5.b, z2.b[0]\n"
+ "udot z13.s, z5.b, z2.b[1]\n"
+ "udot z14.s, z5.b, z2.b[2]\n"
+ "udot z15.s, z5.b, z2.b[3]\n"
+ "udot z24.s, z5.b, z3.b[0]\n"
+ "st1w z12.s, p0, [%[c_ptr], #1, MUL VL]\n"
+ "udot z25.s, z5.b, z3.b[1]\n"
+ "udot z26.s, z5.b, z3.b[2]\n"
+ "udot z27.s, z5.b, z3.b[3]\n"
+ "udot z16.s, z6.b, z2.b[0]\n"
+ "udot z17.s, z6.b, z2.b[1]\n"
+ "udot z18.s, z6.b, z2.b[2]\n"
+ "udot z19.s, z6.b, z2.b[3]\n"
+ "udot z28.s, z6.b, z3.b[0]\n"
+ "st1w z16.s, p0, [%[c_ptr], #2, MUL VL]\n"
+ "udot z29.s, z6.b, z3.b[1]\n"
+ "udot z30.s, z6.b, z3.b[2]\n"
+ "udot z31.s, z6.b, z3.b[3]\n"
+ "4:\n"
+ "st1w z9.s, p0, [%[c_ptr], #3, MUL VL]\n"
+ "st1w z13.s, p0, [%[c_ptr], #4, MUL VL]\n"
+ "st1w z17.s, p0, [%[c_ptr], #5, MUL VL]\n"
+ "st1w z10.s, p0, [%[c_ptr], #6, MUL VL]\n"
+ "st1w z14.s, p0, [%[c_ptr], #7, MUL VL]\n"
+ "addvl %[c_ptr], %[c_ptr], #16\n"
+ "st1w z18.s, p0, [%[c_ptr], #-8, MUL VL]\n"
+ "st1w z11.s, p0, [%[c_ptr], #-7, MUL VL]\n"
+ "st1w z15.s, p0, [%[c_ptr], #-6, MUL VL]\n"
+ "st1w z19.s, p0, [%[c_ptr], #-5, MUL VL]\n"
+ "st1w z20.s, p0, [%[c_ptr], #-4, MUL VL]\n"
+ "st1w z24.s, p0, [%[c_ptr], #-3, MUL VL]\n"
+ "st1w z28.s, p0, [%[c_ptr], #-2, MUL VL]\n"
+ "st1w z21.s, p0, [%[c_ptr], #-1, MUL VL]\n"
+ "st1w z25.s, p0, [%[c_ptr]]\n"
+ "st1w z29.s, p0, [%[c_ptr], #1, MUL VL]\n"
+ "st1w z22.s, p0, [%[c_ptr], #2, MUL VL]\n"
+ "st1w z26.s, p0, [%[c_ptr], #3, MUL VL]\n"
+ "st1w z30.s, p0, [%[c_ptr], #4, MUL VL]\n"
+ "st1w z23.s, p0, [%[c_ptr], #5, MUL VL]\n"
+ "st1w z27.s, p0, [%[c_ptr], #6, MUL VL]\n"
+ "st1w z31.s, p0, [%[c_ptr], #7, MUL VL]\n"
+ "addvl %[c_ptr], %[c_ptr], #8\n"
+ : [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
+ [loops] "+r" (loops), [tails] "+r" (tails)
+ :
+ : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
+ );
+ }
+ }
+}
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
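To make the interleaved layout easier to follow, here is a minimal scalar reference of what one call of sve_interleaved_u8u32_dot_3VLx8 computes for a single A block / B block pair, under the layout assumptions implied by the assembly above (8 rows per A block, 3*svcntw() columns per B block, depth grouped in fours for udot, C written row-major). This is an illustrative sketch with hypothetical names, not code from the patch:

    #include <cstdint>
    #include <arm_sve.h>

    // Illustrative reference only -- assumes the panel layouts described above
    // and that K is a multiple of 4 (the kernel's k_unroll).
    static void reference_u8u32_dot_block(const uint8_t *Apanel, const uint8_t *Bpanel,
                                          uint32_t *Cpanel, int K) {
        const int width = 3 * (int)svcntw();          // out_width(): three vectors of 32-bit lanes
        for (int row = 0; row < 8; row++) {           // out_height()
            for (int col = 0; col < width; col++) {
                uint32_t acc = 0;
                for (int kg = 0; kg < K / 4; kg++) {  // one udot covers 4 depth values
                    for (int b = 0; b < 4; b++) {
                        acc += uint32_t(Apanel[(kg * 8 + row) * 4 + b]) *
                               uint32_t(Bpanel[(kg * width + col) * 4 + b]);
                    }
                }
                Cpanel[row * width + col] = acc;      // accumulators start at zero, so '=' not '+='
            }
        }
    }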
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_sgemm_3VLx8.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_sgemm_3VLx8.hpp
new file mode 100644
index 0000000000..9d7f593fd0
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_sgemm_3VLx8.hpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+#include "../std_transforms_sve.hpp"
+
+namespace arm_gemm {
+
+// Actual kernel implementations
+void sve_sgemm_3VLx8(const float *, const float *, float *, int, int, int);
+
+// 3VLx8 SGEMM "strategy" class.
+//
+// This describes the characteristics of a family of kernels, in terms of
+// the required interleave properties and the output block size.
+//
+// All kernels in the family must share these characteristics. The actual
+// kernel to be used can be chosen at runtime, based on the CPUInfo
+// structure.
+class sgemm_3VLx8 {
+public:
+ typedef float operand_type;
+ typedef float result_type;
+
+ typedef void (*kern_type)(const float *, const float *, float *, int, int, int);
+
+ /* Kernel blocking parameters */
+
+ /* Width depends on vector length - use CNTW to compute the right value */
+ static int out_width() {
+ return svcntw() * 3;
+ }
+
+ static int out_height() {
+ return 8;
+ }
+
+ static int k_unroll() {
+ return 1;
+ }
+
+ // Use the standard SVE transforms.
+ StdTransformsSVE<operand_type, result_type, 8, 3> transforms;
+
+ kern_type kernel=sve_sgemm_3VLx8;
+
+ sgemm_3VLx8(const CPUInfo *ci) { }
+};
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
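As a rough illustration of how a GEMM driver might consume a strategy class like the one above, using only the static blocking parameters it exposes (the driver fragment is hypothetical and only meant to show the out_width()/out_height() contract):

    #include <arm_sve.h>

    // Hypothetical driver fragment -- illustrative only.
    template <typename Strategy>
    static void tile_counts(int M, int N, int &m_tiles, int &n_tiles) {
        const int out_h = Strategy::out_height();   // 8 rows of C per kernel call
        const int out_w = Strategy::out_width();    // 3 * svcntw() columns, vector-length dependent
        m_tiles = (M + out_h - 1) / out_h;          // number of row blocks
        n_tiles = (N + out_w - 1) / out_w;          // number of column blocks
    }

For example, tile_counts<sgemm_3VLx8>(M, N, m_tiles, n_tiles) would give the number of 8-row by 3VL-column output tiles the kernel below has to produce.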
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_sgemm_3VLx8/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_sgemm_3VLx8/generic.cpp
new file mode 100644
index 0000000000..fd6f0b7f98
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_sgemm_3VLx8/generic.cpp
@@ -0,0 +1,366 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef __ARM_FEATURE_SVE
+
+#include <arm_neon.h>
+#include <arm_sve.h>
+
+#include "../../asmlib.hpp"
+
+// Kernel implementation.
+//
+// Assume that "Apanel" points to a chunk of A blocks (each size 8xK) in read-order.
+// Assume that "Bpanel" points to a chunk of B blocks (each size 3VLxK) in read-order.
+// Assume that "Cpanel" points to a chunk of C output blocks (each size
+// 3VLx8), the chunks being arranged in a row major fashion.
+//
+// Note that the intent of this is that either ablocks or bblocks will be 1
+// - this construction allows the output loop to proceed in either order.
+
+namespace arm_gemm {
+
+void sve_sgemm_3VLx8(const float *Apanel, const float *Bpanel, float *Cpanel, int ablocks, int bblocks, int K) {
+ const float *a_ptr = Apanel;
+ float *c_ptr = Cpanel;
+
+ // There's no predication inside the kernel, so get a true predicate to use everywhere.
+ svbool_t ptrue = svptrue_b32();
+
+ for (int yb=0; yb<ablocks; yb++) {
+ const float *a_ptr0 = a_ptr;
+ const float *b_ptr = Bpanel;
+
+ for (int xb=0; xb<bblocks; xb++) {
+ a_ptr = a_ptr0;
+ // Fix up for odd lengths - set a flag if K is odd, but make
+ // sure we round up the iteration count.
+ int oddk = (K & 1);
+ int k = ((K+1)/2) - 1;
+
+ register svfloat32_t a0 asm("z0");
+ register svfloat32_t a1 asm("z1");
+ register svfloat32_t b0 asm("z2");
+ register svfloat32_t b1 asm("z3");
+ register svfloat32_t b2 asm("z4");
+ register svfloat32_t a0a asm("z5");
+ register svfloat32_t a1a asm("z6");
+
+            // Note: All prefetches are commented out for now, but left in place as documentation of how it was done on NEON.
+            // Actual prefetches will be added once test hardware is available.
+ __asm __volatile (
+ // Initialize result registers, load initial operands, prime prefetches.
+ "mov z8.s, #0\n"
+ "ld1rqw %[a0].S, %[ptrue]/Z, [%[a_ptr]]\n"
+ "mov z9.s, #0\n"
+ "ld1w %[b0].S, %[ptrue]/Z, [%[b_ptr]]\n"
+ "mov z10.s, #0\n"
+ "ld1rqw %[a1].S, %[ptrue]/Z, [%[a_ptr], #0x10]\n"
+ "mov z11.s, #0\n"
+ "ld1w %[b1].S, %[ptrue]/Z, [%[b_ptr], #1, MUL VL]\n"
+ "mov z12.s, #0\n"
+ //ASM_PREFETCH("[%[b_ptr], #64]")
+ "mov z13.s, #0\n"
+ //ASM_PREFETCH("[%[a_ptr], #64]")
+ "mov z14.s, #0\n"
+ //ASM_PREFETCH("[%[b_ptr], #128]")
+ "mov z15.s, #0\n"
+ //ASM_PREFETCH("[%[a_ptr], #128]")
+ "mov z16.s, #0\n"
+ //ASM_PREFETCH("[%[b_ptr], #192]")
+ "mov z17.s, #0\n"
+ //ASM_PREFETCH("[%[b_ptr], #256]")
+ "mov z18.s, #0\n"
+ //ASM_PREFETCH("[%[a_ptr], #192]")
+ "mov z19.s, #0\n"
+ //ASM_PREFETCH("[%[b_ptr], #320]")
+ "mov z20.s, #0\n"
+ //ASM_PREFETCH("[%[a_ptr], #256]")
+ "mov z21.s, #0\n"
+ //ASM_PREFETCH("[%[b_ptr], #384]")
+ "mov z22.s, #0\n"
+ "mov z23.s, #0\n"
+ "mov z24.s, #0\n"
+ "mov z25.s, #0\n"
+ "mov z26.s, #0\n"
+ "mov z27.s, #0\n"
+ "mov z28.s, #0\n"
+ "mov z29.s, #0\n"
+ "mov z30.s, #0\n"
+ "mov z31.s, #0\n"
+
+ // Skip loop if we are doing zero iterations of it.
+ "cbz %w[k], 4f\n"
+
+ // Loop proper
+ "1:\n"
+ "fmla z8.s , %[b0].s, %[a0].s[0]\n"
+ "fmla z9.s , %[b0].s, %[a0].s[1]\n"
+ "ld1w %[b2].s, %[ptrue]/Z, [%[b_ptr], #2, MUL VL]\n"
+ "fmla z10.s, %[b0].s, %[a0].s[2]\n"
+ "fmla z11.s, %[b0].s, %[a0].s[3]\n"
+ "ld1rqw %[a0a].s, %[ptrue]/Z, [%[a_ptr], #0x20]\n"
+ "fmla z12.s, %[b0].s, %[a1].s[0]\n"
+ "fmla z13.s, %[b0].s, %[a1].s[1]\n"
+ "ld1rqw %[a1a].s, %[ptrue]/Z, [%[a_ptr], #0x30]\n"
+ "fmla z14.s, %[b0].s, %[a1].s[2]\n"
+ "fmla z15.s, %[b0].s, %[a1].s[3]\n"
+ "ld1w %[b0].s, %[ptrue]/Z, [%[b_ptr], #3, MUL VL]\n"
+
+ "fmla z16.s, %[b1].s, %[a0].s[0]\n"
+ "fmla z17.s, %[b1].s, %[a0].s[1]\n"
+ //ASM_PREFETCH("[%[a_ptr], #320]")
+ "fmla z18.s, %[b1].s, %[a0].s[2]\n"
+ "fmla z19.s, %[b1].s, %[a0].s[3]\n"
+ "fmla z20.s, %[b1].s, %[a1].s[0]\n"
+ "fmla z21.s, %[b1].s, %[a1].s[1]\n"
+ "fmla z22.s, %[b1].s, %[a1].s[2]\n"
+ "fmla z23.s, %[b1].s, %[a1].s[3]\n"
+ "ld1w %[b1].s, %[ptrue]/Z, [%[b_ptr], #4, MUL VL]\n"
+
+ "fmla z24.s, %[b2].s, %[a0].s[0]\n"
+ "fmla z25.s, %[b2].s, %[a0].s[1]\n"
+ //ASM_PREFETCH("[%[b_ptr], #448]")
+ "fmla z26.s, %[b2].s, %[a0].s[2]\n"
+ "fmla z27.s, %[b2].s, %[a0].s[3]\n"
+ "fmla z28.s, %[b2].s, %[a1].s[0]\n"
+ "fmla z29.s, %[b2].s, %[a1].s[1]\n"
+ "fmla z30.s, %[b2].s, %[a1].s[2]\n"
+ "fmla z31.s, %[b2].s, %[a1].s[3]\n"
+ "ld1w %[b2].s, %[ptrue]/Z, [%[b_ptr], #5, MUL VL]\n"
+
+ "fmla z8.s , %[b0].s, %[a0a].s[0]\n"
+ "fmla z9.s , %[b0].s, %[a0a].s[1]\n"
+ "ld1rqw %[a0].s, %[ptrue]/Z, [%[a_ptr], #0x40]\n"
+ "fmla z10.s, %[b0].s, %[a0a].s[2]\n"
+ "fmla z11.s, %[b0].s, %[a0a].s[3]\n"
+ "fmla z12.s, %[b0].s, %[a1a].s[0]\n"
+ "ld1rqw %[a1].s, %[ptrue]/Z, [%[a_ptr], #0x50]\n"
+ "fmla z13.s, %[b0].s, %[a1a].s[1]\n"
+ "fmla z14.s, %[b0].s, %[a1a].s[2]\n"
+ "fmla z15.s, %[b0].s, %[a1a].s[3]\n"
+ "ld1w %[b0].s, %[ptrue]/Z, [%[b_ptr], #6, MUL VL]\n"
+
+ "fmla z16.s, %[b1].s, %[a0a].s[0]\n"
+ "fmla z17.s, %[b1].s, %[a0a].s[1]\n"
+ //ASM_PREFETCH("[%[b_ptr], #512]")
+ "fmla z18.s, %[b1].s, %[a0a].s[2]\n"
+ "fmla z19.s, %[b1].s, %[a0a].s[3]\n"
+ "fmla z20.s, %[b1].s, %[a1a].s[0]\n"
+ "fmla z21.s, %[b1].s, %[a1a].s[1]\n"
+ "fmla z22.s, %[b1].s, %[a1a].s[2]\n"
+ "fmla z23.s, %[b1].s, %[a1a].s[3]\n"
+ "ld1w %[b1].s, %[ptrue]/Z, [%[b_ptr], #7, MUL VL]\n"
+
+ "fmla z24.s, %[b2].s, %[a0a].s[0]\n"
+ "fmla z25.s, %[b2].s, %[a0a].s[1]\n"
+ "add %[a_ptr], %[a_ptr], #0x40\n"
+ "fmla z26.s, %[b2].s, %[a0a].s[2]\n"
+ "fmla z27.s, %[b2].s, %[a0a].s[3]\n"
+ "incb %[b_ptr], ALL, MUL #6\n"
+ "fmla z28.s, %[b2].s, %[a1a].s[0]\n"
+ "fmla z29.s, %[b2].s, %[a1a].s[1]\n"
+ "subs %w[k], %w[k], #1\n"
+ "fmla z30.s, %[b2].s, %[a1a].s[2]\n"
+ "fmla z31.s, %[b2].s, %[a1a].s[3]\n"
+ "bne 1b\n"
+
+ // Target to use when K is 1 or 2 (i.e. zero iterations of main loop)
+ "4:\n"
+
+ // Branch to alternative tail for odd K
+ "cbnz %w[oddk], 2f\n"
+
+ // Detached final iteration (even K)
+ "fmla z8.s , %[b0].s, %[a0].s[0]\n"
+ "fmla z9.s , %[b0].s, %[a0].s[1]\n"
+ "ld1w %[b2].s, %[ptrue]/Z, [%[b_ptr], #2, MUL VL]\n"
+ "fmla z10.s, %[b0].s, %[a0].s[2]\n"
+ "fmla z11.s, %[b0].s, %[a0].s[3]\n"
+ "ld1rqw %[a0a].s, %[ptrue]/Z, [%[a_ptr], #0x20]\n"
+ "fmla z12.s, %[b0].s, %[a1].s[0]\n"
+ "fmla z13.s, %[b0].s, %[a1].s[1]\n"
+ "ld1rqw %[a1a].s, %[ptrue]/Z, [%[a_ptr], #0x30]\n"
+ "fmla z14.s, %[b0].s, %[a1].s[2]\n"
+ "fmla z15.s, %[b0].s, %[a1].s[3]\n"
+ "ld1w %[b0].s, %[ptrue]/Z, [%[b_ptr], #3, MUL VL]\n"
+
+ "fmla z16.s, %[b1].s, %[a0].s[0]\n"
+ "fmla z17.s, %[b1].s, %[a0].s[1]\n"
+ "fmla z18.s, %[b1].s, %[a0].s[2]\n"
+ "fmla z19.s, %[b1].s, %[a0].s[3]\n"
+ "fmla z20.s, %[b1].s, %[a1].s[0]\n"
+ "fmla z21.s, %[b1].s, %[a1].s[1]\n"
+ "fmla z22.s, %[b1].s, %[a1].s[2]\n"
+ "fmla z23.s, %[b1].s, %[a1].s[3]\n"
+ "ld1w %[b1].s, %[ptrue]/Z, [%[b_ptr], #4, MUL VL]\n"
+
+ "fmla z24.s, %[b2].s, %[a0].s[0]\n"
+ "fmla z25.s, %[b2].s, %[a0].s[1]\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
+ "fmla z26.s, %[b2].s, %[a0].s[2]\n"
+ "fmla z27.s, %[b2].s, %[a0].s[3]\n"
+ "fmla z28.s, %[b2].s, %[a1].s[0]\n"
+ "fmla z29.s, %[b2].s, %[a1].s[1]\n"
+ "fmla z30.s, %[b2].s, %[a1].s[2]\n"
+ "fmla z31.s, %[b2].s, %[a1].s[3]\n"
+ "ld1w %[b2].s, %[ptrue]/Z, [%[b_ptr], #5, MUL VL]\n"
+
+ "fmla z8.s , %[b0].s, %[a0a].s[0]\n"
+ "fmla z16.s, %[b1].s, %[a0a].s[0]\n"
+ "incb %[b_ptr], ALL, MUL #6\n"
+ "fmla z9.s , %[b0].s, %[a0a].s[1]\n"
+ "st1w z8.s, %[ptrue], [%[c_ptr]]\n"
+ "fmla z17.s, %[b1].s, %[a0a].s[1]\n"
+ "st1w z16.s, %[ptrue], [%[c_ptr], #1, MUL VL]\n"
+ "fmla z24.s, %[b2].s, %[a0a].s[0]\n"
+ "st1w z24.s, %[ptrue], [%[c_ptr], #2, MUL VL]\n"
+
+ "fmla z25.s, %[b2].s, %[a0a].s[1]\n"
+ "st1w z9.s, %[ptrue], [%[c_ptr], #3, MUL VL]\n"
+ "fmla z10.s, %[b0].s, %[a0a].s[2]\n"
+ "st1w z17.s, %[ptrue], [%[c_ptr], #4, MUL VL]\n"
+ "fmla z18.s, %[b1].s, %[a0a].s[2]\n"
+ "st1w z25.s, %[ptrue], [%[c_ptr], #5, MUL VL]\n"
+ "fmla z26.s, %[b2].s, %[a0a].s[2]\n"
+ "st1w z10.s, %[ptrue], [%[c_ptr], #6, MUL VL]\n"
+
+ "fmla z11.s, %[b0].s, %[a0a].s[3]\n"
+ "st1w z18.s, %[ptrue], [%[c_ptr], #7, MUL VL]\n"
+ "incb %[c_ptr], all, mul #12\n"
+ "fmla z19.s, %[b1].s, %[a0a].s[3]\n"
+ "st1w z26.s, %[ptrue], [%[c_ptr], #-4, MUL VL]\n"
+ "fmla z27.s, %[b2].s, %[a0a].s[3]\n"
+ "st1w z11.s, %[ptrue], [%[c_ptr], #-3, MUL VL]\n"
+
+ "fmla z12.s, %[b0].s, %[a1a].s[0]\n"
+ "st1w z19.s, %[ptrue], [%[c_ptr], #-2, MUL VL]\n"
+ "fmla z20.s, %[b1].s, %[a1a].s[0]\n"
+ "st1w z27.s, %[ptrue], [%[c_ptr], #-1, MUL VL]\n"
+ "fmla z28.s, %[b2].s, %[a1a].s[0]\n"
+ "st1w z12.s, %[ptrue], [%[c_ptr]]\n"
+
+ "fmla z13.s, %[b0].s, %[a1a].s[1]\n"
+ "st1w z20.s, %[ptrue], [%[c_ptr], #1, MUL VL]\n"
+ "fmla z21.s, %[b1].s, %[a1a].s[1]\n"
+ "st1w z28.s, %[ptrue], [%[c_ptr], #2, MUL VL]\n"
+ "fmla z29.s, %[b2].s, %[a1a].s[1]\n"
+ "st1w z13.s, %[ptrue], [%[c_ptr], #3, MUL VL]\n"
+
+ "fmla z14.s, %[b0].s, %[a1a].s[2]\n"
+ "st1w z21.s, %[ptrue], [%[c_ptr], #4, MUL VL]\n"
+ "fmla z22.s, %[b1].s, %[a1a].s[2]\n"
+ "st1w z29.s, %[ptrue], [%[c_ptr], #5, MUL VL]\n"
+ "fmla z30.s, %[b2].s, %[a1a].s[2]\n"
+ "st1w z14.s, %[ptrue], [%[c_ptr], #6, MUL VL]\n"
+
+ "fmla z15.s, %[b0].s, %[a1a].s[3]\n"
+ "st1w z22.s, %[ptrue], [%[c_ptr], #7, MUL VL]\n"
+ "incb %[c_ptr], all, mul #12\n"
+ "fmla z23.s, %[b1].s, %[a1a].s[3]\n"
+ "st1w z30.s, %[ptrue], [%[c_ptr], #-4, MUL VL]\n"
+ "fmla z31.s, %[b2].s, %[a1a].s[3]\n"
+ "st1w z15.s, %[ptrue], [%[c_ptr], #-3, MUL VL]\n"
+
+ "b 3f\n"
+
+ // Detached final iteration (odd K)
+ "2:\n"
+ "fmla z8.s , %[b0].s, %[a0].s[0]\n"
+ "ld1w %[b2].s, %[ptrue]/Z, [%[b_ptr], #2, MUL VL]\n"
+ "fmla z16.s, %[b1].s, %[a0].s[0]\n"
+ "fmla z9.s , %[b0].s, %[a0].s[1]\n"
+ "st1w z8.s, %[ptrue], [%[c_ptr]]\n"
+ "fmla z17.s, %[b1].s, %[a0].s[1]\n"
+ "st1w z16.s, %[ptrue], [%[c_ptr], #1, MUL VL]\n"
+ "fmla z24.s, %[b2].s, %[a0].s[0]\n"
+ "incb %[b_ptr], all, mul #3\n"
+ "add %[a_ptr], %[a_ptr], #32\n"
+ "st1w z24.s, %[ptrue], [%[c_ptr], #2, MUL VL]\n"
+ "fmla z25.s, %[b2].s, %[a0].s[1]\n"
+ "st1w z9.s, %[ptrue], [%[c_ptr], #3, MUL VL]\n"
+
+ "fmla z10.s, %[b0].s, %[a0].s[2]\n"
+ "st1w z17.s, %[ptrue], [%[c_ptr], #4, MUL VL]\n"
+ "fmla z18.s, %[b1].s, %[a0].s[2]\n"
+ "st1w z25.s, %[ptrue], [%[c_ptr], #5, MUL VL]\n"
+ "fmla z26.s, %[b2].s, %[a0].s[2]\n"
+ "st1w z10.s, %[ptrue], [%[c_ptr], #6, MUL VL]\n"
+
+ "fmla z11.s, %[b0].s, %[a0].s[3]\n"
+ "st1w z18.s, %[ptrue], [%[c_ptr], #7, MUL VL]\n"
+ "incb %[c_ptr], all, mul #12\n"
+ "fmla z19.s, %[b1].s, %[a0].s[3]\n"
+ "st1w z26.s, %[ptrue], [%[c_ptr], #-4, MUL VL]\n"
+ "fmla z27.s, %[b2].s, %[a0].s[3]\n"
+ "st1w z11.s, %[ptrue], [%[c_ptr], #-3, MUL VL]\n"
+
+ "fmla z12.s, %[b0].s, %[a1].s[0]\n"
+ "st1w z19.s, %[ptrue], [%[c_ptr], #-2, MUL VL]\n"
+ "fmla z20.s, %[b1].s, %[a1].s[0]\n"
+ "st1w z27.s, %[ptrue], [%[c_ptr], #-1, MUL VL]\n"
+ "fmla z28.s, %[b2].s, %[a1].s[0]\n"
+ "st1w z12.s, %[ptrue], [%[c_ptr]]\n"
+
+ "fmla z13.s, %[b0].s, %[a1].s[1]\n"
+ "st1w z20.s, %[ptrue], [%[c_ptr], #1, MUL VL]\n"
+ "fmla z21.s, %[b1].s, %[a1].s[1]\n"
+ "st1w z28.s, %[ptrue], [%[c_ptr], #2, MUL VL]\n"
+ "fmla z29.s, %[b2].s, %[a1].s[1]\n"
+ "st1w z13.s, %[ptrue], [%[c_ptr], #3, MUL VL]\n"
+
+ "fmla z14.s, %[b0].s, %[a1].s[2]\n"
+ "st1w z21.s, %[ptrue], [%[c_ptr], #4, MUL VL]\n"
+ "fmla z22.s, %[b1].s, %[a1].s[2]\n"
+ "st1w z29.s, %[ptrue], [%[c_ptr], #5, MUL VL]\n"
+ "fmla z30.s, %[b2].s, %[a1].s[2]\n"
+ "st1w z14.s, %[ptrue], [%[c_ptr], #6, MUL VL]\n"
+
+ "fmla z15.s, %[b0].s, %[a1].s[3]\n"
+ "st1w z22.s, %[ptrue], [%[c_ptr], #7, MUL VL]\n"
+ "incb %[c_ptr], all, mul #12\n"
+ "fmla z23.s, %[b1].s, %[a1].s[3]\n"
+ "st1w z30.s, %[ptrue], [%[c_ptr], #-4, MUL VL]\n"
+ "fmla z31.s, %[b2].s, %[a1].s[3]\n"
+ "st1w z15.s, %[ptrue], [%[c_ptr], #-3, MUL VL]\n"
+
+ // Common tail
+ "3:\n"
+ "st1w z23.s, %[ptrue], [%[c_ptr], #-2, MUL VL]\n"
+ "st1w z31.s, %[ptrue], [%[c_ptr], #-1, MUL VL]\n"
+ :
+ [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
+ [a0] "+w" (a0), [a1] "+w" (a1), [a0a] "+w" (a0a), [a1a] "+w" (a1a),
+ [b0] "+w" (b0), [b1] "+w" (b1), [b2] "+w" (b2), [k] "+r" (k)
+ : [oddk] "r" (oddk), [ptrue] "Upl" (ptrue)
+ : "x20", "x21", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18",
+ "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",
+ "cc", "memory"
+ );
+ }
+ }
+}
+
+} // namespace arm_gemm
+
+#endif // __ARM_FEATURE_SVE
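For reference, a scalar version of what the block kernel above computes, following the Apanel/Bpanel/Cpanel layout described in its header comment (8xK A blocks, 3VLxK B blocks, 3VLx8 row-major C blocks). This is an illustrative sketch only, with hypothetical names, not code from the patch:

    #include <arm_sve.h>

    // Illustrative reference only -- one A block times one B block into one C block.
    static void reference_sgemm_block(const float *Apanel, const float *Bpanel,
                                      float *Cpanel, int K) {
        const int width = 3 * (int)svcntw();       // 3VL: three vectors of floats
        for (int row = 0; row < 8; row++) {
            for (int col = 0; col < width; col++) {
                float acc = 0.0f;
                for (int k = 0; k < K; k++) {
                    acc += Apanel[k * 8 + row] * Bpanel[k * width + col];
                }
                Cpanel[row * width + col] = acc;   // kernel overwrites C, it does not accumulate
            }
        }
    }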
diff --git a/src/core/NEON/kernels/arm_gemm/merges/list.hpp b/src/core/NEON/kernels/arm_gemm/merges/list.hpp
index d93f1b0e6e..181d1a4e3f 100644
--- a/src/core/NEON/kernels/arm_gemm/merges/list.hpp
+++ b/src/core/NEON/kernels/arm_gemm/merges/list.hpp
@@ -26,3 +26,5 @@
#include "a64_merge_float_to_half_12x8.hpp"
#include "a64_merge_half_24x8.hpp"
#include "a64_merge_int32_12x8.hpp"
+#include "sve_merge_fp32_2VLx8.hpp"
+#include "sve_merge_fp32_3VLx8.hpp"
diff --git a/src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp32_2VLx8.hpp b/src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp32_2VLx8.hpp
new file mode 100644
index 0000000000..7479c8d77c
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp32_2VLx8.hpp
@@ -0,0 +1,1208 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+template<>
+inline void MergeResults<2, 8, true>(float *out, const float *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const float alpha, const float beta)
+{
+ const float *inptr = in;
+
+ for (int y=y0; y<ymax; y+=8) {
+ float *outptr0 = out + (y * ldout) + x0;
+ float *outptr1 = outptr0 + ldout;
+ float *outptr2 = outptr1 + ldout;
+ float *outptr3 = outptr2 + ldout;
+ float *outptr4 = outptr3 + ldout;
+ float *outptr5 = outptr4 + ldout;
+ float *outptr6 = outptr5 + ldout;
+ float *outptr7 = outptr6 + ldout;
+
+ const int height = ymax - y;
+
+ for (int i=x0; i<xmax; i+=(2 * get_vector_length<float>())) {
+ if (beta==0.0f)
+ {
+ switch(height) {
+ case 1:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z5.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 2:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z6.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr1], #0x40]\n"
+ "addvl %[outptr1], %[outptr1], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 3:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z7.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z4.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr1], #0x40]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #2\n"
+ "prfm PSTL1KEEP, [%[outptr2], #0x40]\n"
+ "addvl %[outptr2], %[outptr2], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 4:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr1], #0x40]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #2\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr2], #0x40]\n"
+ "addvl %[outptr2], %[outptr2], #2\n"
+ "prfm PSTL1KEEP, [%[outptr3], #0x40]\n"
+ "addvl %[outptr3], %[outptr3], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 5:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z5.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr1], #0x40]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z4.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #2\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [x8, #-7, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr2], #0x40]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "addvl %[outptr2], %[outptr2], #2\n"
+ "prfm PSTL1KEEP, [%[outptr3], #0x40]\n"
+ "addvl %[outptr3], %[outptr3], #2\n"
+ "prfm PSTL1KEEP, [%[outptr4], #0x40]\n"
+ "addvl %[outptr4], %[outptr4], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 6:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "ld1w z5.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z6.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z4.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr1], #0x40]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #2\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z6.s, p0/z, [x8, #-7, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr2], #0x40]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [x8, #-5, MUL VL]\n"
+ "addvl %[outptr2], %[outptr2], #2\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr5], #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr3], #0x40]\n"
+ "addvl %[outptr3], %[outptr3], #2\n"
+ "prfm PSTL1KEEP, [%[outptr4], #0x40]\n"
+ "addvl %[outptr4], %[outptr4], #2\n"
+ "prfm PSTL1KEEP, [%[outptr5], #0x40]\n"
+ "addvl %[outptr5], %[outptr5], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 7:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "ld1w z5.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5]]\n"
+ "ld1w z6.s, p0/z, [x8, #-4, MUL VL]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr6]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z7.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z4.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr1], #0x40]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #2\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr2], #0x40]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "ld1w z4.s, p0/z, [x8, #-5, MUL VL]\n"
+ "addvl %[outptr2], %[outptr2], #2\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr5], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [x8, #-3, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr3], #0x40]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr6], #1, MUL VL]\n"
+ "addvl %[outptr3], %[outptr3], #2\n"
+ "prfm PSTL1KEEP, [%[outptr4], #0x40]\n"
+ "addvl %[outptr4], %[outptr4], #2\n"
+ "prfm PSTL1KEEP, [%[outptr5], #0x40]\n"
+ "addvl %[outptr5], %[outptr5], #2\n"
+ "prfm PSTL1KEEP, [%[outptr6], #0x40]\n"
+ "addvl %[outptr6], %[outptr6], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ default:
+ case 8:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "ld1w z5.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5]]\n"
+ "ld1w z6.s, p0/z, [x8, #-4, MUL VL]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr6]]\n"
+ "ld1w z7.s, p0/z, [x8, #-2, MUL VL]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr7]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr1], #0x40]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #2\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z4.s, p0/z, [x8, #-7, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr2], #0x40]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [x8, #-5, MUL VL]\n"
+ "addvl %[outptr2], %[outptr2], #2\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5], #1, MUL VL]\n"
+ "ld1w z6.s, p0/z, [x8, #-3, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr3], #0x40]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr6], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [x8, #-1, MUL VL]\n"
+ "addvl %[outptr3], %[outptr3], #2\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr7], #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr4], #0x40]\n"
+ "addvl %[outptr4], %[outptr4], #2\n"
+ "prfm PSTL1KEEP, [%[outptr5], #0x40]\n"
+ "addvl %[outptr5], %[outptr5], #2\n"
+ "prfm PSTL1KEEP, [%[outptr6], #0x40]\n"
+ "addvl %[outptr6], %[outptr6], #2\n"
+ "prfm PSTL1KEEP, [%[outptr7], #0x40]\n"
+ "addvl %[outptr7], %[outptr7], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+
+ }
+ }
+ else
+ {
+ switch(height) {
+ case 1:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z9.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 2:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z9.s, p0/z, [%[outptr1]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z10.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr1], #0x40]\n"
+ "addvl %[outptr1], %[outptr1], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 3:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z9.s, p0/z, [%[outptr1]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z10.s, p0/z, [%[outptr2]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z11.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr2], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr1], #0x40]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #2\n"
+ "prfm PLDL1KEEP, [%[outptr2], #0x40]\n"
+ "addvl %[outptr2], %[outptr2], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 4:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z9.s, p0/z, [%[outptr1]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z10.s, p0/z, [%[outptr2]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z11.s, p0/z, [%[outptr3]]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr2], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr1], #0x40]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr3], #1, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #2\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr2], #0x40]\n"
+ "addvl %[outptr2], %[outptr2], #2\n"
+ "prfm PLDL1KEEP, [%[outptr3], #0x40]\n"
+ "addvl %[outptr3], %[outptr3], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 5:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z9.s, p0/z, [%[outptr1]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z10.s, p0/z, [%[outptr2]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z11.s, p0/z, [%[outptr3]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z8.s, p0/z, [%[outptr4]]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z9.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr2], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr1], #0x40]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr3], #1, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #2\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr4], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr2], #0x40]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #-7, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "addvl %[outptr2], %[outptr2], #2\n"
+ "prfm PLDL1KEEP, [%[outptr3], #0x40]\n"
+ "addvl %[outptr3], %[outptr3], #2\n"
+ "prfm PLDL1KEEP, [%[outptr4], #0x40]\n"
+ "addvl %[outptr4], %[outptr4], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 6:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z9.s, p0/z, [%[outptr1]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z10.s, p0/z, [%[outptr2]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z11.s, p0/z, [%[outptr3]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z8.s, p0/z, [%[outptr4]]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "ld1w z9.s, p0/z, [%[outptr5]]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z10.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr2], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr1], #0x40]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr3], #1, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #2\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr4], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr2], #0x40]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #-7, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr5], #1, MUL VL]\n"
+ "addvl %[outptr2], %[outptr2], #2\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-5, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr5], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr3], #0x40]\n"
+ "addvl %[outptr3], %[outptr3], #2\n"
+ "prfm PLDL1KEEP, [%[outptr4], #0x40]\n"
+ "addvl %[outptr4], %[outptr4], #2\n"
+ "prfm PLDL1KEEP, [%[outptr5], #0x40]\n"
+ "addvl %[outptr5], %[outptr5], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 7:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z9.s, p0/z, [%[outptr1]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z10.s, p0/z, [%[outptr2]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z11.s, p0/z, [%[outptr3]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z8.s, p0/z, [%[outptr4]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "ld1w z9.s, p0/z, [%[outptr5]]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5]]\n"
+ "ld1w z10.s, p0/z, [%[outptr6]]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #-4, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr6]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z11.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr2], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr1], #0x40]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr3], #1, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #2\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr4], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr2], #0x40]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr5], #1, MUL VL]\n"
+ "addvl %[outptr2], %[outptr2], #2\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-5, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr5], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr6], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr3], #0x40]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #-3, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr6], #1, MUL VL]\n"
+ "addvl %[outptr3], %[outptr3], #2\n"
+ "prfm PLDL1KEEP, [%[outptr4], #0x40]\n"
+ "addvl %[outptr4], %[outptr4], #2\n"
+ "prfm PLDL1KEEP, [%[outptr5], #0x40]\n"
+ "addvl %[outptr5], %[outptr5], #2\n"
+ "prfm PLDL1KEEP, [%[outptr6], #0x40]\n"
+ "addvl %[outptr6], %[outptr6], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ default:
+ case 8:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z9.s, p0/z, [%[outptr1]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x100]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z10.s, p0/z, [%[outptr2]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x140]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z11.s, p0/z, [%[outptr3]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z8.s, p0/z, [%[outptr4]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "ld1w z9.s, p0/z, [%[outptr5]]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5]]\n"
+ "ld1w z10.s, p0/z, [%[outptr6]]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #-4, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr6]]\n"
+ "ld1w z11.s, p0/z, [%[outptr7]]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-2, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr7]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x40]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #2\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr2], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr1], #0x40]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr3], #1, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #2\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr4], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr2], #0x40]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-7, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr5], #1, MUL VL]\n"
+ "addvl %[outptr2], %[outptr2], #2\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #-5, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5], #1, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr6], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr3], #0x40]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #-3, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr6], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr7], #1, MUL VL]\n"
+ "addvl %[outptr3], %[outptr3], #2\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-1, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr7], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr4], #0x40]\n"
+ "addvl %[outptr4], %[outptr4], #2\n"
+ "prfm PLDL1KEEP, [%[outptr5], #0x40]\n"
+ "addvl %[outptr5], %[outptr5], #2\n"
+ "prfm PLDL1KEEP, [%[outptr6], #0x40]\n"
+ "addvl %[outptr6], %[outptr6], #2\n"
+ "prfm PLDL1KEEP, [%[outptr7], #0x40]\n"
+ "addvl %[outptr7], %[outptr7], #2\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #16\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+
+ }
+ }
+ }
+ }
+}
+
+#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp32_3VLx8.hpp b/src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp32_3VLx8.hpp
new file mode 100644
index 0000000000..27084c3598
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp32_3VLx8.hpp
@@ -0,0 +1,1564 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
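+// Editor's note: a minimal scalar sketch of what this merge specialisation
+// computes, inferred from the assembly below (the names acc/out here are
+// illustrative, not part of the library). For each 8-row by 3*VL-column block:
+//
+//     out[y][x] = alpha * acc[y][x]                       // beta == 0.0f fast path
+//     out[y][x] = beta * out[y][x] + alpha * acc[y][x]    // general path
+//
+// WHILELT predication covers the ragged right-hand edge in the x direction,
+// and the switch(height) below handles blocks shorter than 8 rows.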
+template<>
+inline void MergeResults<3, 8, true>(float *out, const float *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const float alpha, const float beta)
+{
+ const float *inptr = in;
+
+ for (int y=y0; y<ymax; y+=8) {
+ float *outptr0 = out + (y * ldout) + x0;
+ float *outptr1 = outptr0 + ldout;
+ float *outptr2 = outptr1 + ldout;
+ float *outptr3 = outptr2 + ldout;
+ float *outptr4 = outptr3 + ldout;
+ float *outptr5 = outptr4 + ldout;
+ float *outptr6 = outptr5 + ldout;
+ float *outptr7 = outptr6 + ldout;
+
+ const int height = ymax - y;
+
+ for (int i=x0; i<xmax; i+=(3 * get_vector_length<float>())) {
+ if (beta==0.0f)
+ {
+ switch(height) {
+ case 1:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z5.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z6.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 2:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z6.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+ "addvl %[outptr1], %[outptr1], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 3:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z7.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z4.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z6.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
+ "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #3\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+ "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+ "addvl %[outptr2], %[outptr2], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 4:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
+ "ld1w z6.s, p0/z, [x8, #-8, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
+ "ld1w z7.s, p0/z, [x8, #-5, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #3\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+ "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+ "addvl %[outptr2], %[outptr2], #3\n"
+ "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+ "addvl %[outptr3], %[outptr3], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 5:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z4.s, p0/z, [x8, #-4, MUL VL]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z5.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z4.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [x8, #-3, MUL VL]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z6.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
+ "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
+ "ld1w z5.s, p0/z, [x8, #-5, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #3\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr3], #2, MUL VL]\n"
+ "ld1w z6.s, p0/z, [x8, #-2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr4], #2, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+ "addvl %[outptr2], %[outptr2], #3\n"
+ "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+ "addvl %[outptr3], %[outptr3], #3\n"
+ "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+ "addvl %[outptr4], %[outptr4], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 6:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z4.s, p0/z, [x8, #-4, MUL VL]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "ld1w z5.s, p0/z, [x8, #-1, MUL VL]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z6.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z4.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z6.s, p0/z, [x8, #-3, MUL VL]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [x8]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr5], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
+ "ld1w z6.s, p0/z, [x8, #-8, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
+ "ld1w z7.s, p0/z, [x8, #-5, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #3\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
+ "ld1w z4.s, p0/z, [x8, #-2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4], #2, MUL VL]\n"
+ "ld1w z5.s, p0/z, [x8, #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5], #2, MUL VL]\n"
+ "addvl %[outptr2], %[outptr2], #3\n"
+ "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+ "addvl %[outptr3], %[outptr3], #3\n"
+ "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+ "addvl %[outptr4], %[outptr4], #3\n"
+ "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+ "addvl %[outptr5], %[outptr5], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 7:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z4.s, p0/z, [x8, #-4, MUL VL]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "ld1w z5.s, p0/z, [x8, #-1, MUL VL]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5]]\n"
+ "ld1w z6.s, p0/z, [x8, #2, MUL VL]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr6]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z7.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z4.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z6.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [x8, #-3, MUL VL]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "ld1w z4.s, p0/z, [x8]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr5], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [x8, #3, MUL VL]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr6], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z6.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "ld1w z7.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
+ "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
+ "ld1w z5.s, p0/z, [x8, #-5, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #3\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr3], #2, MUL VL]\n"
+ "ld1w z6.s, p0/z, [x8, #-2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr4], #2, MUL VL]\n"
+ "ld1w z7.s, p0/z, [x8, #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr5], #2, MUL VL]\n"
+ "ld1w z4.s, p0/z, [x8, #4, MUL VL]\n"
+ "addvl %[outptr2], %[outptr2], #3\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr6], #2, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+ "addvl %[outptr3], %[outptr3], #3\n"
+ "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+ "addvl %[outptr4], %[outptr4], #3\n"
+ "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+ "addvl %[outptr5], %[outptr5], #3\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
+ "prfm PSTL1KEEP, [%[outptr6], #0x60]\n"
+ "addvl %[outptr6], %[outptr6], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ default:
+ case 8:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z4.s, p0/z, [x8, #-4, MUL VL]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "ld1w z5.s, p0/z, [x8, #-1, MUL VL]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5]]\n"
+ "ld1w z6.s, p0/z, [x8, #2, MUL VL]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr6]]\n"
+ "ld1w z7.s, p0/z, [x8, #5, MUL VL]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr7]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z6.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z4.s, p0/z, [x8, #-3, MUL VL]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "ld1w z5.s, p0/z, [x8]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5], #1, MUL VL]\n"
+ "ld1w z6.s, p0/z, [x8, #3, MUL VL]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr6], #1, MUL VL]\n"
+ "ld1w z7.s, p0/z, [x8, #6, MUL VL]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr7], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z4.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
+ "ld1w z6.s, p0/z, [x8, #-8, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr1], #0x60]\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
+ "ld1w z7.s, p0/z, [x8, #-5, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #3\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
+ "ld1w z4.s, p0/z, [x8, #-2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+ "fmul z8.s, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4], #2, MUL VL]\n"
+ "ld1w z5.s, p0/z, [x8, #1, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr2], #0x60]\n"
+ "fmul z9.s, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5], #2, MUL VL]\n"
+ "ld1w z6.s, p0/z, [x8, #4, MUL VL]\n"
+ "addvl %[outptr2], %[outptr2], #3\n"
+ "fmul z10.s, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr6], #2, MUL VL]\n"
+ "ld1w z7.s, p0/z, [x8, #7, MUL VL]\n"
+ "prfm PSTL1KEEP, [%[outptr3], #0x60]\n"
+ "fmul z11.s, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr7], #2, MUL VL]\n"
+ "addvl %[outptr3], %[outptr3], #3\n"
+ "prfm PSTL1KEEP, [%[outptr4], #0x60]\n"
+ "addvl %[outptr4], %[outptr4], #3\n"
+ "prfm PSTL1KEEP, [%[outptr5], #0x60]\n"
+ "addvl %[outptr5], %[outptr5], #3\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
+ "prfm PSTL1KEEP, [%[outptr6], #0x60]\n"
+ "addvl %[outptr6], %[outptr6], #3\n"
+ "prfm PSTL1KEEP, [%[outptr7], #0x60]\n"
+ "addvl %[outptr7], %[outptr7], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+
+ }
+ }
+ else
+ {
+ switch(height) {
+ case 1:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z9.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z10.s, p0/z, [%[outptr0], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 2:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z9.s, p0/z, [%[outptr1]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z10.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr1], #2, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+ "addvl %[outptr1], %[outptr1], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 3:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z9.s, p0/z, [%[outptr1]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z10.s, p0/z, [%[outptr2]]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z11.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr2], #1, MUL VL]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z10.s, p0/z, [%[outptr0], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr1], #2, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr2], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #3\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+ "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+ "addvl %[outptr2], %[outptr2], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 4:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z9.s, p0/z, [%[outptr1]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z10.s, p0/z, [%[outptr2]]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z11.s, p0/z, [%[outptr3]]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr2], #1, MUL VL]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr3], #1, MUL VL]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr1], #2, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr2], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #-8, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr3], #2, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #3\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-5, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+ "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+ "addvl %[outptr2], %[outptr2], #3\n"
+ "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+ "addvl %[outptr3], %[outptr3], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 5:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z9.s, p0/z, [%[outptr1]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z10.s, p0/z, [%[outptr2]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z11.s, p0/z, [%[outptr3]]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z8.s, p0/z, [%[outptr4]]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-4, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z9.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr2], #1, MUL VL]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr3], #1, MUL VL]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr4], #1, MUL VL]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #-3, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z10.s, p0/z, [%[outptr0], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr1], #2, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr2], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr3], #2, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #3\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #-5, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr3], #2, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr4], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #-2, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr4], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+ "addvl %[outptr2], %[outptr2], #3\n"
+ "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+ "addvl %[outptr3], %[outptr3], #3\n"
+ "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+ "addvl %[outptr4], %[outptr4], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 6:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z9.s, p0/z, [%[outptr1]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z10.s, p0/z, [%[outptr2]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z11.s, p0/z, [%[outptr3]]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z8.s, p0/z, [%[outptr4]]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-4, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "ld1w z9.s, p0/z, [%[outptr5]]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #-1, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z10.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr2], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr3], #1, MUL VL]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr4], #1, MUL VL]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #-3, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr5], #1, MUL VL]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr5], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr1], #2, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr2], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #-8, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr3], #2, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #3\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-5, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr4], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-2, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4], #2, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr5], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #1, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5], #2, MUL VL]\n"
+ "addvl %[outptr2], %[outptr2], #3\n"
+ "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+ "addvl %[outptr3], %[outptr3], #3\n"
+ "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+ "addvl %[outptr4], %[outptr4], #3\n"
+ "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
+ "addvl %[outptr5], %[outptr5], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ case 7:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z9.s, p0/z, [%[outptr1]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z10.s, p0/z, [%[outptr2]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z11.s, p0/z, [%[outptr3]]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z8.s, p0/z, [%[outptr4]]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-4, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "ld1w z9.s, p0/z, [%[outptr5]]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #-1, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5]]\n"
+ "ld1w z10.s, p0/z, [%[outptr6]]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #2, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr6]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z11.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr2], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr3], #1, MUL VL]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr4], #1, MUL VL]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-3, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr5], #1, MUL VL]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr5], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr6], #1, MUL VL]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #3, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr6], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z10.s, p0/z, [%[outptr0], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr1], #2, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr1], #2, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr2], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-8, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr2], #2, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr3], #2, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #3\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #-5, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr3], #2, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr4], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #-2, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr4], #2, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr5], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #1, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr5], #2, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr6], #2, MUL VL]\n"
+ "addvl %[outptr2], %[outptr2], #3\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #4, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr6], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+ "addvl %[outptr3], %[outptr3], #3\n"
+ "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+ "addvl %[outptr4], %[outptr4], #3\n"
+ "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
+ "addvl %[outptr5], %[outptr5], #3\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
+ "prfm PLDL1KEEP, [%[outptr6], #0x60]\n"
+ "addvl %[outptr6], %[outptr6], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+ default:
+ case 8:
+ {
+ long w = xmax - i;
+ long p = 0;
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ "mov z2.s, %s[alpha]\n"
+ "addvl x8, %[inptr], #16\n"
+ "mov z3.s, %s[beta]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0]]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr]]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0]]\n"
+ "ld1w z9.s, p0/z, [%[outptr1]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x180]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #3, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1]]\n"
+ "ld1w z10.s, p0/z, [%[outptr2]]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x240]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #6, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2]]\n"
+ "ld1w z11.s, p0/z, [%[outptr3]]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-7, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3]]\n"
+ "ld1w z8.s, p0/z, [%[outptr4]]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-4, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4]]\n"
+ "ld1w z9.s, p0/z, [%[outptr5]]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #-1, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5]]\n"
+ "ld1w z10.s, p0/z, [%[outptr6]]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #2, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr6]]\n"
+ "ld1w z11.s, p0/z, [%[outptr7]]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #5, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr7]]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0], #1, MUL VL]\n"
+ "incw %[p], all, mul #1\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #1, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr1], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x1c0]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #4, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #1, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr2], #1, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x280]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [%[inptr], #7, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr3], #1, MUL VL]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-6, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3], #1, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr4], #1, MUL VL]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-3, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4], #1, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr5], #1, MUL VL]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5], #1, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr6], #1, MUL VL]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #3, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr6], #1, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr7], #1, MUL VL]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #6, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr7], #1, MUL VL]\n"
+ "whilelt p0.s, %[p], %[w]\n"
+ "b.none 1f\n"
+ "ld1w z8.s, p0/z, [%[outptr0], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr0], #0x60]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [%[inptr], #2, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr0], #2, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr1], #2, MUL VL]\n"
+ "addvl %[outptr0], %[outptr0], #3\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [%[inptr], #5, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr1], #2, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr2], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr1], #0x60]\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #-8, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr2], #2, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr3], #2, MUL VL]\n"
+ "addvl %[outptr1], %[outptr1], #3\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #-5, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr3], #2, MUL VL]\n"
+ "ld1w z8.s, p0/z, [%[outptr4], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x200]\n"
+ "fmul z8.s, z8.s, z3.s\n"
+ "ld1w z4.s, p0/z, [x8, #-2, MUL VL]\n"
+ "fmla z8.s, p0/m, z4.s, z2.s\n"
+ "st1w z8.s, p0, [%[outptr4], #2, MUL VL]\n"
+ "ld1w z9.s, p0/z, [%[outptr5], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr2], #0x60]\n"
+ "fmul z9.s, z9.s, z3.s\n"
+ "ld1w z5.s, p0/z, [x8, #1, MUL VL]\n"
+ "fmla z9.s, p0/m, z5.s, z2.s\n"
+ "st1w z9.s, p0, [%[outptr5], #2, MUL VL]\n"
+ "ld1w z10.s, p0/z, [%[outptr6], #2, MUL VL]\n"
+ "addvl %[outptr2], %[outptr2], #3\n"
+ "fmul z10.s, z10.s, z3.s\n"
+ "ld1w z6.s, p0/z, [x8, #4, MUL VL]\n"
+ "fmla z10.s, p0/m, z6.s, z2.s\n"
+ "st1w z10.s, p0, [%[outptr6], #2, MUL VL]\n"
+ "ld1w z11.s, p0/z, [%[outptr7], #2, MUL VL]\n"
+ "prfm PLDL1KEEP, [%[outptr3], #0x60]\n"
+ "fmul z11.s, z11.s, z3.s\n"
+ "ld1w z7.s, p0/z, [x8, #7, MUL VL]\n"
+ "fmla z11.s, p0/m, z7.s, z2.s\n"
+ "st1w z11.s, p0, [%[outptr7], #2, MUL VL]\n"
+ "addvl %[outptr3], %[outptr3], #3\n"
+ "prfm PLDL1KEEP, [%[outptr4], #0x60]\n"
+ "addvl %[outptr4], %[outptr4], #3\n"
+ "prfm PLDL1KEEP, [%[outptr5], #0x60]\n"
+ "addvl %[outptr5], %[outptr5], #3\n"
+ "prfm PLDL1KEEP, [%[inptr], #0x2c0]\n"
+ "prfm PLDL1KEEP, [%[outptr6], #0x60]\n"
+ "addvl %[outptr6], %[outptr6], #3\n"
+ "prfm PLDL1KEEP, [%[outptr7], #0x60]\n"
+ "addvl %[outptr7], %[outptr7], #3\n"
+ "1:\n"
+ "addvl %[inptr], %[inptr], #24\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3), [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr), [p] "+r" (p)
+ : [alpha] "w" (alpha), [beta] "w" (beta), [w] "r" (w)
+ : "x8", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "memory", "cc"
+ );
+ }
+ break;
+
+
+ }
+ }
+ }
+ }
+}
+
+#endif // __ARM_FEATURE_SVE
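
For readers tracing the predicated assembly above, the following plain C++ sketch shows the operation every block performs: the existing output is scaled by beta (the fmul), the packed accumulator scaled by alpha is added in (the fmla), and whilelt masks off the final partial vector of each row. The helper name and the simplified row-major indexing of the packed input are illustrative assumptions only; the real kernel walks the packed input in vector-length-sized chunks, eight rows by three vectors per tile.

    // Illustration only; this helper is not part of the patch.
    static void merge_tile_reference(float *out, const float *in, int ldout,
                                     int rows, int cols, float alpha, float beta)
    {
        for (int r = 0; r < rows; r++) {
            for (int c = 0; c < cols; c++) {
                // out * beta (fmul) plus in * alpha (fmla), per active lane.
                out[r * ldout + c] = beta * out[r * ldout + c] + alpha * in[r * cols + c];
            }
        }
    }
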
diff --git a/src/core/NEON/kernels/arm_gemm/std_transforms_sve.hpp b/src/core/NEON/kernels/arm_gemm/std_transforms_sve.hpp
new file mode 100644
index 0000000000..b7323ebaea
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/std_transforms_sve.hpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#include "mergeresults.hpp"
+#include "transform.hpp"
+
+namespace arm_gemm {
+
+/*
+ * Define "standard" transforms for the blocked GEMMs for SVE.
+ *
+ * This assumes that A is interleaved 'height' ways, B is interleaved
+ * 'width'xVL ways and transposed, and that the merge needs to work in
+ * 'height' x 'width'xVL blocks.
+ *
+ * The optional 'block' parameter is for kernels using dot-product type
+ * instructions like UDOT and SDOT.
+ */
+template<typename TOperand, typename TResult, unsigned int height, unsigned int width_vectors, unsigned int block=1, unsigned int mmla=1>
+class StdTransformsSVE
+{
+public:
+ template<typename TIn>
+ void PrepareA(TOperand *out, const TIn *in, const int stride, const int y0,
+ const int ymax, const int k0, const int kmax, bool transposed) {
+ if (transposed) {
+ Transform<height, block, true>(out, in, stride, y0, ymax, k0, kmax);
+ } else {
+ Transform<height, block, false>(out, in, stride, y0, ymax, k0, kmax);
+ }
+ }
+
+ template<typename TIn>
+ void PrepareB(TOperand *out, const TIn *in, const int stride, const int x0,
+ const int xmax, const int k0, const int kmax, bool transposed) {
+ if (transposed) {
+ Transform<width_vectors, block, false, true>(out, in, stride, x0, xmax, k0, kmax);
+ } else {
+ Transform<width_vectors, block, true, true>(out, in, stride, x0, xmax, k0, kmax);
+ }
+ }
+
+ template<typename TOut>
+ void Merge(TOut *out, const TResult *in, int stride, int y0, int ymax, int x0, int xmax, const TOut alpha, const TOut beta) {
+ MergeResults<width_vectors / mmla, height, true>(out, in, stride, y0, ymax, x0, xmax, alpha, beta);
+ }
+};
+
+} // namespace arm_gemm
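
As a rough illustration of how this helper is meant to be embedded, here is a hypothetical strategy class; the class name and the 8-row-by-3-vector tile shape are assumptions made for the example, not the kernel classes added by this patch.

    #include "std_transforms_sve.hpp"

    namespace arm_gemm {

    // Illustrative only: a strategy ties an operand/result type and a tile
    // shape to the standard SVE transforms.
    class hypothetical_fp32_strategy {
    public:
        typedef float operand_type;
        typedef float result_type;

        // 8 rows of A and 3 vectors' worth of B columns per tile, unblocked.
        StdTransformsSVE<operand_type, result_type, 8, 3> transforms = {};
    };

    } // namespace arm_gemm

A GEMM implementation would then call transforms.PrepareA()/PrepareB() to pack the operands and transforms.Merge() to write each accumulated tile back with the alpha/beta scaling seen in the merge routines earlier in this patch.
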
diff --git a/src/core/NEON/kernels/arm_gemm/transform.hpp b/src/core/NEON/kernels/arm_gemm/transform.hpp
index 77d0d87a4d..e422b91c83 100644
--- a/src/core/NEON/kernels/arm_gemm/transform.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transform.hpp
@@ -40,7 +40,7 @@ struct TransformImpl {
static void Transform(TOut* out, const TIn* const in, const int stride,
const int y0, const int ymax, const int x0, const int xmax) {
// For SVE cases we multiply the interleave factor by the vector length.
- const unsigned int IntBy = tIntBy * (sve ? get_vector_length<TOut>() : 1);
+ const unsigned int IntBy = tIntBy * (sve ? get_vector_length<TOut>() / BlockBy : 1);
const int n_whole_y_blocks = (ymax - y0) / IntBy;
const int y_remainders = (ymax - y0) % IntBy;
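
A worked example of the corrected expression, under the assumption of a 256-bit SVE implementation so that get_vector_length<float>() returns 8 (the real value is fixed by the hardware, not by this code); the numbers below are illustrative only.

    #include <cstdio>

    int main() {
        const bool     sve     = true;
        const unsigned tIntBy  = 8;  // nominal interleave factor of the transform
        const unsigned BlockBy = 2;  // block size used by the blocked (dot-product) transforms
        const unsigned vl      = 8;  // get_vector_length<float>() on a 256-bit SVE machine (assumed)
        const unsigned IntBy   = tIntBy * (sve ? vl / BlockBy : 1u);
        std::printf("IntBy = %u\n", IntBy);  // prints "IntBy = 32"
        return 0;
    }

Dividing by BlockBy keeps the scaled interleave factor expressed in blocks rather than raw elements, which appears to be what the blocked SVE transforms added below expect; with BlockBy == 1 the behaviour is unchanged.
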
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/list.hpp b/src/core/NEON/kernels/arm_gemm/transforms/list.hpp
index 8ad5b857fb..17328a5d6a 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/list.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/list.hpp
@@ -23,9 +23,17 @@
*/
#include "a32_interleave_6way_32bit.hpp"
#include "a32_transpose_interleave_8way_32bit.hpp"
+#ifdef __ARM_FEATURE_SVE
+#include "sve_interleave_8way_32bit.hpp"
+#include "sve_interleave_8way_block2_16bit.hpp"
+#include "sve_interleave_8way_block2_32bit.hpp"
+#include "sve_interleave_8way_block4_16bit.hpp"
+#include "sve_interleave_8way_block4_8bit.hpp"
+#else
+#include "a64_interleave_8way_32bit.hpp"
+#endif
#include "a64_block16_interleave4_8bit.hpp"
#include "a64_interleave_8way_16bit.hpp"
-#include "a64_interleave_8way_32bit.hpp"
#include "a64_interleave_8way_half_to_float.hpp"
#include "a64_transpose_interleave_12way_16bit.hpp"
#include "a64_transpose_interleave_12way_half_to_float.hpp"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_32bit.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_32bit.hpp
new file mode 100644
index 0000000000..752e837f8d
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_32bit.hpp
@@ -0,0 +1,596 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+template<>
+template<typename T>
+inline void TransformImpl<8, 1, false, 4, 4, false>::Transform(T *out, const T *in, int ldin, int y0, int ymax, int k0, int kmax)
+{
+ uint32_t *master_outptr = reinterpret_cast<uint32_t *>(out);
+ const uint32_t *inptr = reinterpret_cast<const uint32_t *>(in);
+
+ for (int y=y0; y<ymax; y+=8)
+ {
+ const int height = ymax-y;
+ const long inwidth = (kmax - k0);
+ const long outwidth = inwidth * 8;
+ long inpos = 0;
+ long outpos = 0;
+
+ uint32_t *outptr = master_outptr;
+ master_outptr += outwidth;
+
+ const uint32_t *inptr0 = inptr + y * ldin + k0;
+ const uint32_t *inptr1 = inptr0 + ldin;
+ const uint32_t *inptr2 = inptr1 + ldin;
+ const uint32_t *inptr3 = inptr2 + ldin;
+ const uint32_t *inptr4 = inptr3 + ldin;
+ const uint32_t *inptr5 = inptr4 + ldin;
+ const uint32_t *inptr6 = inptr5 + ldin;
+ const uint32_t *inptr7 = inptr6 + ldin;
+
+ switch(height)
+ {
+ case 1:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.s, #0\n"
+ "ld1w z0.s, p0/z, [%[inptr0], %[inpos], LSL #2]\n"
+ "incw %[inpos], all, mul #1\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z0.s, z8.s, z4.s\n"
+ "zip2 z1.s, z8.s, z4.s\n"
+ "zip1 z2.s, z9.s, z4.s\n"
+ "zip2 z3.s, z9.s, z4.s\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z4.s\n"
+ "zip2 z11.s, z1.s, z4.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip1 z12.s, z2.s, z4.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z14.s, z3.s, z4.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip2 z15.s, z3.s, z4.s\n"
+ "whilelt p4.s, %[outpos], %[outwidth]\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p5.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z12.s, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p6.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z13.s, p5, [%[outptr], #5, MUL VL]\n"
+ "whilelt p7.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z14.s, p6, [%[outptr], #6, MUL VL]\n"
+ "st1w z15.s, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 2:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.s, #0\n"
+ "mov z14.s, #0\n"
+ "ld1w z0.s, p0/z, [%[inptr0], %[inpos], LSL #2]\n"
+ "ld1w z1.s, p0/z, [%[inptr1], %[inpos], LSL #2]\n"
+ "incw %[inpos], all, mul #1\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z4.s\n"
+ "zip2 z11.s, z1.s, z4.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip1 z0.s, z8.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z1.s, z8.s, z4.s\n"
+ "zip1 z2.s, z9.s, z4.s\n"
+ "zip2 z3.s, z9.s, z4.s\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z6.s, z11.s, z14.s\n"
+ "zip2 z7.s, z11.s, z14.s\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "whilelt p4.s, %[outpos], %[outwidth]\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "whilelt p5.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p6.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z12.s, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p7.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z13.s, p5, [%[outptr], #5, MUL VL]\n"
+ "st1w z14.s, p6, [%[outptr], #6, MUL VL]\n"
+ "st1w z15.s, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 3:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.s, #0\n"
+ "mov z14.s, #0\n"
+ "ld1w z0.s, p0/z, [%[inptr0], %[inpos], LSL #2]\n"
+ "ld1w z1.s, p0/z, [%[inptr1], %[inpos], LSL #2]\n"
+ "ld1w z2.s, p0/z, [%[inptr2], %[inpos], LSL #2]\n"
+ "incw %[inpos], all, mul #1\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z4.s\n"
+ "zip2 z11.s, z1.s, z4.s\n"
+ "zip1 z12.s, z2.s, z4.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "zip1 z6.s, z11.s, z14.s\n"
+ "zip2 z7.s, z11.s, z14.s\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "whilelt p4.s, %[outpos], %[outwidth]\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "whilelt p5.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p6.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z12.s, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p7.s, %[outpos], %[outwidth]\n"
+ "st1w z13.s, p5, [%[outptr], #5, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z14.s, p6, [%[outptr], #6, MUL VL]\n"
+ "st1w z15.s, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 4:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.s, #0\n"
+ "ld1w z0.s, p0/z, [%[inptr0], %[inpos], LSL #2]\n"
+ "ld1w z1.s, p0/z, [%[inptr1], %[inpos], LSL #2]\n"
+ "ld1w z2.s, p0/z, [%[inptr2], %[inpos], LSL #2]\n"
+ "ld1w z3.s, p0/z, [%[inptr3], %[inpos], LSL #2]\n"
+ "incw %[inpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z10.s, z1.s, z4.s\n"
+ "zip2 z11.s, z1.s, z4.s\n"
+ "zip1 z12.s, z2.s, z4.s\n"
+ "zip2 z13.s, z2.s, z4.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip1 z14.s, z3.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z4.s\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p4.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p5.s, %[outpos], %[outwidth]\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p6.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z12.s, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p7.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z13.s, p5, [%[outptr], #5, MUL VL]\n"
+ "st1w z14.s, p6, [%[outptr], #6, MUL VL]\n"
+ "st1w z15.s, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 5:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z5.s, #0\n"
+ "ld1w z0.s, p0/z, [%[inptr0], %[inpos], LSL #2]\n"
+ "ld1w z1.s, p0/z, [%[inptr1], %[inpos], LSL #2]\n"
+ "ld1w z2.s, p0/z, [%[inptr2], %[inpos], LSL #2]\n"
+ "ld1w z3.s, p0/z, [%[inptr3], %[inpos], LSL #2]\n"
+ "ld1w z4.s, p0/z, [%[inptr4], %[inpos], LSL #2]\n"
+ "incw %[inpos], all, mul #1\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z12.s, z2.s, z5.s\n"
+ "zip2 z13.s, z2.s, z5.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip1 z14.s, z3.s, z5.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z5.s\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p4.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p5.s, %[outpos], %[outwidth]\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p6.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z12.s, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p7.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z13.s, p5, [%[outptr], #5, MUL VL]\n"
+ "st1w z14.s, p6, [%[outptr], #6, MUL VL]\n"
+ "st1w z15.s, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 6:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z6.s, #0\n"
+ "ld1w z0.s, p0/z, [%[inptr0], %[inpos], LSL #2]\n"
+ "ld1w z1.s, p0/z, [%[inptr1], %[inpos], LSL #2]\n"
+ "ld1w z2.s, p0/z, [%[inptr2], %[inpos], LSL #2]\n"
+ "ld1w z3.s, p0/z, [%[inptr3], %[inpos], LSL #2]\n"
+ "ld1w z4.s, p0/z, [%[inptr4], %[inpos], LSL #2]\n"
+ "ld1w z5.s, p0/z, [%[inptr5], %[inpos], LSL #2]\n"
+ "incw %[inpos], all, mul #1\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip1 z14.s, z3.s, z6.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z6.s\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p4.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p5.s, %[outpos], %[outwidth]\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p6.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z12.s, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p7.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z13.s, p5, [%[outptr], #5, MUL VL]\n"
+ "st1w z14.s, p6, [%[outptr], #6, MUL VL]\n"
+ "st1w z15.s, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 7:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z7.s, #0\n"
+ "ld1w z0.s, p0/z, [%[inptr0], %[inpos], LSL #2]\n"
+ "ld1w z1.s, p0/z, [%[inptr1], %[inpos], LSL #2]\n"
+ "ld1w z2.s, p0/z, [%[inptr2], %[inpos], LSL #2]\n"
+ "ld1w z3.s, p0/z, [%[inptr3], %[inpos], LSL #2]\n"
+ "ld1w z4.s, p0/z, [%[inptr4], %[inpos], LSL #2]\n"
+ "ld1w z5.s, p0/z, [%[inptr5], %[inpos], LSL #2]\n"
+ "ld1w z6.s, p0/z, [%[inptr6], %[inpos], LSL #2]\n"
+ "incw %[inpos], all, mul #1\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p4.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p5.s, %[outpos], %[outwidth]\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p6.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z12.s, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p7.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z13.s, p5, [%[outptr], #5, MUL VL]\n"
+ "st1w z14.s, p6, [%[outptr], #6, MUL VL]\n"
+ "st1w z15.s, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [inptr6] "+r" (inptr6)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ default:
+ case 8:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "ld1w z0.s, p0/z, [%[inptr0], %[inpos], LSL #2]\n"
+ "ld1w z1.s, p0/z, [%[inptr1], %[inpos], LSL #2]\n"
+ "ld1w z2.s, p0/z, [%[inptr2], %[inpos], LSL #2]\n"
+ "ld1w z3.s, p0/z, [%[inptr3], %[inpos], LSL #2]\n"
+ "ld1w z4.s, p0/z, [%[inptr4], %[inpos], LSL #2]\n"
+ "ld1w z5.s, p0/z, [%[inptr5], %[inpos], LSL #2]\n"
+ "ld1w z6.s, p0/z, [%[inptr6], %[inpos], LSL #2]\n"
+ "ld1w z7.s, p0/z, [%[inptr7], %[inpos], LSL #2]\n"
+ "incw %[inpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p4.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p5.s, %[outpos], %[outwidth]\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p6.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z12.s, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p7.s, %[outpos], %[outwidth]\n"
+ "incw %[outpos], all, mul #1\n"
+ "st1w z13.s, p5, [%[outptr], #5, MUL VL]\n"
+ "st1w z14.s, p6, [%[outptr], #6, MUL VL]\n"
+ "st1w z15.s, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [inptr6] "+r" (inptr6), [inptr7] "+r" (inptr7)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+
+ }
+ }
+}
+
+#endif // __ARM_FEATURE_SVE
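
The zip1/zip2 ladders above amount to a plain element-wise interleave with zero padding for the missing rows; the scalar sketch below shows the intended output layout (one element from each of up to eight rows per input column). The helper is an illustrative assumption, not code from the patch.

    #include <cstdint>

    // Illustration only; this helper is not part of the patch.
    static void interleave_8way_reference(uint32_t *out, const uint32_t *const rows[8],
                                          int height, long width)
    {
        for (long k = 0; k < width; k++) {
            for (int r = 0; r < 8; r++) {
                *out++ = (r < height) ? rows[r][k] : 0u;
            }
        }
    }
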
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block2_16bit.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block2_16bit.hpp
new file mode 100644
index 0000000000..63c21be6bb
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block2_16bit.hpp
@@ -0,0 +1,632 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+template<>
+template<typename T>
+inline void TransformImpl<8, 2, false, 2, 2, false>::Transform(T *out, const T *in, int ldin, int y0, int ymax, int k0, int kmax)
+{
+ uint16_t *master_outptr = reinterpret_cast<uint16_t *>(out);
+ const uint16_t *inptr = reinterpret_cast<const uint16_t *>(in);
+
+ for (int y=y0; y<ymax; y+=8)
+ {
+ const int height = ymax-y;
+ const long inwidth = (kmax - k0);
+ const long outwidth = (inwidth * 8 + 1) / 2;
+ long inpos = 0;
+ long outpos = 0;
+
+ uint16_t *outptr = master_outptr;
+ master_outptr += (outwidth * 2);
+
+ const uint16_t *inptr0 = inptr + y * ldin + k0;
+ const uint16_t *inptr1 = inptr0 + ldin;
+ const uint16_t *inptr2 = inptr1 + ldin;
+ const uint16_t *inptr3 = inptr2 + ldin;
+ const uint16_t *inptr4 = inptr3 + ldin;
+ const uint16_t *inptr5 = inptr4 + ldin;
+ const uint16_t *inptr6 = inptr5 + ldin;
+ const uint16_t *inptr7 = inptr6 + ldin;
+
+ switch(height)
+ {
+ case 1:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.h, #0\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "inch %[inpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip1 z0.s, z8.s, z4.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z1.s, z8.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z2.s, z9.s, z4.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.s, z1.s, z4.s\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip2 z11.s, z1.s, z4.s\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "zip1 z12.s, z2.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z13.s, z2.s, z4.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip1 z14.s, z3.s, z4.s\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "zip2 z15.s, z3.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "st1w z12.s, p0, [%[outptr], #4, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "st1w z13.s, p1, [%[outptr], #5, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "st1w z14.s, p2, [%[outptr], #6, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "st1w z15.s, p3, [%[outptr], #7, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 2:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.h, #0\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "ld1h z1.h, p0/z, [%[inptr1]]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "inch %[inpos], all, mul #1\n"
+ "zip1 z10.s, z1.s, z4.s\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip2 z11.s, z1.s, z4.s\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip1 z0.s, z8.s, z4.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z1.s, z8.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z2.s, z9.s, z4.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z4.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "mov z14.h, #0\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip1 z6.s, z11.s, z14.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z7.s, z11.s, z14.s\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "st1w z12.s, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "st1w z13.s, p1, [%[outptr], #5, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "st1w z14.s, p2, [%[outptr], #6, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "st1w z15.s, p3, [%[outptr], #7, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 3:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.h, #0\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "ld1h z1.h, p0/z, [%[inptr1]]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "ld1h z2.h, p0/z, [%[inptr2]]\n"
+ "zip1 z10.s, z1.s, z4.s\n"
+ "inch %[inpos], all, mul #1\n"
+ "zip2 z11.s, z1.s, z4.s\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip1 z12.s, z2.s, z4.s\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip2 z13.s, z2.s, z4.s\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "mov z14.h, #0\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip1 z6.s, z11.s, z14.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z7.s, z11.s, z14.s\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "st1w z12.s, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "st1w z13.s, p1, [%[outptr], #5, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "st1w z14.s, p2, [%[outptr], #6, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "st1w z15.s, p3, [%[outptr], #7, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+    : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 4:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.h, #0\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "ld1h z1.h, p0/z, [%[inptr1]]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "ld1h z2.h, p0/z, [%[inptr2]]\n"
+ "zip1 z10.s, z1.s, z4.s\n"
+ "ld1h z3.h, p0/z, [%[inptr3]]\n"
+ "zip2 z11.s, z1.s, z4.s\n"
+ "inch %[inpos], all, mul #1\n"
+ "zip1 z12.s, z2.s, z4.s\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip2 z13.s, z2.s, z4.s\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip1 z14.s, z3.s, z4.s\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip2 z15.s, z3.s, z4.s\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "st1w z12.s, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "st1w z13.s, p1, [%[outptr], #5, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "st1w z14.s, p2, [%[outptr], #6, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "st1w z15.s, p3, [%[outptr], #7, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+    : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 5:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z5.h, #0\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "ld1h z1.h, p0/z, [%[inptr1]]\n"
+ "inch %[inpos], all, mul #1\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "ld1h z2.h, p0/z, [%[inptr2]]\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "ld1h z3.h, p0/z, [%[inptr3]]\n"
+ "zip1 z12.s, z2.s, z5.s\n"
+ "ld1h z4.h, p0/z, [%[inptr4]]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip2 z13.s, z2.s, z5.s\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip1 z14.s, z3.s, z5.s\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip2 z15.s, z3.s, z5.s\n"
+ "addvl %[inptr4], %[inptr4], #1\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "st1w z12.s, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "st1w z13.s, p1, [%[outptr], #5, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "st1w z14.s, p2, [%[outptr], #6, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "st1w z15.s, p3, [%[outptr], #7, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+    : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 6:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z6.h, #0\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "ld1h z1.h, p0/z, [%[inptr1]]\n"
+ "inch %[inpos], all, mul #1\n"
+ "ld1h z2.h, p0/z, [%[inptr2]]\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "ld1h z3.h, p0/z, [%[inptr3]]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "ld1h z4.h, p0/z, [%[inptr4]]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "ld1h z5.h, p0/z, [%[inptr5]]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip1 z14.s, z3.s, z6.s\n"
+ "addvl %[inptr4], %[inptr4], #1\n"
+ "zip2 z15.s, z3.s, z6.s\n"
+ "addvl %[inptr5], %[inptr5], #1\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "st1w z12.s, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "st1w z13.s, p1, [%[outptr], #5, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "st1w z14.s, p2, [%[outptr], #6, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "st1w z15.s, p3, [%[outptr], #7, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+    : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 7:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z7.h, #0\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "ld1h z1.h, p0/z, [%[inptr1]]\n"
+ "inch %[inpos], all, mul #1\n"
+ "ld1h z2.h, p0/z, [%[inptr2]]\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "ld1h z3.h, p0/z, [%[inptr3]]\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "ld1h z4.h, p0/z, [%[inptr4]]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "ld1h z5.h, p0/z, [%[inptr5]]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "ld1h z6.h, p0/z, [%[inptr6]]\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "addvl %[inptr4], %[inptr4], #1\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "addvl %[inptr5], %[inptr5], #1\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "addvl %[inptr6], %[inptr6], #1\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "st1w z12.s, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "st1w z13.s, p1, [%[outptr], #5, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "st1w z14.s, p2, [%[outptr], #6, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "st1w z15.s, p3, [%[outptr], #7, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [inptr6] "+r" (inptr6)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+    : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ default:
+ case 8:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "inch %[inpos], all, mul #1\n"
+ "ld1h z1.h, p0/z, [%[inptr1]]\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "ld1h z2.h, p0/z, [%[inptr2]]\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "ld1h z3.h, p0/z, [%[inptr3]]\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "ld1h z4.h, p0/z, [%[inptr4]]\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "ld1h z5.h, p0/z, [%[inptr5]]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "ld1h z6.h, p0/z, [%[inptr6]]\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "ld1h z7.h, p0/z, [%[inptr7]]\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "addvl %[inptr4], %[inptr4], #1\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "addvl %[inptr5], %[inptr5], #1\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "addvl %[inptr6], %[inptr6], #1\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "addvl %[inptr7], %[inptr7], #1\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "st1w z8.s, p0, [%[outptr]]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p0.s, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "st1w z12.s, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "incw %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p1.s, %[outpos], %[outwidth]\n"
+ "st1w z13.s, p1, [%[outptr], #5, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p2.s, %[outpos], %[outwidth]\n"
+ "st1w z14.s, p2, [%[outptr], #6, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "whilelt p3.s, %[outpos], %[outwidth]\n"
+ "st1w z15.s, p3, [%[outptr], #7, MUL VL]\n"
+ "incw %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [inptr6] "+r" (inptr6), [inptr7] "+r" (inptr7)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+    : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+
+ }
+ }
+}
+
+#endif // __ARM_FEATURE_SVE
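
For readers tracing the assembly above: each case performs an 8-way, block-of-2 interleave of 16-bit rows using three levels of zip1/zip2 at 32-bit granularity, substituting a zeroed register for rows beyond the group height. A minimal scalar sketch of the intended output layout follows, assuming the column count is a multiple of the block size of 2 and ignoring the predicated tail handling; the function name and structure are illustrative only and are not part of this patch.

    #include <cstdint>

    // Scalar sketch: for every pair of input columns, emit two elements from row 0,
    // then two from row 1, ..., two from row 7; rows beyond `height` are zero-filled.
    static void interleave_8way_block2_16bit_ref(uint16_t *out, const uint16_t *in,
                                                 int ldin, int y0, int ymax, int k0, int kmax)
    {
        for (int y = y0; y < ymax; y += 8)
        {
            const int height = (ymax - y) < 8 ? (ymax - y) : 8;
            for (int k = k0; k < kmax; k += 2)        // one block of 2 columns at a time
            {
                for (int r = 0; r < 8; r++)           // rows 0..7 in order
                {
                    for (int b = 0; b < 2; b++)       // 2 consecutive elements per row
                    {
                        const bool valid = (r < height) && (k + b < kmax);
                        *out++ = valid ? in[(y + r) * ldin + (k + b)] : 0;
                    }
                }
            }
        }
    }
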
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block2_32bit.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block2_32bit.hpp
new file mode 100644
index 0000000000..4cc4311cee
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block2_32bit.hpp
@@ -0,0 +1,632 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+template<>
+template<typename T>
+inline void TransformImpl<8, 2, false, 4, 4, false>::Transform(T *out, const T *in, int ldin, int y0, int ymax, int k0, int kmax)
+{
+ uint32_t *master_outptr = reinterpret_cast<uint32_t *>(out);
+ const uint32_t *inptr = reinterpret_cast<const uint32_t *>(in);
+
+ for (int y=y0; y<ymax; y+=8)
+ {
+ const int height = ymax-y;
+ const long inwidth = (kmax - k0);
+ const long outwidth = (inwidth * 8 + 1) / 2;
+ long inpos = 0;
+ long outpos = 0;
+
+ uint32_t *outptr = master_outptr;
+ master_outptr += (outwidth * 2);
+
+ const uint32_t *inptr0 = inptr + y * ldin + k0;
+ const uint32_t *inptr1 = inptr0 + ldin;
+ const uint32_t *inptr2 = inptr1 + ldin;
+ const uint32_t *inptr3 = inptr2 + ldin;
+ const uint32_t *inptr4 = inptr3 + ldin;
+ const uint32_t *inptr5 = inptr4 + ldin;
+ const uint32_t *inptr6 = inptr5 + ldin;
+ const uint32_t *inptr7 = inptr6 + ldin;
+
+ switch(height)
+ {
+ case 1:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.s, #0\n"
+ "ld1w z0.s, p0/z, [%[inptr0]]\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "incw %[inpos], all, mul #1\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip1 z0.d, z8.d, z4.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z1.d, z8.d, z4.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z2.d, z9.d, z4.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip2 z3.d, z9.d, z4.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z4.d\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip2 z11.d, z1.d, z4.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip1 z12.d, z2.d, z4.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z13.d, z2.d, z4.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip1 z14.d, z3.d, z4.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip2 z15.d, z3.d, z4.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 2:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.s, #0\n"
+ "ld1w z0.s, p0/z, [%[inptr0]]\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "ld1w z1.s, p0/z, [%[inptr1]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "incw %[inpos], all, mul #1\n"
+ "zip1 z10.d, z1.d, z4.d\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip2 z11.d, z1.d, z4.d\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip1 z0.d, z8.d, z4.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z1.d, z8.d, z4.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z2.d, z9.d, z4.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip2 z3.d, z9.d, z4.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "mov z14.s, #0\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip1 z4.d, z10.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z5.d, z10.d, z14.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip1 z6.d, z11.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z7.d, z11.d, z14.d\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 3:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.s, #0\n"
+ "ld1w z0.s, p0/z, [%[inptr0]]\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "ld1w z1.s, p0/z, [%[inptr1]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "ld1w z2.s, p0/z, [%[inptr2]]\n"
+ "zip1 z10.d, z1.d, z4.d\n"
+ "incw %[inpos], all, mul #1\n"
+ "zip2 z11.d, z1.d, z4.d\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip1 z12.d, z2.d, z4.d\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip2 z13.d, z2.d, z4.d\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip1 z0.d, z8.d, z12.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z1.d, z8.d, z12.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z2.d, z9.d, z13.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip2 z3.d, z9.d, z13.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "mov z14.s, #0\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip1 z4.d, z10.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z5.d, z10.d, z14.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip1 z6.d, z11.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z7.d, z11.d, z14.d\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 4:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.s, #0\n"
+ "ld1w z0.s, p0/z, [%[inptr0]]\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "ld1w z1.s, p0/z, [%[inptr1]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "ld1w z2.s, p0/z, [%[inptr2]]\n"
+ "zip1 z10.d, z1.d, z4.d\n"
+ "ld1w z3.s, p0/z, [%[inptr3]]\n"
+ "zip2 z11.d, z1.d, z4.d\n"
+ "incw %[inpos], all, mul #1\n"
+ "zip1 z12.d, z2.d, z4.d\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip2 z13.d, z2.d, z4.d\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip1 z14.d, z3.d, z4.d\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip2 z15.d, z3.d, z4.d\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip1 z0.d, z8.d, z12.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z1.d, z8.d, z12.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z2.d, z9.d, z13.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip2 z3.d, z9.d, z13.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z4.d, z10.d, z14.d\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip2 z5.d, z10.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z6.d, z11.d, z15.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip2 z7.d, z11.d, z15.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 5:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z5.s, #0\n"
+ "ld1w z0.s, p0/z, [%[inptr0]]\n"
+ "ld1w z1.s, p0/z, [%[inptr1]]\n"
+ "incw %[inpos], all, mul #1\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "ld1w z2.s, p0/z, [%[inptr2]]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "ld1w z3.s, p0/z, [%[inptr3]]\n"
+ "zip1 z12.d, z2.d, z5.d\n"
+ "ld1w z4.s, p0/z, [%[inptr4]]\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip2 z13.d, z2.d, z5.d\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip1 z14.d, z3.d, z5.d\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip2 z15.d, z3.d, z5.d\n"
+ "addvl %[inptr4], %[inptr4], #1\n"
+ "zip1 z0.d, z8.d, z12.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z1.d, z8.d, z12.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z2.d, z9.d, z13.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip2 z3.d, z9.d, z13.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z4.d, z10.d, z14.d\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip2 z5.d, z10.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z6.d, z11.d, z15.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip2 z7.d, z11.d, z15.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 6:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z6.s, #0\n"
+ "ld1w z0.s, p0/z, [%[inptr0]]\n"
+ "ld1w z1.s, p0/z, [%[inptr1]]\n"
+ "incw %[inpos], all, mul #1\n"
+ "ld1w z2.s, p0/z, [%[inptr2]]\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "ld1w z3.s, p0/z, [%[inptr3]]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "ld1w z4.s, p0/z, [%[inptr4]]\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "ld1w z5.s, p0/z, [%[inptr5]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip1 z14.d, z3.d, z6.d\n"
+ "addvl %[inptr4], %[inptr4], #1\n"
+ "zip2 z15.d, z3.d, z6.d\n"
+ "addvl %[inptr5], %[inptr5], #1\n"
+ "zip1 z0.d, z8.d, z12.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z1.d, z8.d, z12.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z2.d, z9.d, z13.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip2 z3.d, z9.d, z13.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z4.d, z10.d, z14.d\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip2 z5.d, z10.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z6.d, z11.d, z15.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip2 z7.d, z11.d, z15.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 7:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z7.s, #0\n"
+ "ld1w z0.s, p0/z, [%[inptr0]]\n"
+ "ld1w z1.s, p0/z, [%[inptr1]]\n"
+ "incw %[inpos], all, mul #1\n"
+ "ld1w z2.s, p0/z, [%[inptr2]]\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "ld1w z3.s, p0/z, [%[inptr3]]\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "ld1w z4.s, p0/z, [%[inptr4]]\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "ld1w z5.s, p0/z, [%[inptr5]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "ld1w z6.s, p0/z, [%[inptr6]]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "addvl %[inptr4], %[inptr4], #1\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "addvl %[inptr5], %[inptr5], #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "addvl %[inptr6], %[inptr6], #1\n"
+ "zip1 z0.d, z8.d, z12.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z1.d, z8.d, z12.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z2.d, z9.d, z13.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip2 z3.d, z9.d, z13.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z4.d, z10.d, z14.d\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip2 z5.d, z10.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z6.d, z11.d, z15.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip2 z7.d, z11.d, z15.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [inptr6] "+r" (inptr6)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ default:
+ case 8:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.s, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "ld1w z0.s, p0/z, [%[inptr0]]\n"
+ "incw %[inpos], all, mul #1\n"
+ "ld1w z1.s, p0/z, [%[inptr1]]\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "ld1w z2.s, p0/z, [%[inptr2]]\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "ld1w z3.s, p0/z, [%[inptr3]]\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "ld1w z4.s, p0/z, [%[inptr4]]\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "ld1w z5.s, p0/z, [%[inptr5]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "ld1w z6.s, p0/z, [%[inptr6]]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "ld1w z7.s, p0/z, [%[inptr7]]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "addvl %[inptr4], %[inptr4], #1\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "addvl %[inptr5], %[inptr5], #1\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "addvl %[inptr6], %[inptr6], #1\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "addvl %[inptr7], %[inptr7], #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip1 z0.d, z8.d, z12.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z1.d, z8.d, z12.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip1 z2.d, z9.d, z13.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z3.d, z9.d, z13.d\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip1 z4.d, z10.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z5.d, z10.d, z14.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip1 z6.d, z11.d, z15.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z7.d, z11.d, z15.d\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [inptr6] "+r" (inptr6), [inptr7] "+r" (inptr7)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+
+ }
+ }
+}
+
+#endif // __ARM_FEATURE_SVE
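
All of these interleave kernels share the same strip-mined loop skeleton: whilelt builds a predicate of the lanes still inside the row, b.none exits once no lanes remain, inch/incw/incd advance the position by one vector's worth of elements, and addvl bumps the pointers by whole vector lengths. A rough ACLE-intrinsics rendering of that skeleton is sketched below for 32-bit elements; it assumes arm_sve.h and SVE code generation are available, and the helper name is illustrative rather than part of this patch.

    #include <arm_sve.h>
    #include <cstdint>

    // Copies `width` 32-bit elements with the whilelt/incw strip-mining pattern
    // used by the kernels above (sketch only, not code from this change).
    static void predicated_copy_w(uint32_t *out, const uint32_t *in, int64_t width)
    {
        int64_t pos = 0;
        svbool_t pg = svwhilelt_b32(pos, width);            // whilelt pX.s, pos, width
        while (svptest_any(svptrue_b32(), pg))              // b.none: stop when no lanes active
        {
            svst1(pg, out + pos, svld1(pg, in + pos));      // ld1w / st1w under the predicate
            pos += (int64_t)svcntw();                       // incw: advance by one vector of words
            pg = svwhilelt_b32(pos, width);
        }
    }
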
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block4_16bit.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block4_16bit.hpp
new file mode 100644
index 0000000000..f493786320
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block4_16bit.hpp
@@ -0,0 +1,632 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+template<>
+template<typename T>
+inline void TransformImpl<8, 4, false, 2, 2, false>::Transform(T *out, const T *in, int ldin, int y0, int ymax, int k0, int kmax)
+{
+ uint16_t *master_outptr = reinterpret_cast<uint16_t *>(out);
+ const uint16_t *inptr = reinterpret_cast<const uint16_t *>(in);
+
+ for (int y=y0; y<ymax; y+=8)
+ {
+ const int height = ymax-y;
+ const long inwidth = (kmax - k0);
+ const long outwidth = (inwidth * 8 + 3) / 4;
+ long inpos = 0;
+ long outpos = 0;
+
+ uint16_t *outptr = master_outptr;
+ master_outptr += (outwidth * 4);
+
+ const uint16_t *inptr0 = inptr + y * ldin + k0;
+ const uint16_t *inptr1 = inptr0 + ldin;
+ const uint16_t *inptr2 = inptr1 + ldin;
+ const uint16_t *inptr3 = inptr2 + ldin;
+ const uint16_t *inptr4 = inptr3 + ldin;
+ const uint16_t *inptr5 = inptr4 + ldin;
+ const uint16_t *inptr6 = inptr5 + ldin;
+ const uint16_t *inptr7 = inptr6 + ldin;
+
+ switch(height)
+ {
+ case 1:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.h, #0\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "inch %[inpos], all, mul #1\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip1 z0.d, z8.d, z4.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z1.d, z8.d, z4.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z2.d, z9.d, z4.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip2 z3.d, z9.d, z4.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z4.d\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip2 z11.d, z1.d, z4.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip1 z12.d, z2.d, z4.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z13.d, z2.d, z4.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip1 z14.d, z3.d, z4.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip2 z15.d, z3.d, z4.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 2:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.h, #0\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "ld1h z1.h, p0/z, [%[inptr1]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "inch %[inpos], all, mul #1\n"
+ "zip1 z10.d, z1.d, z4.d\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip2 z11.d, z1.d, z4.d\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip1 z0.d, z8.d, z4.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z1.d, z8.d, z4.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z2.d, z9.d, z4.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip2 z3.d, z9.d, z4.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "mov z14.h, #0\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip1 z4.d, z10.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z5.d, z10.d, z14.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip1 z6.d, z11.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z7.d, z11.d, z14.d\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 3:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.h, #0\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "ld1h z1.h, p0/z, [%[inptr1]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "ld1h z2.h, p0/z, [%[inptr2]]\n"
+ "zip1 z10.d, z1.d, z4.d\n"
+ "inch %[inpos], all, mul #1\n"
+ "zip2 z11.d, z1.d, z4.d\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip1 z12.d, z2.d, z4.d\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip2 z13.d, z2.d, z4.d\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip1 z0.d, z8.d, z12.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z1.d, z8.d, z12.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z2.d, z9.d, z13.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip2 z3.d, z9.d, z13.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "mov z14.h, #0\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip1 z4.d, z10.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z5.d, z10.d, z14.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip1 z6.d, z11.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z7.d, z11.d, z14.d\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 4:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.h, #0\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "ld1h z1.h, p0/z, [%[inptr1]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "ld1h z2.h, p0/z, [%[inptr2]]\n"
+ "zip1 z10.d, z1.d, z4.d\n"
+ "ld1h z3.h, p0/z, [%[inptr3]]\n"
+ "zip2 z11.d, z1.d, z4.d\n"
+ "inch %[inpos], all, mul #1\n"
+ "zip1 z12.d, z2.d, z4.d\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip2 z13.d, z2.d, z4.d\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip1 z14.d, z3.d, z4.d\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip2 z15.d, z3.d, z4.d\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip1 z0.d, z8.d, z12.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z1.d, z8.d, z12.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z2.d, z9.d, z13.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip2 z3.d, z9.d, z13.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z4.d, z10.d, z14.d\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip2 z5.d, z10.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z6.d, z11.d, z15.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip2 z7.d, z11.d, z15.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 5:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z5.h, #0\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "ld1h z1.h, p0/z, [%[inptr1]]\n"
+ "inch %[inpos], all, mul #1\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "ld1h z2.h, p0/z, [%[inptr2]]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "ld1h z3.h, p0/z, [%[inptr3]]\n"
+ "zip1 z12.d, z2.d, z5.d\n"
+ "ld1h z4.h, p0/z, [%[inptr4]]\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip2 z13.d, z2.d, z5.d\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip1 z14.d, z3.d, z5.d\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip2 z15.d, z3.d, z5.d\n"
+ "addvl %[inptr4], %[inptr4], #1\n"
+ "zip1 z0.d, z8.d, z12.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z1.d, z8.d, z12.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z2.d, z9.d, z13.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip2 z3.d, z9.d, z13.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z4.d, z10.d, z14.d\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip2 z5.d, z10.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z6.d, z11.d, z15.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip2 z7.d, z11.d, z15.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 6:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z6.h, #0\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "ld1h z1.h, p0/z, [%[inptr1]]\n"
+ "inch %[inpos], all, mul #1\n"
+ "ld1h z2.h, p0/z, [%[inptr2]]\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "ld1h z3.h, p0/z, [%[inptr3]]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "ld1h z4.h, p0/z, [%[inptr4]]\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "ld1h z5.h, p0/z, [%[inptr5]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip1 z14.d, z3.d, z6.d\n"
+ "addvl %[inptr4], %[inptr4], #1\n"
+ "zip2 z15.d, z3.d, z6.d\n"
+ "addvl %[inptr5], %[inptr5], #1\n"
+ "zip1 z0.d, z8.d, z12.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z1.d, z8.d, z12.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z2.d, z9.d, z13.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip2 z3.d, z9.d, z13.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z4.d, z10.d, z14.d\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip2 z5.d, z10.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z6.d, z11.d, z15.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip2 z7.d, z11.d, z15.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 7:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z7.h, #0\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "ld1h z1.h, p0/z, [%[inptr1]]\n"
+ "inch %[inpos], all, mul #1\n"
+ "ld1h z2.h, p0/z, [%[inptr2]]\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "ld1h z3.h, p0/z, [%[inptr3]]\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "ld1h z4.h, p0/z, [%[inptr4]]\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "ld1h z5.h, p0/z, [%[inptr5]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "ld1h z6.h, p0/z, [%[inptr6]]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "addvl %[inptr4], %[inptr4], #1\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "addvl %[inptr5], %[inptr5], #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "addvl %[inptr6], %[inptr6], #1\n"
+ "zip1 z0.d, z8.d, z12.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z1.d, z8.d, z12.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z2.d, z9.d, z13.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip2 z3.d, z9.d, z13.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z4.d, z10.d, z14.d\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip2 z5.d, z10.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z6.d, z11.d, z15.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip2 z7.d, z11.d, z15.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [inptr6] "+r" (inptr6)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ default:
+ case 8:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.h, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "ld1h z0.h, p0/z, [%[inptr0]]\n"
+ "inch %[inpos], all, mul #1\n"
+ "ld1h z1.h, p0/z, [%[inptr1]]\n"
+ "addvl %[inptr0], %[inptr0], #1\n"
+ "ld1h z2.h, p0/z, [%[inptr2]]\n"
+ "addvl %[inptr1], %[inptr1], #1\n"
+ "ld1h z3.h, p0/z, [%[inptr3]]\n"
+ "addvl %[inptr2], %[inptr2], #1\n"
+ "ld1h z4.h, p0/z, [%[inptr4]]\n"
+ "addvl %[inptr3], %[inptr3], #1\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "ld1h z5.h, p0/z, [%[inptr5]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "ld1h z6.h, p0/z, [%[inptr6]]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "ld1h z7.h, p0/z, [%[inptr7]]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "addvl %[inptr4], %[inptr4], #1\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "addvl %[inptr5], %[inptr5], #1\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "addvl %[inptr6], %[inptr6], #1\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "addvl %[inptr7], %[inptr7], #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip1 z0.d, z8.d, z12.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z1.d, z8.d, z12.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "zip1 z2.d, z9.d, z13.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z3.d, z9.d, z13.d\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "zip1 z4.d, z10.d, z14.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z5.d, z10.d, z14.d\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "zip1 z6.d, z11.d, z15.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z7.d, z11.d, z15.d\n"
+ "zip1 z8.d, z0.d, z4.d\n"
+ "st1d z8.d, p0, [%[outptr]]\n"
+ "zip2 z9.d, z0.d, z4.d\n"
+ "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n"
+ "zip1 z10.d, z1.d, z5.d\n"
+ "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n"
+ "zip2 z11.d, z1.d, z5.d\n"
+ "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n"
+ "zip1 z12.d, z2.d, z6.d\n"
+ "whilelt p0.d, %[outpos], %[outwidth]\n"
+ "zip2 z13.d, z2.d, z6.d\n"
+ "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n"
+ "zip1 z14.d, z3.d, z7.d\n"
+ "incd %[outpos], all, mul #1\n"
+ "zip2 z15.d, z3.d, z7.d\n"
+ "whilelt p1.d, %[outpos], %[outwidth]\n"
+ "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p2.d, %[outpos], %[outwidth]\n"
+ "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "whilelt p3.d, %[outpos], %[outwidth]\n"
+ "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n"
+ "incd %[outpos], all, mul #1\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [inptr6] "+r" (inptr6), [inptr7] "+r" (inptr7)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+
+ }
+ }
+}
+
+#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block4_8bit.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block4_8bit.hpp
new file mode 100644
index 0000000000..f1690baf43
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block4_8bit.hpp
@@ -0,0 +1,596 @@
+/*
+ * Copyright (c) 2018 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __ARM_FEATURE_SVE
+
+template<>
+template<typename T>
+inline void TransformImpl<8, 4, false, 1, 1, false>::Transform(T *out, const T *in, int ldin, int y0, int ymax, int k0, int kmax)
+{
+ uint8_t *master_outptr = reinterpret_cast<uint8_t *>(out);
+ const uint8_t *inptr = reinterpret_cast<const uint8_t *>(in);
+
+ for (int y=y0; y<ymax; y+=8)
+ {
+ const int height = ymax-y;
+ const long inwidth = (kmax - k0);
+ const long outwidth = ((inwidth + 3) / 4) * 32;
+ long inpos = 0;
+ long outpos = 0;
+
+ uint8_t *outptr = master_outptr;
+ master_outptr += outwidth;
+
+ const uint8_t *inptr0 = inptr + y * ldin + k0;
+ const uint8_t *inptr1 = inptr0 + ldin;
+ const uint8_t *inptr2 = inptr1 + ldin;
+ const uint8_t *inptr3 = inptr2 + ldin;
+ const uint8_t *inptr4 = inptr3 + ldin;
+ const uint8_t *inptr5 = inptr4 + ldin;
+ const uint8_t *inptr6 = inptr5 + ldin;
+ const uint8_t *inptr7 = inptr6 + ldin;
+
+ switch(height)
+ {
+ case 1:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.b, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.b, #0\n"
+ "ld1b z0.b, p0/z, [%[inptr0], %[inpos]]\n"
+ "incb %[inpos], all, mul #1\n"
+ "whilelt p0.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "whilelt p1.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z0.s, z8.s, z4.s\n"
+ "zip2 z1.s, z8.s, z4.s\n"
+ "zip1 z2.s, z9.s, z4.s\n"
+ "zip2 z3.s, z9.s, z4.s\n"
+ "whilelt p2.b, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z4.s\n"
+ "zip2 z11.s, z1.s, z4.s\n"
+ "st1b z8.b, p0, [%[outptr]]\n"
+ "zip1 z12.s, z2.s, z4.s\n"
+ "whilelt p3.b, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z14.s, z3.s, z4.s\n"
+ "st1b z9.b, p1, [%[outptr], #1, MUL VL]\n"
+ "zip2 z15.s, z3.s, z4.s\n"
+ "whilelt p4.b, %[outpos], %[outwidth]\n"
+ "st1b z10.b, p2, [%[outptr], #2, MUL VL]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z11.b, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p5.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z12.b, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p6.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z13.b, p5, [%[outptr], #5, MUL VL]\n"
+ "whilelt p7.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z14.b, p6, [%[outptr], #6, MUL VL]\n"
+ "st1b z15.b, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 2:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.b, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.b, #0\n"
+ "mov z14.b, #0\n"
+ "ld1b z0.b, p0/z, [%[inptr0], %[inpos]]\n"
+ "ld1b z1.b, p0/z, [%[inptr1], %[inpos]]\n"
+ "incb %[inpos], all, mul #1\n"
+ "whilelt p0.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z4.s\n"
+ "zip2 z11.s, z1.s, z4.s\n"
+ "whilelt p1.b, %[outpos], %[outwidth]\n"
+ "zip1 z0.s, z8.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z1.s, z8.s, z4.s\n"
+ "zip1 z2.s, z9.s, z4.s\n"
+ "zip2 z3.s, z9.s, z4.s\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "whilelt p2.b, %[outpos], %[outwidth]\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z6.s, z11.s, z14.s\n"
+ "zip2 z7.s, z11.s, z14.s\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "whilelt p3.b, %[outpos], %[outwidth]\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1b z8.b, p0, [%[outptr]]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "whilelt p4.b, %[outpos], %[outwidth]\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "st1b z9.b, p1, [%[outptr], #1, MUL VL]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z10.b, p2, [%[outptr], #2, MUL VL]\n"
+ "whilelt p5.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z11.b, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p6.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z12.b, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p7.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z13.b, p5, [%[outptr], #5, MUL VL]\n"
+ "st1b z14.b, p6, [%[outptr], #6, MUL VL]\n"
+ "st1b z15.b, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 3:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.b, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.b, #0\n"
+ "mov z14.b, #0\n"
+ "ld1b z0.b, p0/z, [%[inptr0], %[inpos]]\n"
+ "ld1b z1.b, p0/z, [%[inptr1], %[inpos]]\n"
+ "ld1b z2.b, p0/z, [%[inptr2], %[inpos]]\n"
+ "incb %[inpos], all, mul #1\n"
+ "whilelt p0.b, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z4.s\n"
+ "zip2 z11.s, z1.s, z4.s\n"
+ "zip1 z12.s, z2.s, z4.s\n"
+ "whilelt p1.b, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p2.b, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "zip1 z6.s, z11.s, z14.s\n"
+ "zip2 z7.s, z11.s, z14.s\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "whilelt p3.b, %[outpos], %[outwidth]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "st1b z8.b, p0, [%[outptr]]\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "whilelt p4.b, %[outpos], %[outwidth]\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "st1b z9.b, p1, [%[outptr], #1, MUL VL]\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z10.b, p2, [%[outptr], #2, MUL VL]\n"
+ "whilelt p5.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z11.b, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p6.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z12.b, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p7.b, %[outpos], %[outwidth]\n"
+ "st1b z13.b, p5, [%[outptr], #5, MUL VL]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z14.b, p6, [%[outptr], #6, MUL VL]\n"
+ "st1b z15.b, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 4:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.b, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z4.b, #0\n"
+ "ld1b z0.b, p0/z, [%[inptr0], %[inpos]]\n"
+ "ld1b z1.b, p0/z, [%[inptr1], %[inpos]]\n"
+ "ld1b z2.b, p0/z, [%[inptr2], %[inpos]]\n"
+ "ld1b z3.b, p0/z, [%[inptr3], %[inpos]]\n"
+ "incb %[inpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "whilelt p0.b, %[outpos], %[outwidth]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z10.s, z1.s, z4.s\n"
+ "zip2 z11.s, z1.s, z4.s\n"
+ "zip1 z12.s, z2.s, z4.s\n"
+ "zip2 z13.s, z2.s, z4.s\n"
+ "whilelt p1.b, %[outpos], %[outwidth]\n"
+ "zip1 z14.s, z3.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z4.s\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p2.b, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "whilelt p3.b, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1b z8.b, p0, [%[outptr]]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p4.b, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "st1b z9.b, p1, [%[outptr], #1, MUL VL]\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p5.b, %[outpos], %[outwidth]\n"
+ "st1b z10.b, p2, [%[outptr], #2, MUL VL]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z11.b, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p6.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z12.b, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p7.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z13.b, p5, [%[outptr], #5, MUL VL]\n"
+ "st1b z14.b, p6, [%[outptr], #6, MUL VL]\n"
+ "st1b z15.b, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 5:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.b, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z5.b, #0\n"
+ "ld1b z0.b, p0/z, [%[inptr0], %[inpos]]\n"
+ "ld1b z1.b, p0/z, [%[inptr1], %[inpos]]\n"
+ "ld1b z2.b, p0/z, [%[inptr2], %[inpos]]\n"
+ "ld1b z3.b, p0/z, [%[inptr3], %[inpos]]\n"
+ "ld1b z4.b, p0/z, [%[inptr4], %[inpos]]\n"
+ "incb %[inpos], all, mul #1\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "whilelt p0.b, %[outpos], %[outwidth]\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z12.s, z2.s, z5.s\n"
+ "zip2 z13.s, z2.s, z5.s\n"
+ "whilelt p1.b, %[outpos], %[outwidth]\n"
+ "zip1 z14.s, z3.s, z5.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z5.s\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p2.b, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "whilelt p3.b, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1b z8.b, p0, [%[outptr]]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p4.b, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "st1b z9.b, p1, [%[outptr], #1, MUL VL]\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p5.b, %[outpos], %[outwidth]\n"
+ "st1b z10.b, p2, [%[outptr], #2, MUL VL]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z11.b, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p6.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z12.b, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p7.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z13.b, p5, [%[outptr], #5, MUL VL]\n"
+ "st1b z14.b, p6, [%[outptr], #6, MUL VL]\n"
+ "st1b z15.b, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 6:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.b, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z6.b, #0\n"
+ "ld1b z0.b, p0/z, [%[inptr0], %[inpos]]\n"
+ "ld1b z1.b, p0/z, [%[inptr1], %[inpos]]\n"
+ "ld1b z2.b, p0/z, [%[inptr2], %[inpos]]\n"
+ "ld1b z3.b, p0/z, [%[inptr3], %[inpos]]\n"
+ "ld1b z4.b, p0/z, [%[inptr4], %[inpos]]\n"
+ "ld1b z5.b, p0/z, [%[inptr5], %[inpos]]\n"
+ "incb %[inpos], all, mul #1\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p0.b, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "whilelt p1.b, %[outpos], %[outwidth]\n"
+ "zip1 z14.s, z3.s, z6.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z6.s\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p2.b, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "whilelt p3.b, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1b z8.b, p0, [%[outptr]]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p4.b, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "st1b z9.b, p1, [%[outptr], #1, MUL VL]\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p5.b, %[outpos], %[outwidth]\n"
+ "st1b z10.b, p2, [%[outptr], #2, MUL VL]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z11.b, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p6.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z12.b, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p7.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z13.b, p5, [%[outptr], #5, MUL VL]\n"
+ "st1b z14.b, p6, [%[outptr], #6, MUL VL]\n"
+ "st1b z15.b, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ case 7:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.b, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "mov z7.b, #0\n"
+ "ld1b z0.b, p0/z, [%[inptr0], %[inpos]]\n"
+ "ld1b z1.b, p0/z, [%[inptr1], %[inpos]]\n"
+ "ld1b z2.b, p0/z, [%[inptr2], %[inpos]]\n"
+ "ld1b z3.b, p0/z, [%[inptr3], %[inpos]]\n"
+ "ld1b z4.b, p0/z, [%[inptr4], %[inpos]]\n"
+ "ld1b z5.b, p0/z, [%[inptr5], %[inpos]]\n"
+ "ld1b z6.b, p0/z, [%[inptr6], %[inpos]]\n"
+ "incb %[inpos], all, mul #1\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "whilelt p0.b, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p1.b, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p2.b, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "whilelt p3.b, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1b z8.b, p0, [%[outptr]]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p4.b, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "st1b z9.b, p1, [%[outptr], #1, MUL VL]\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p5.b, %[outpos], %[outwidth]\n"
+ "st1b z10.b, p2, [%[outptr], #2, MUL VL]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z11.b, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p6.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z12.b, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p7.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z13.b, p5, [%[outptr], #5, MUL VL]\n"
+ "st1b z14.b, p6, [%[outptr], #6, MUL VL]\n"
+ "st1b z15.b, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [inptr6] "+r" (inptr6)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+ default:
+ case 8:
+ __asm __volatile(
+ "1:\n"
+ "whilelt p0.b, %[inpos], %[inwidth]\n"
+ "b.none 2f\n"
+ "ld1b z0.b, p0/z, [%[inptr0], %[inpos]]\n"
+ "ld1b z1.b, p0/z, [%[inptr1], %[inpos]]\n"
+ "ld1b z2.b, p0/z, [%[inptr2], %[inpos]]\n"
+ "ld1b z3.b, p0/z, [%[inptr3], %[inpos]]\n"
+ "ld1b z4.b, p0/z, [%[inptr4], %[inpos]]\n"
+ "ld1b z5.b, p0/z, [%[inptr5], %[inpos]]\n"
+ "ld1b z6.b, p0/z, [%[inptr6], %[inpos]]\n"
+ "ld1b z7.b, p0/z, [%[inptr7], %[inpos]]\n"
+ "incb %[inpos], all, mul #1\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "whilelt p0.b, %[outpos], %[outwidth]\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "whilelt p1.b, %[outpos], %[outwidth]\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "zip1 z0.s, z8.s, z12.s\n"
+ "zip2 z1.s, z8.s, z12.s\n"
+ "zip1 z2.s, z9.s, z13.s\n"
+ "whilelt p2.b, %[outpos], %[outwidth]\n"
+ "zip2 z3.s, z9.s, z13.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z4.s, z10.s, z14.s\n"
+ "zip2 z5.s, z10.s, z14.s\n"
+ "zip1 z6.s, z11.s, z15.s\n"
+ "zip2 z7.s, z11.s, z15.s\n"
+ "whilelt p3.b, %[outpos], %[outwidth]\n"
+ "zip1 z8.s, z0.s, z4.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip2 z9.s, z0.s, z4.s\n"
+ "zip1 z10.s, z1.s, z5.s\n"
+ "zip2 z11.s, z1.s, z5.s\n"
+ "st1b z8.b, p0, [%[outptr]]\n"
+ "zip1 z12.s, z2.s, z6.s\n"
+ "whilelt p4.b, %[outpos], %[outwidth]\n"
+ "zip2 z13.s, z2.s, z6.s\n"
+ "incb %[outpos], all, mul #1\n"
+ "zip1 z14.s, z3.s, z7.s\n"
+ "st1b z9.b, p1, [%[outptr], #1, MUL VL]\n"
+ "zip2 z15.s, z3.s, z7.s\n"
+ "whilelt p5.b, %[outpos], %[outwidth]\n"
+ "st1b z10.b, p2, [%[outptr], #2, MUL VL]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z11.b, p3, [%[outptr], #3, MUL VL]\n"
+ "whilelt p6.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z12.b, p4, [%[outptr], #4, MUL VL]\n"
+ "whilelt p7.b, %[outpos], %[outwidth]\n"
+ "incb %[outpos], all, mul #1\n"
+ "st1b z13.b, p5, [%[outptr], #5, MUL VL]\n"
+ "st1b z14.b, p6, [%[outptr], #6, MUL VL]\n"
+ "st1b z15.b, p7, [%[outptr], #7, MUL VL]\n"
+ "addvl %[outptr], %[outptr], #8\n"
+ "b 1b\n"
+ "2:\n"
+ : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [inptr6] "+r" (inptr6), [inptr7] "+r" (inptr7)
+ : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth)
+ : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory"
+ );
+ break;
+
+
+ }
+ }
+}
+
+#endif // __ARM_FEATURE_SVE
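All of the interleave transforms above share one predicated-loop shape: whilelt builds a predicate covering the lanes that remain, b.none exits once no lanes are left, ld1b/st1b load and store under that predicate, and incb/addvl step the counters and pointers by one vector. A minimal single-row sketch of that pattern using the SVE ACLE intrinsics follows; the function name and the single-row simplification are illustrative assumptions, not part of the library.

#include <arm_sve.h>
#include <cstdint>

// Hedged sketch: copies one row a vector at a time, mirroring the
// whilelt / predicated ld1b / predicated st1b / incb structure above.
static void copy_row_sve(uint8_t *outptr, const uint8_t *inptr0, int64_t inwidth)
{
    int64_t  inpos = 0;
    svbool_t p = svwhilelt_b8_s64(inpos, inwidth);    // "whilelt p0.b, inpos, inwidth"
    while (svptest_any(svptrue_b8(), p))              // loop ends when no lanes remain ("b.none 2f")
    {
        svuint8_t row = svld1_u8(p, inptr0 + inpos);  // "ld1b z0.b, p0/z, [inptr0, inpos]"
        svst1_u8(p, outptr + inpos, row);             // "st1b z0.b, p0, [outptr]"
        inpos += (int64_t)svcntb();                   // "incb inpos, all, mul #1"
        p = svwhilelt_b8_s64(inpos, inwidth);
    }
}

The real kernels additionally zip eight rows together through three levels of zip1/zip2 and zero-pad the missing rows for heights below eight, which is what the per-height cases of the switch implement.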
diff --git a/src/core/NEON/kernels/arm_gemm/utils.hpp b/src/core/NEON/kernels/arm_gemm/utils.hpp
index b77bc7a566..a1fc00ea89 100644
--- a/src/core/NEON/kernels/arm_gemm/utils.hpp
+++ b/src/core/NEON/kernels/arm_gemm/utils.hpp
@@ -24,6 +24,10 @@
#pragma once
+#ifdef __ARM_FEATURE_SVE
+#include <arm_sve.h>
+#endif
+
// Macro for unreachable code (e.g. impossible default cases on switch)
#define UNREACHABLE(why) __builtin_unreachable()
@@ -31,23 +35,27 @@
// #define UNREACHABLE(why) assert(0 && why)
inline int iceildiv(const int a, const int b) {
- return (a + b - 1) / b;
+ return (a + b - 1) / b;
}
template <typename T>
inline T roundup(const T a, const T b) {
- T rem = a % b;
+ T rem = a % b;
- if (rem) {
- return a + b - rem;
- } else {
- return a;
- }
+ if (rem) {
+ return a + b - rem;
+ } else {
+ return a;
+ }
}
template <typename T>
inline unsigned long get_vector_length() {
+#ifdef __ARM_FEATURE_SVE
+ const unsigned long length = svcntb();
+#else
const unsigned long length = 16;
+#endif
return length / sizeof(T);
-}
+}
\ No newline at end of file
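With this change get_vector_length<T>() reports the number of T-sized lanes per vector at run time on SVE targets (svcntb() returns the vector length in bytes), while non-SVE builds keep the fixed 128-bit NEON width. A hedged usage sketch, with lane counts given only as examples for a 256-bit SVE implementation:

// Illustrative values only; actual results depend on the hardware vector length.
unsigned long fp32_lanes = get_vector_length<float>();   // 8 on 256-bit SVE, 4 on NEON
unsigned long s8_lanes   = get_vector_length<int8_t>();  // 32 on 256-bit SVE, 16 on NEON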
diff --git a/src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h b/src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h
index 00e483c0f8..69842fec80 100644
--- a/src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h
+++ b/src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h
@@ -37,6 +37,10 @@
#include "../arm_gemm/kernels/a64_gemm_u8_4x4.hpp"
#include "../arm_gemm/kernels/a64_hgemm_24x8.hpp"
#include "../arm_gemm/kernels/a64_sgemm_12x8.hpp"
+#include "../arm_gemm/kernels/sve_interleaved_fp16_mla_3VLx8.hpp"
+#include "../arm_gemm/kernels/sve_interleaved_fp32_mla_3VLx8.hpp"
+#include "../arm_gemm/kernels/sve_interleaved_s8s32_dot_3VLx8.hpp"
+#include "../arm_gemm/kernels/sve_interleaved_u8u32_dot_3VLx8.hpp"
namespace arm_compute
{
@@ -54,6 +58,29 @@ struct Kernel
#define DEFINE_STRATEGY(strat) \
DEFINE_STRATEGY_SUFFIX(strat, "")
+#ifdef __ARM_FEATURE_SVE
+template <>
+struct Kernel<float, false>
+{
+ DEFINE_STRATEGY(interleaved_fp32_mla_3VLx8)
+};
+template <>
+struct Kernel<float16_t, false>
+{
+ DEFINE_STRATEGY(interleaved_fp16_mla_3VLx8)
+};
+template <bool use_dot>
+struct Kernel<int8_t, use_dot>
+{
+ DEFINE_STRATEGY(interleaved_s8s32_dot_3VLx8)
+};
+template <bool use_dot>
+struct Kernel<uint8_t, use_dot>
+{
+ DEFINE_STRATEGY(interleaved_u8u32_dot_3VLx8)
+};
+#else /* __ARM_FEATURE_SVE */
+
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
template <>
struct Kernel<float16_t, false>
@@ -96,6 +123,7 @@ struct Kernel<float, false>
DEFINE_STRATEGY(sgemm_8x6)
};
#endif /* __aarch64__ */
+#endif /* __ARM_FEATURE_SVE */
#undef DEFINE_STRATEGY
#undef DEFINE_STRATEGY_SUFFIX
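These specialisations select the SVE interleaved strategies at compile time whenever __ARM_FEATURE_SVE is defined, falling back to the existing NEON kernels otherwise. Assuming DEFINE_STRATEGY aliases the named arm_gemm kernel as Kernel<...>::strategy (an inference from the usage pattern here, not verified against the macro definition), a consumer would pick a kernel roughly like this:

// Hypothetical consumer; the 'strategy' alias and constructor signature are assumptions.
template <typename To, bool use_dot>
void configure_interleaved_gemm(const CPUInfo *ci)
{
    using strategy = typename Kernel<To, use_dot>::strategy;
    strategy strat(ci);  // e.g. arm_gemm::interleaved_fp32_mla_3VLx8 for <float, false> with SVE
    // ... the interleaved GEMM wrapper then drives strat's transform and kernel routines.
}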
diff --git a/src/runtime/NEON/functions/NEDeconvolutionLayer.cpp b/src/runtime/NEON/functions/NEDeconvolutionLayer.cpp
index c2d7d6a9d9..6887a0a8cd 100644
--- a/src/runtime/NEON/functions/NEDeconvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEDeconvolutionLayer.cpp
@@ -27,7 +27,7 @@
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "arm_compute/runtime/CPP/CPPScheduler.h"
+#include "arm_compute/runtime/NEON/NEScheduler.h"
using namespace arm_compute;
using namespace arm_compute::misc::shape_calculator;
@@ -166,7 +166,7 @@ void NEDeconvolutionLayer::prepare()
// Run weights flipping and mark original weights tensor as unused
_weights_flipped.allocator()->allocate();
- CPPScheduler::get().schedule(&_flip_weights, Window::DimZ);
+ NEScheduler::get().schedule(&_flip_weights, Window::DimZ);
_original_weights->mark_as_unused();
// Prepare convolution
diff --git a/support/Semaphore.h b/support/Semaphore.h
index 6cdf196dee..636d5caf79 100644
--- a/support/Semaphore.h
+++ b/support/Semaphore.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -74,14 +74,17 @@ public:
Semaphore(int value = 0)
: _value(value)
{
+ (void)_value;
}
/** Signals a semaphore */
inline void signal()
{
+ (void)_value;
}
/** Waits on a semaphore */
inline void wait()
{
+ (void)_value;
}
private:
diff --git a/tests/AssetsLibrary.cpp b/tests/AssetsLibrary.cpp
index ee876f91e3..c6d86d1c1a 100644
--- a/tests/AssetsLibrary.cpp
+++ b/tests/AssetsLibrary.cpp
@@ -416,7 +416,7 @@ RawTensor AssetsLibrary::load_image(const std::string &name) const
const RawTensor &AssetsLibrary::find_or_create_raw_tensor(const std::string &name, Format format) const
{
- std::lock_guard<std::mutex> guard(_format_lock);
+ std::lock_guard<arm_compute::Mutex> guard(_format_lock);
const RawTensor *ptr = _cache.find(std::forward_as_tuple(name, format));
@@ -440,7 +440,7 @@ const RawTensor &AssetsLibrary::find_or_create_raw_tensor(const std::string &nam
const RawTensor &AssetsLibrary::find_or_create_raw_tensor(const std::string &name, Format format, Channel channel) const
{
- std::lock_guard<std::mutex> guard(_channel_lock);
+ std::lock_guard<arm_compute::Mutex> guard(_channel_lock);
const RawTensor *ptr = _cache.find(std::forward_as_tuple(name, format, channel));
diff --git a/tests/AssetsLibrary.h b/tests/AssetsLibrary.h
index b1c8c430ad..7af036d256 100644
--- a/tests/AssetsLibrary.h
+++ b/tests/AssetsLibrary.h
@@ -414,8 +414,8 @@ private:
const RawTensor &find_or_create_raw_tensor(const std::string &name, Format format, Channel channel) const;
mutable TensorCache _cache{};
- mutable std::mutex _format_lock{};
- mutable std::mutex _channel_lock{};
+ mutable arm_compute::Mutex _format_lock{};
+ mutable arm_compute::Mutex _channel_lock{};
const std::string _library_path;
std::random_device::result_type _seed;
};
diff --git a/tests/SConscript b/tests/SConscript
index ac826f848d..24714efa74 100644
--- a/tests/SConscript
+++ b/tests/SConscript
@@ -124,7 +124,7 @@ if env['gles_compute']:
if env['os'] == 'android':
test_env.Append(LIBS = ["log"])
-else:
+elif env['os'] != 'bare_metal':
test_env.Append(LIBS = ["rt"])
if test_env['benchmark_tests']:
diff --git a/tests/TensorCache.h b/tests/TensorCache.h
index 7cf64ffbe5..c8f21116d6 100644
--- a/tests/TensorCache.h
+++ b/tests/TensorCache.h
@@ -26,6 +26,8 @@
#include "RawTensor.h"
+#include "support/Mutex.h"
+
#include <map>
#include <mutex>
#include <utility>
@@ -84,10 +86,10 @@ private:
using FormatMap = std::map<std::tuple<std::string, Format>, RawTensor>;
using ChannelMap = std::map<std::tuple<std::string, Format, Channel>, RawTensor>;
- FormatMap _raw_tensor_cache{};
- ChannelMap _raw_tensor_channel_cache{};
- std::mutex _raw_tensor_cache_mutex{};
- std::mutex _raw_tensor_channel_cache_mutex{};
+ FormatMap _raw_tensor_cache{};
+ ChannelMap _raw_tensor_channel_cache{};
+ arm_compute::Mutex _raw_tensor_cache_mutex{};
+ arm_compute::Mutex _raw_tensor_channel_cache_mutex{};
};
inline RawTensor *TensorCache::find(std::tuple<const std::string &, Format> key)
@@ -104,13 +106,13 @@ inline RawTensor *TensorCache::find(std::tuple<const std::string &, Format, Chan
inline RawTensor &TensorCache::add(std::tuple<const std::string &, Format> key, RawTensor raw)
{
- std::lock_guard<std::mutex> lock(_raw_tensor_channel_cache_mutex);
+ std::lock_guard<arm_compute::Mutex> lock(_raw_tensor_cache_mutex);
return std::get<0>(_raw_tensor_cache.emplace(std::move(key), std::move(raw)))->second;
}
inline RawTensor &TensorCache::add(std::tuple<const std::string &, Format, Channel> key, RawTensor raw)
{
- std::lock_guard<std::mutex> lock(_raw_tensor_channel_cache_mutex);
+ std::lock_guard<arm_compute::Mutex> lock(_raw_tensor_channel_cache_mutex);
return std::get<0>(_raw_tensor_channel_cache.emplace(std::move(key), std::move(raw)))->second;
}
} // namespace test
diff --git a/tests/framework/instruments/Instruments.h b/tests/framework/instruments/Instruments.h
index 705fc59b29..77c74b7b3e 100644
--- a/tests/framework/instruments/Instruments.h
+++ b/tests/framework/instruments/Instruments.h
@@ -24,10 +24,12 @@
#ifndef ARM_COMPUTE_TEST_INSTRUMENTS
#define ARM_COMPUTE_TEST_INSTRUMENTS
+#if !defined(BARE_METAL)
#include "MaliCounter.h"
#include "OpenCLMemoryUsage.h"
#include "OpenCLTimer.h"
#include "PMUCounter.h"
+#endif /* !defined(BARE_METAL) */
#include "SchedulerTimer.h"
#include "WallClockTimer.h"